If nanosleep() fails (typically with EINTR because we handled a
signal), we end up not sleeping as long as intended. In practice,
this patch seldom makes a difference: a signal directed at the nbdkit
process tends to be serviced by the thread running poll(), and when
that happens, the thread blocked in nanosleep() continues
uninterrupted. As a result, nbdkit stalls on exit because it is
waiting for the filter to finish servicing commands. But if you get
lucky (or if you direct the kill at the specific thread stuck in
nanosleep rather than at the process as a whole), this patch makes us
report an early failure, with EINTR mapped to EIO over the wire. At
that point we are back to practical reality: the only signals we
handle request nbdkit to shut down, so we may not even get a chance
to report this unusual error over the wire. In theory, we could
resume nanosleep where it left off by passing a non-null second
argument, but that only matters if we ever install a signal handler
whose action is not a request for nbdkit to shut down soon.
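
Purely for illustration (not part of this patch), a resume-on-EINTR
loop might look like the sketch below; the helper name sleep_fully is
made up, and it assumes <errno.h> and <time.h>. POSIX allows both
nanosleep arguments to point at the same object, so the remaining
time from an interrupted call feeds straight into the retry:

  static int
  sleep_fully (const struct timespec *request)
  {
    struct timespec remaining = *request;

    while (nanosleep (&remaining, &remaining) == -1) {
      if (errno != EINTR)
        return -1;      /* real failure, errno is set */
      /* EINTR: 'remaining' holds the unslept time; go around again. */
    }
    return 0;
  }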

A later patch will add a utility function so that the server can do a
better job, both by preventing short sleeps by resuming on EINTR
(should the server ever add support for a non-fatal signal, whether
that is SIGHUP re-reading the configuration or SIGUSR1/SIGINFO
reporting statistics gathered so far), and by terminating sleeps with
a better error than EINTR the moment we know a particular client is
shutting down (whether because a signal is terminating all of nbdkit,
or because we got NBD_CMD_DISC or detected EOF on that particular
connection). At that point, we will only have to tweak the delay
function to call that utility function in place of nanosleep.
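
As a very rough sketch of the direction such a utility could take
(the name interruptible_sleep, the quit_fd pipe, and the use of
ESHUTDOWN are all hypothetical, not a final interface; it assumes
<poll.h> and <errno.h>, with _GNU_SOURCE for ppoll on glibc),
sleeping via ppoll() on a shutdown pipe lets the server wake a
sleeper as soon as it knows the client or server is going away:

  /* quit_fd: a pipe written to (or closed) when shutdown begins. */
  static int
  interruptible_sleep (int quit_fd, struct timespec remaining, int *err)
  {
    struct pollfd pfd = { .fd = quit_fd, .events = POLLIN };

    for (;;) {
      int r = ppoll (&pfd, 1, &remaining, NULL);

      if (r == 0)
        return 0;                 /* full sleep elapsed */
      if (r > 0) {
        *err = ESHUTDOWN;         /* woken early: shutting down */
        return -1;
      }
      if (errno != EINTR) {
        *err = errno;
        return -1;
      }
      /* EINTR: a real helper would recompute 'remaining' before
         retrying; omitted here for brevity. */
    }
  }
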
Signed-off-by: Eric Blake <eblake@redhat.com>
---
filters/rate/rate.c | 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/filters/rate/rate.c b/filters/rate/rate.c
index 30e093ee..f8dda0b0 100644
--- a/filters/rate/rate.c
+++ b/filters/rate/rate.c
@@ -228,8 +228,9 @@ maybe_adjust (const char *file, struct bucket *bucket, pthread_mutex_t *lock)
old_rate, new_rate);
}
-static inline void
-maybe_sleep (struct bucket *bucket, pthread_mutex_t *lock, uint32_t count)
+static inline int
+maybe_sleep (struct bucket *bucket, pthread_mutex_t *lock, uint32_t count,
+ int *err)
{
struct timespec ts;
uint64_t bits;
@@ -248,8 +249,13 @@ maybe_sleep (struct bucket *bucket, pthread_mutex_t *lock, uint32_t count)
}
if (bits > 0)
- nanosleep (&ts, NULL);
+ if (nanosleep (&ts, NULL) == -1) {
+ nbdkit_error ("nanosleep: %m");
+ *err = errno;
+ return -1;
+ }
}
+ return 0;
}
/* Read data. */
@@ -261,9 +267,11 @@ rate_pread (struct nbdkit_next_ops *next_ops, void *nxdata,
struct rate_handle *h = handle;
maybe_adjust (rate_file, &read_bucket, &read_bucket_lock);
- maybe_sleep (&read_bucket, &read_bucket_lock, count);
+ if (maybe_sleep (&read_bucket, &read_bucket_lock, count, err))
+ return -1;
maybe_adjust (connection_rate_file, &h->read_bucket,
&h->read_bucket_lock);
- maybe_sleep (&h->read_bucket, &h->read_bucket_lock, count);
+ if (maybe_sleep (&h->read_bucket, &h->read_bucket_lock, count, err))
+ return -1;
return next_ops->pread (nxdata, buf, count, offset, flags, err);
}
@@ -278,9 +286,11 @@ rate_pwrite (struct nbdkit_next_ops *next_ops, void *nxdata,
struct rate_handle *h = handle;
maybe_adjust (rate_file, &write_bucket, &write_bucket_lock);
- maybe_sleep (&write_bucket, &write_bucket_lock, count);
+ if (maybe_sleep (&write_bucket, &write_bucket_lock, count, err))
+ return -1;
maybe_adjust (connection_rate_file, &h->write_bucket,
&h->write_bucket_lock);
- maybe_sleep (&h->write_bucket, &h->write_bucket_lock, count);
+ if (maybe_sleep (&h->write_bucket, &h->write_bucket_lock, count, err))
+ return -1;
return next_ops->pwrite (nxdata, buf, count, offset, flags, err);
}
--
2.20.1