Once additional per-connection threads are introduced for parallel
plugins, we run a much higher risk of the scheduler causing a
hang during test-socket-activation. The hang comes from waiting
for a second new client in accept_incoming_connections() even
though the test already sent SIGTERM after the first connection
to request an orderly shutdown, via the following scenario:

  main                                   signal context
  ------------------------------------------------------
  first iteration, finish accept_connection()
  checks !quit, starts second iteration
                                         SIGTERM received
                                         set quit
  call poll()

That's because there is a window of time between when we read
the global 'quit' and when we start our poll(), in relation to
the signal handler which sets 'quit'. If the signal arrives
after poll() has started, then poll() fails with EINTR, which
lets us recheck 'quit'; but as more threads compete for
execution time, the window in which the handler can finish
executing before poll() starts grows larger.

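For illustration only (this is not code from the patch; the
identifiers are invented for the example), a minimal sketch of
the vulnerable check-then-poll pattern:

  #include <poll.h>

  extern volatile int quit;    /* set to 1 by the SIGTERM handler */

  void
  serve (struct pollfd *fds, nfds_t nfds)
  {
    while (!quit) {            /* (1) read 'quit' */
      /* If SIGTERM is delivered right here, the handler sets
       * 'quit', but nothing interrupts the call below... */
      if (poll (fds, nfds, -1) == -1)
        continue;              /* (2) EINTR only helps if the signal
                                * arrives while poll() is blocked */
      /* ... accept new connections on ready fds ... */
    }
  }
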
We want to avoid accepting any new connections once we detect
a signal, but basing that decision on the non-atomic 'quit' is
inadequate. The solution is to set up a pipe-to-self as an
additional fd to poll for, so that we are guaranteed to break
out of poll() the moment we know a signal has arrived. Note,
however, that this makes shutdown signals more responsive than
before: while we have a pthread_rwlock that prevents us from
shutting down while a plugin is active, we are now more likely
to call exit() from the main thread before our detached handler
threads have detected that quit was set. Perhaps a followup
patch should give the handler threads more time to do an
orderly shutdown(), for less abrupt deaths of any connected
clients.

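As a rough standalone sketch of the same pipe-to-self technique
(all identifiers here are invented for the example; the real
changes are in the diff below):

  #include <poll.h>
  #include <signal.h>
  #include <string.h>
  #include <unistd.h>

  static volatile sig_atomic_t quit;
  static int quit_rfd, quit_wfd;   /* read/write ends of the pipe */

  static void
  on_term (int sig)
  {
    char c = sig;

    quit = 1;
    /* write() is async-signal-safe; ignore errors, as there is
     * nothing useful a handler could do about them. */
    write (quit_wfd, &c, 1);
  }

  int
  main (void)
  {
    int fds[2];
    struct sigaction sa;
    struct pollfd pfd;

    if (pipe (fds) < 0)
      return 1;
    quit_rfd = fds[0];
    quit_wfd = fds[1];

    memset (&sa, 0, sizeof sa);
    sa.sa_handler = on_term;
    sigaction (SIGTERM, &sa, NULL);

    pfd.fd = quit_rfd;
    pfd.events = POLLIN;
    while (!quit) {
      /* Even if SIGTERM lands between the !quit check and this
       * call, the byte already written to the pipe makes quit_rfd
       * readable, so poll() cannot block forever. */
      poll (&pfd, 1, -1);
    }
    return 0;
  }

The patch applies the same idea by adding quit_fd as one more
entry in the existing pollfd array in accept_incoming_connections().
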
Signed-off-by: Eric Blake <eblake@redhat.com>
---
src/internal.h | 4 ++--
src/main.c | 19 ++++++++++++++++++-
src/sockets.c | 14 +++++++++++---
3 files changed, 31 insertions(+), 6 deletions(-)

diff --git a/src/internal.h b/src/internal.h
index 1fc5d69..5953185 100644
--- a/src/internal.h
+++ b/src/internal.h
@@ -1,5 +1,5 @@
/* nbdkit
- * Copyright (C) 2013 Red Hat Inc.
+ * Copyright (C) 2013-2017 Red Hat Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -103,8 +103,8 @@ extern const char *tls_certificates_dir;
extern int tls_verify_peer;
extern char *unixsocket;
extern int verbose;
-
extern volatile int quit;
+extern int quit_fd;

/* cleanup.c */
extern void cleanup_free (void *ptr);
diff --git a/src/main.c b/src/main.c
index c9f08ab..224ee8a 100644
--- a/src/main.c
+++ b/src/main.c
@@ -1,5 +1,5 @@
/* nbdkit
- * Copyright (C) 2013-2016 Red Hat Inc.
+ * Copyright (C) 2013-2017 Red Hat Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,7 +92,13 @@ const char *user, *group; /* -u & -g */
int verbose; /* -v */
unsigned int socket_activation /* $LISTEN_FDS and $LISTEN_PID set */;

+/* Detection of request to exit via signal. Most places in the code
+ * can just poll quit at opportune moments, while sockets.c needs a
+ * pipe-to-self through quit_fd in order to break a poll loop without
+ * a race. */
volatile int quit;
+int quit_fd;
+static int write_quit_fd;

static char *random_fifo_dir = NULL;
static char *random_fifo = NULL;
@@ -657,13 +663,24 @@ start_serving (void)
static void
handle_quit (int sig)
{
+ char c = sig;
+
quit = 1;
+ write (write_quit_fd, &c, 1);
}

static void
set_up_signals (void)
{
struct sigaction sa;
+  int fds[2];
+
+ if (pipe (fds) < 0) {
+ perror ("pipe");
+ exit (EXIT_FAILURE);
+ }
+ quit_fd = fds[0];
+  write_quit_fd = fds[1];

memset (&sa, 0, sizeof sa);
sa.sa_flags = SA_RESTART;
diff --git a/src/sockets.c b/src/sockets.c
index 1d63523..0ea40a1 100644
--- a/src/sockets.c
+++ b/src/sockets.c
@@ -1,5 +1,5 @@
/* nbdkit
- * Copyright (C) 2013 Red Hat Inc.
+ * Copyright (C) 2013-2017 Red Hat Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -316,7 +316,7 @@ accept_connection (int listen_sock)
void
accept_incoming_connections (int *socks, size_t nr_socks)
{
- struct pollfd fds[nr_socks];
+ struct pollfd fds[nr_socks + 1];
size_t i;
int r;
@@ -326,8 +326,11 @@ accept_incoming_connections (int *socks, size_t nr_socks)
fds[i].events = POLLIN;
fds[i].revents = 0;
}
+ fds[i].fd = quit_fd;
+ fds[i].events = POLLIN;
+  fds[i].revents = 0;

- r = poll (fds, nr_socks, -1);
+ r = poll (fds, nr_socks + 1, -1);
if (r == -1) {
if (errno == EINTR || errno == EAGAIN)
continue;
@@ -335,6 +338,11 @@ accept_incoming_connections (int *socks, size_t nr_socks)
exit (EXIT_FAILURE);
}

+ /* We don't even have to read quit_fd - just knowing that it has data
+ * means the signal handler ran, so we are ready to quit the loop. */
+ if (fds[i].revents & POLLIN)
+ continue;
+
for (i = 0; i < nr_socks; ++i) {
if (fds[i].revents & POLLIN)
accept_connection (fds[i].fd);
--
2.13.6