The next patch wants to add a utility function that sleeps until a
requested amount of time has elapsed, but which short-circuits and
reports failure as soon as the client is going away.  The most
obvious kernel interfaces for waiting on either an event or a timeout
are pselect and ppoll, but both require a file descriptor to act as
the witness of the event.  We already have a pipe-to-self for when
nbdkit itself is quitting because of a signal; now we also want a
pipe-to-self per multi-threaded connection, so that detection of
NBD_CMD_DISC or EOF in one worker thread can serve as the short
circuit for another thread waiting on a timeout.
Signed-off-by: Eric Blake <eblake@redhat.com>
---
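[Not part of the patch -- review aid only.]  A rough sketch of how the
follow-up's timed-wait utility might consume the new status pipe; the
helper name, signature, and choice of ESHUTDOWN are my placeholders,
not the final API:

#define _GNU_SOURCE             /* for ppoll() on glibc */
#include <poll.h>
#include <time.h>
#include <errno.h>

#include "internal.h"

/* Sleep for the remaining interval, but return early with an error as
 * soon as connection_set_status() writes its wake-up byte.  If
 * status_pipe[0] is -1 (nworkers == 0), ppoll ignores that entry and
 * this degenerates into a plain timed sleep.
 */
static int
sleep_or_shutdown (struct connection *conn, const struct timespec *remaining)
{
  struct pollfd fds[1] = {
    { .fd = conn->status_pipe[0], .events = POLLIN },
  };

  switch (ppoll (fds, 1, remaining, NULL)) {
  case -1:
    return -1;                  /* real failure, errno already set */
  case 0:
    return 0;                   /* slept the full interval */
  default:
    errno = ESHUTDOWN;          /* client is going away, give up early */
    return -1;
  }
}

The only point is that the pipe's read end gives ppoll a witness fd,
so a worker blocked in such a sleep wakes as soon as another worker
drops the connection status.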
server/internal.h    |  1 +
server/connections.c | 62 +++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 62 insertions(+), 1 deletion(-)
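Also as a review aid: the non-pipe2 fallback below chains
set_nonblock (set_cloexec (fd)) from utils.h.  Judging from the error
paths (each one only closes the *other* end of the pipe), those
helpers seem to return the fd on success, and to close it and return
-1 on failure.  A hypothetical stand-in with that shape, for readers
who do not have utils.h open -- an approximation, not a copy of the
real helpers:

#include <fcntl.h>
#include <unistd.h>

/* Set one descriptor flag; pass the fd through on success, close it
 * and return -1 on failure so that calls can be chained.
 */
static int
set_flag_or_close (int fd, int getcmd, int setcmd, int flag)
{
  int old;

  if (fd == -1)
    return -1;                  /* propagate an earlier failure */
  old = fcntl (fd, getcmd);
  if (old == -1 || fcntl (fd, setcmd, old | flag) == -1) {
    close (fd);
    return -1;
  }
  return fd;
}

/* set_cloexec (fd)  ~= set_flag_or_close (fd, F_GETFD, F_SETFD, FD_CLOEXEC)
 * set_nonblock (fd) ~= set_flag_or_close (fd, F_GETFL, F_SETFL, O_NONBLOCK)
 */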
diff --git a/server/internal.h b/server/internal.h
index df4df0f2..8c229816 100644
--- a/server/internal.h
+++ b/server/internal.h
@@ -152,6 +152,7 @@ struct connection {
pthread_mutex_t write_lock;
pthread_mutex_t status_lock;
int status; /* 1 for more I/O with client, 0 for shutdown, -1 on error */
+ int status_pipe[2]; /* track status changes via poll when nworkers > 1 */
void *crypto_session;
int nworkers;
diff --git a/server/connections.c b/server/connections.c
index f49a74ee..c173df8d 100644
--- a/server/connections.c
+++ b/server/connections.c
@@ -39,8 +39,11 @@
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
+#include <fcntl.h>
+#include <assert.h>
#include "internal.h"
+#include "utils.h"
/* Default number of parallel requests. */
#define DEFAULT_PARALLEL_REQUESTS 16
@@ -114,8 +117,16 @@ connection_set_status (struct connection *conn, int value)
if (conn->nworkers &&
pthread_mutex_lock (&conn->status_lock))
abort ();
- if (value < conn->status)
+ if (value < conn->status) {
+ if (conn->nworkers && conn->status > 0) {
+ char c = 0;
+
+ assert (conn->status_pipe[1] >= 0);
+ if (write (conn->status_pipe[1], &c, 1) != 1 && errno != EAGAIN)
+ nbdkit_debug ("failed to notify pipe-to-self: %m");
+ }
conn->status = value;
+ }
if (conn->nworkers &&
pthread_mutex_unlock (&conn->status_lock))
abort ();
@@ -284,6 +295,50 @@ new_connection (int sockin, int sockout, int nworkers)
conn->status = 1;
conn->nworkers = nworkers;
+ if (nworkers) {
+#ifdef HAVE_PIPE2
+ if (pipe2 (conn->status_pipe, O_NONBLOCK | O_CLOEXEC)) {
+ perror ("pipe2");
+ free (conn);
+ return NULL;
+ }
+#else
+ /* If we were fully parallel, then this function could be
+ * accepting connections in one thread while another thread could
+ * be in a plugin trying to fork. But plugins.c forces
+ * thread_model to serialize_all_requests when it detects a lack
+ * of atomic CLOEXEC, at which point we can use a mutex to ensure
+ * we aren't accepting until the plugin is not running, making
+ * non-atomicity okay.
+ */
+ assert (backend->thread_model (backend) <=
+ NBDKIT_THREAD_MODEL_SERIALIZE_ALL_REQUESTS);
+ lock_request (NULL);
+ if (pipe (conn->status_pipe)) {
+ perror ("pipe");
+ free (conn);
+ unlock_request (NULL);
+ return NULL;
+ }
+ if (set_nonblock (set_cloexec (conn->status_pipe[0])) == -1) {
+ perror ("fcntl");
+ close (conn->status_pipe[1]);
+ free (conn);
+ unlock_request (NULL);
+ return NULL;
+ }
+ if (set_nonblock (set_cloexec (conn->status_pipe[1])) == -1) {
+ perror ("fcntl");
+ close (conn->status_pipe[0]);
+ free (conn);
+ unlock_request (NULL);
+ return NULL;
+ }
+ unlock_request (NULL);
+#endif
+ }
+ else
+ conn->status_pipe[0] = conn->status_pipe[1] = -1;
conn->sockin = sockin;
conn->sockout = sockout;
pthread_mutex_init (&conn->request_lock, NULL);
@@ -324,6 +379,11 @@ free_connection (struct connection *conn)
}
}
+ if (conn->status_pipe[0] >= 0) {
+ close (conn->status_pipe[0]);
+ close (conn->status_pipe[1]);
+ }
+
pthread_mutex_destroy (&conn->request_lock);
pthread_mutex_destroy (&conn->read_lock);
pthread_mutex_destroy (&conn->write_lock);
--
2.20.1