On Tue, Jul 13, 2021 at 11:26:05PM +0200, Martin Kletzander wrote:
This is the simplest way to fix the issue of macOS lacking SOCK_CLOEXEC
and SOCK_NONBLOCK. There is no substantially better alternative, so this
is the way to make it work on such platforms.
Signed-off-by: Martin Kletzander <mkletzan(a)redhat.com>
---
+int nbd_internal_socket(int domain,
+ int type,
+ int protocol,
+ bool nonblock)
+{
+ int fd;
+
+#ifdef __APPLE__
Feels too specific; better might be #ifndef SOCK_CLOEXEC.
+ int flags;
+#else
+ type |= SOCK_CLOEXEC;
+ if (nonblock)
+ type |= SOCK_NONBLOCK;
+#endif
+
+ fd = socket (domain, type, protocol);
+
+#ifdef __APPLE__
and again
+ if (fd == -1)
+ return -1;
+
+ if (fcntl (fd, F_SETFD, FD_CLOEXEC) == -1) {
+ close(fd);
+ return -1;
I'd add a comment documenting that this is a known race when libnbd is
used in a multithreaded program, but that we can't work around it
without Apple fixing their bug.
+ }
+
+ if (nonblock) {
+ flags = fcntl (fd, F_GETFL, 0);
+ if (flags == -1 ||
+ fcntl (fd, F_SETFL, flags|O_NONBLOCK) == -1) {
+ close(fd);
+ return -1;
+ }
This one is not a race, merely an inconvenience.
+ }
+#endif
+
+ return fd;
+}
+
+int
+nbd_internal_socketpair (int domain, int type, int protocol, int *fds)
+{
+ int ret;
+
+#ifdef __APPLE__
and again
+ size_t i;
+#else
+ type |= SOCK_CLOEXEC;
+#endif
+
+ ret = socketpair (domain, type, protocol, fds);
+
+#ifdef __APPLE__
+ if (ret == 0) {
+ for (i = 0; i < 2; i++) {
+ if (fcntl (fds[i], F_SETFD, FD_CLOEXEC) == -1) {
+ close(fds[0]);
+ close(fds[1]);
+ return -1;
+ }
+ }
+ }
Do we care about the value of errno left in place after this function
fails? Right now, we aren't very careful about what gets left there,
but if none of the callers care, we're okay.
+#endif
+
+ return ret;
+}
diff --git a/fuzzing/libnbd-fuzz-wrapper.c b/fuzzing/libnbd-fuzz-wrapper.c
index 99a6d803258f..3d127e673e9e 100644
--- a/fuzzing/libnbd-fuzz-wrapper.c
+++ b/fuzzing/libnbd-fuzz-wrapper.c
@@ -41,6 +41,34 @@
static void client (int s);
static void server (int fd, int s);
+static int
+get_socketpair (int domain, int type, int protocol, int *fds)
+{
+ int ret;
+
+#ifdef __APPLE__
again for the #ifdef witness
+ size_t i;
+#else
+ type |= SOCK_CLOEXEC;
+#endif
+
+ ret = socketpair (domain, type, protocol, fds);
+
+#ifdef __APPLE__
+ if (ret == 0) {
+ for (i = 0; i < 2; i++) {
+ if (fcntl (fds[i], F_SETFD, FD_CLOEXEC) == -1) {
+ close(fds[0]);
+ close(fds[1]);
+ return -1;
+ }
+ }
+ }
Is the fuzzer multithreaded? Or can we just blindly do the workaround
ourselves and not worry about trying to use SOCK_CLOEXEC here? Also, it feels like some
duplication; can we not reuse nbd_internal_socketpair from the fuzzer?
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3266
Virtualization:
qemu.org |
libvirt.org