The guestfs__launch code does many different things at once.
To enable code reuse in later patches, pull out some reusable
bits of functionality into static helper functions:
- guestfs__connect_tcp() waits for a connection from a
QEMU slirp client over a TCP socket
- guestfs__connect_unix() connects to a UNIX domain socket
server running in QEMU
- guestfs__connect_handshake() performs the initial
handshake with the guest daemon.
---
src/guestfs.c | 240 +++++++++++++++++++++++++++++++++------------------------
1 files changed, 138 insertions(+), 102 deletions(-)
diff --git a/src/guestfs.c b/src/guestfs.c
index 1439361..6e9947f 100644
--- a/src/guestfs.c
+++ b/src/guestfs.c
@@ -935,6 +935,141 @@ static void print_cmdline (guestfs_h *g);
static const char *kernel_name = "vmlinuz." REPO "." host_cpu;
static const char *initrd_name = "initramfs." REPO "." host_cpu
".img";
+/* Null vmchannel implementation: We listen on g->sock for a
+ * connection. The connection could come from any local process
+ * so we must check it comes from the appliance (or at least
+ * from our UID) for security reasons.
+ */
+int
+guestfs__connect_tcp (guestfs_h *g, int null_vmchannel_sock)
+{
+ int sock = -1;
+ uid_t uid;
+
+ while (sock == -1) {
+ sock = accept_from_daemon (g);
+ if (sock == -1)
+ goto cleanup1;
+
+ if (check_peer_euid (g, sock, &uid) == -1)
+ goto cleanup1;
+ if (uid != geteuid ()) {
+ fprintf (stderr,
+ "libguestfs: warning: unexpected connection from UID %d to port %d\n",
+ uid, null_vmchannel_sock);
+ close (sock);
+ sock = -1;
+ continue;
+ }
+ }
+
+ if (fcntl (sock, F_SETFL, O_NONBLOCK) == -1) {
+ perrorf (g, "fcntl");
+ goto cleanup1;
+ }
+
+ close (g->sock);
+ g->sock = sock;
+
+ g->state = LAUNCHING;
+ return 0;
+
+ cleanup1:
+ return -1;
+}
+
+
+/* Other vmchannel. Open the Unix socket.
+ *
+ * The vmchannel implementation that got merged with qemu sucks in
+ * a number of ways. Both ends do connect(2), which means that no
+ * one knows what, if anything, is connected to the other end, or
+ * if it becomes disconnected. Even worse, we have to wait some
+ * indeterminate time for qemu to create the socket and connect to
+ * it (which happens very early in qemu's start-up), so any code
+ * that uses vmchannel is inherently racy. Hence this silly loop.
+ */
+int
+guestfs__connect_unix(guestfs_h *g, const char *unixsock)
+{
+ struct sockaddr_un addr;
+
+ g->sock = socket (AF_UNIX, SOCK_STREAM, 0);
+ if (g->sock == -1) {
+ perrorf (g, "socket");
+ goto cleanup1;
+ }
+
+ if (fcntl (g->sock, F_SETFL, O_NONBLOCK) == -1) {
+ perrorf (g, "fcntl");
+ goto cleanup1;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strncpy (addr.sun_path, unixsock, UNIX_PATH_MAX);
+ addr.sun_path[UNIX_PATH_MAX-1] = '\0';
+
+ int tries = 100;
+ /* Always sleep at least once to give qemu a small chance to start up. */
+ usleep (10000);
+ while (tries > 0) {
+ int r = connect (g->sock, (struct sockaddr *) &addr, sizeof addr);
+ if ((r == -1 && errno == EINPROGRESS) || r == 0)
+ goto connected;
+
+ if (errno != ENOENT)
+ perrorf (g, "connect");
+ tries--;
+ usleep (100000);
+ }
+
+ error (g, _("failed to connect to vmchannel socket"));
+ goto cleanup1;
+
+ connected: ;
+ g->state = LAUNCHING;
+ return 0;
+
+ cleanup1:
+ return -1;
+}
+
+
+/* Wait for qemu to start and to connect back to us via vmchannel and
+ * send the GUESTFS_LAUNCH_FLAG message.
+ */
+int
+guestfs__connect_handshake(guestfs_h *g)
+{
+ uint32_t size;
+ void *buf = NULL;
+ int r = recv_from_daemon (g, &size, &buf);
+ free (buf);
+
+ if (r == -1) return -1;
+
+ if (size != GUESTFS_LAUNCH_FLAG) {
+ error (g, _("guestfs_launch failed, see earlier error messages"));
+ return -1;
+ }
+
+ if (g->verbose)
+ print_timestamped_message (g, "appliance is up");
+
+ /* This is possible in some really strange situations, such as
+ * guestfsd starts up OK but then qemu immediately exits. Check for
+ * it because the caller is probably expecting to be able to send
+ * commands after this function returns.
+ */
+ if (g->state != READY) {
+ error (g, _("qemu launched and contacted daemon, but state != READY"));
+ return -1;
+ }
+
+ return 0;
+}
+
int
guestfs__launch (guestfs_h *g)
{
@@ -948,7 +1083,6 @@ guestfs__launch (guestfs_h *g)
char *kernel = NULL, *initrd = NULL;
int null_vmchannel_sock;
char unixsock[256];
- struct sockaddr_un addr;
/* Configured? */
if (!g->cmdline) {
@@ -1405,113 +1539,15 @@ guestfs__launch (guestfs_h *g)
}
if (null_vmchannel_sock) {
- int sock = -1;
- uid_t uid;
-
- /* Null vmchannel implementation: We listen on g->sock for a
- * connection. The connection could come from any local process
- * so we must check it comes from the appliance (or at least
- * from our UID) for security reasons.
- */
- while (sock == -1) {
- sock = accept_from_daemon (g);
- if (sock == -1)
- goto cleanup1;
-
- if (check_peer_euid (g, sock, &uid) == -1)
- goto cleanup1;
- if (uid != geteuid ()) {
- fprintf (stderr,
-               "libguestfs: warning: unexpected connection from UID %d to port %d\n",
- uid, null_vmchannel_sock);
- close (sock);
- sock = -1;
- continue;
- }
- }
-
- if (fcntl (sock, F_SETFL, O_NONBLOCK) == -1) {
- perrorf (g, "fcntl");
+ if (guestfs__connect_tcp (g, null_vmchannel_sock) < 0)
goto cleanup1;
- }
-
- close (g->sock);
- g->sock = sock;
} else {
- /* Other vmchannel. Open the Unix socket.
- *
- * The vmchannel implementation that got merged with qemu sucks in
- * a number of ways. Both ends do connect(2), which means that no
- * one knows what, if anything, is connected to the other end, or
- * if it becomes disconnected. Even worse, we have to wait some
- * indeterminate time for qemu to create the socket and connect to
- * it (which happens very early in qemu's start-up), so any code
- * that uses vmchannel is inherently racy. Hence this silly loop.
- */
- g->sock = socket (AF_UNIX, SOCK_STREAM, 0);
- if (g->sock == -1) {
- perrorf (g, "socket");
+ if (guestfs__connect_unix (g, unixsock) < 0)
goto cleanup1;
- }
-
- if (fcntl (g->sock, F_SETFL, O_NONBLOCK) == -1) {
- perrorf (g, "fcntl");
- goto cleanup1;
- }
-
- addr.sun_family = AF_UNIX;
- strncpy (addr.sun_path, unixsock, UNIX_PATH_MAX);
- addr.sun_path[UNIX_PATH_MAX-1] = '\0';
-
- tries = 100;
- /* Always sleep at least once to give qemu a small chance to start up. */
- usleep (10000);
- while (tries > 0) {
- r = connect (g->sock, (struct sockaddr *) &addr, sizeof addr);
- if ((r == -1 && errno == EINPROGRESS) || r == 0)
- goto connected;
-
- if (errno != ENOENT)
- perrorf (g, "connect");
- tries--;
- usleep (100000);
- }
-
- error (g, _("failed to connect to vmchannel socket"));
- goto cleanup1;
-
- connected: ;
}
- g->state = LAUNCHING;
-
- /* Wait for qemu to start and to connect back to us via vmchannel and
- * send the GUESTFS_LAUNCH_FLAG message.
- */
- uint32_t size;
- void *buf = NULL;
- r = recv_from_daemon (g, &size, &buf);
- free (buf);
-
- if (r == -1) return -1;
-
- if (size != GUESTFS_LAUNCH_FLAG) {
- error (g, _("guestfs_launch failed, see earlier error messages"));
- goto cleanup1;
- }
-
- if (g->verbose)
- print_timestamped_message (g, "appliance is up");
-
- /* This is possible in some really strange situations, such as
- * guestfsd starts up OK but then qemu immediately exits. Check for
- * it because the caller is probably expecting to be able to send
- * commands after this function returns.
- */
- if (g->state != READY) {
- error (g, _("qemu launched and contacted daemon, but state != READY"));
+ if (guestfs__connect_handshake (g) < 0)
goto cleanup1;
- }
return 0;
--
1.6.6.1