[PATCH v2] launch: add support for autodetection of appliance image format
by Pavel Butsykin
This feature allows you to use different image formats for the fixed
appliance. The raw format is used by default.
Signed-off-by: Pavel Butsykin <pbutsykin(a)virtuozzo.com>
---
lib/launch-direct.c | 2 ++
lib/launch-libvirt.c | 19 ++++++++++++-------
m4/guestfs_appliance.m4 | 11 +++++++++++
3 files changed, 25 insertions(+), 7 deletions(-)
diff --git a/lib/launch-direct.c b/lib/launch-direct.c
index 0be662e25..b9b54857a 100644
--- a/lib/launch-direct.c
+++ b/lib/launch-direct.c
@@ -592,7 +592,9 @@ launch_direct (guestfs_h *g, void *datav, const char *arg)
append_list ("id=appliance");
append_list ("cache=unsafe");
append_list ("if=none");
+#ifndef APPLIANCE_FMT_AUTO
append_list ("format=raw");
+#endif
} end_list ();
start_list ("-device") {
append_list ("scsi-hd");
diff --git a/lib/launch-libvirt.c b/lib/launch-libvirt.c
index 4adb2cfb3..030ea6911 100644
--- a/lib/launch-libvirt.c
+++ b/lib/launch-libvirt.c
@@ -212,9 +212,10 @@ get_source_format_or_autodetect (guestfs_h *g, struct drive *drv)
/**
* Create a qcow2 format overlay, with the given C<backing_drive>
- * (file). The C<format> parameter, which must be non-NULL, is the
- * backing file format. This is used to create the appliance overlay,
- * and also for read-only drives.
+ * (file). The C<format> parameter is the backing file format.
+ * It may be NULL, in which case the backing format will be
+ * detected automatically. This is used to create the appliance
+ * overlay, and also for read-only drives.
*/
static char *
make_qcow2_overlay (guestfs_h *g, const char *backing_drive,
@@ -223,8 +224,6 @@ make_qcow2_overlay (guestfs_h *g, const char *backing_drive,
char *overlay;
struct guestfs_disk_create_argv optargs;
- assert (format != NULL);
-
if (guestfs_int_lazy_make_tmpdir (g) == -1)
return NULL;
@@ -232,8 +231,10 @@ make_qcow2_overlay (guestfs_h *g, const char *backing_drive,
optargs.bitmask = GUESTFS_DISK_CREATE_BACKINGFILE_BITMASK;
optargs.backingfile = backing_drive;
- optargs.bitmask |= GUESTFS_DISK_CREATE_BACKINGFORMAT_BITMASK;
- optargs.backingformat = format;
+ if (format) {
+ optargs.bitmask |= GUESTFS_DISK_CREATE_BACKINGFORMAT_BITMASK;
+ optargs.backingformat = format;
+ }
if (guestfs_disk_create_argv (g, overlay, "qcow2", -1, &optargs) == -1) {
free (overlay);
@@ -461,7 +462,11 @@ launch_libvirt (guestfs_h *g, void *datav, const char *libvirt_uri)
/* Note that appliance can be NULL if using the old-style appliance. */
if (appliance) {
+#ifdef APPLIANCE_FMT_AUTO
+ params.appliance_overlay = make_qcow2_overlay (g, appliance, NULL);
+#else
params.appliance_overlay = make_qcow2_overlay (g, appliance, "raw");
+#endif
if (!params.appliance_overlay)
goto cleanup;
}
diff --git a/m4/guestfs_appliance.m4 b/m4/guestfs_appliance.m4
index 81c43879f..4e1ec8135 100644
--- a/m4/guestfs_appliance.m4
+++ b/m4/guestfs_appliance.m4
@@ -139,3 +139,14 @@ AC_SUBST([GUESTFS_DEFAULT_PATH])
AC_DEFINE_UNQUOTED([GUESTFS_DEFAULT_PATH], ["$GUESTFS_DEFAULT_PATH"],
[Define guestfs default path.])
+
+AC_ARG_ENABLE([appliance-fmt-auto],
+ [AS_HELP_STRING([--enable-appliance-fmt-auto],
+ [enable autodetection of appliance image format @<:@default=no@:>@])],
+ [ENABLE_APPLIANCE_FMT_AUTO="$enableval"],
+ [ENABLE_APPLIANCE_FMT_AUTO=no])
+
+if test "x$ENABLE_APPLIANCE_FMT_AUTO" = "xyes"; then
+ AC_DEFINE([APPLIANCE_FMT_AUTO], [1],
+ [Define to 1 if autodetection of the appliance image format is enabled.])
+fi
--
2.13.0
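For anyone who wants to try the new option, here is a minimal usage
sketch. The qemu-img conversion step and the appliance path below are
illustrative assumptions, not part of the patch:

  # build libguestfs with appliance format autodetection enabled
  ./configure --enable-appliance-fmt-auto
  make

  # convert the fixed appliance's root disk to qcow2
  # (path is an example; adjust to where your fixed appliance lives)
  qemu-img convert -f raw -O qcow2 /usr/lib64/guestfs/appliance/root root.qcow2
  mv root.qcow2 /usr/lib64/guestfs/appliance/root

With the option left at its default (no), the existing behaviour is
unchanged: the appliance is always treated as raw.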
1.39 proposal: Let's split up the libguestfs git repo and tarballs
by Richard W.M. Jones
My contention is that the libguestfs git repository is too large and
unwieldy. There are too many separate, unrelated projects, and as a
result the source has too many dependencies and takes too long to
build and test.
The project divides (sort of) naturally into layers -- the library,
the bindings, the various virt tools -- and could be split along those
lines into separate projects which can then be released and evolve at
their own pace.
My suggested split would be something like this:
* libguestfs: The library, daemon and appliance. That would include
the following directories in a single project:
appliance
bash
contrib
daemon
docs
examples
gnulib
lib
logo
test-tool
tmp
utils
website
* 1 project for each language binding:
csharp
erlang
gobject
golang
haskell
java
lua
ocaml
php
perl
python
ruby
* virt-customize and related tools; we'd probably call this subproject
"virt-builder". It would include virt-builder, virt-customize and
virt-sysprep, since they share a lot of common code.
* 1 project for each of the following items:
small tools written in C
(virt-cat, virt-filesystems, virt-log, virt-ls, virt-tail,
virt-diff, virt-edit, virt-format, guestmount, virt-inspector,
virt-make-fs, virt-rescue)
guestfish
virt-alignment-scan and virt-df
virt-dib
virt-get-kernel
virt-resize
virt-sparsify
virt-v2v and virt-p2v
virt-win-reg
* I'd be inclined to drop the legacy Perl tools virt-tar,
virt-list-filesystems, virt-list-partitions unless someone
especially wished to step forward to maintain them.
* common code and generator: Off to the side we'd somehow need to
package up the common code and the generator for use by all of the
above projects. It wouldn't be a separate project for downstream
packagers; instead the code would be included (ie. duplicated) in
tarballs, and upstream it would be available as a side git repo that
you'd need to pull in when building (a git submodule? a sketch follows
below). This is somewhat unspecified.
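A minimal sketch of how the submodule arrangement might work from one
of the split-out projects (the repository name and URL here are
hypothetical, only to illustrate the idea):

  # inside a split-out project, e.g. the guestfish repository
  git submodule add https://github.com/libguestfs/libguestfs-common.git common
  git commit -m 'add common code and generator as a submodule'

  # fresh checkouts of that project would then use
  git clone --recurse-submodules <project-url>
  # or, after a plain clone
  git submodule update --init

Release tarballs would simply ship a copy of the common/ contents, so
downstream packagers would never see the submodule.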
M4, PO, and tests would be split between the projects as appropriate.
My proposal would be to do this incrementally, rather than all at
once, moving the easier things out first.
Thoughts?
Rich.
--
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming and virtualization blog: http://rwmj.wordpress.com
libguestfs lets you edit virtual machines. Supports shell scripting,
bindings from many languages. http://libguestfs.org
[PATCH] v2v: rhv-upload-plugin - improve wait logic after finalize (RHBZ#1680361)
by Daniel Erez
After invoking transfer_service.finalize, check operation
status by examining ImageTransferPhase and DiskStatus.
This is done instead of failing after a predefined timeout
regardless of the status.
* not verified *
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1680361
---
v2v/rhv-upload-plugin.py | 26 +++++++++++++++++++++-----
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/v2v/rhv-upload-plugin.py b/v2v/rhv-upload-plugin.py
index 2a950c5ed..873c11ce1 100644
--- a/v2v/rhv-upload-plugin.py
+++ b/v2v/rhv-upload-plugin.py
@@ -523,14 +523,30 @@ def close(h):
# waiting for the transfer object to cease to exist, which
# falls through to the exception case and then we can
# continue.
- endt = time.time() + timeout
+ start = time.time()
try:
while True:
time.sleep(1)
- tmp = transfer_service.get()
- if time.time() > endt:
- raise RuntimeError("timed out waiting for transfer "
- "to finalize")
+ transfer = transfer_service.get()
+
+ if transfer is None:
+ disk_service = h['disk_service']
+ disk = disk_service.get()
+ if disk.status == types.DiskStatus.OK:
+ continue
+
+ if transfer.phase == types.ImageTransferPhase.FINISHED_SUCCESS:
+ debug("finalized after %s seconds", time.time() - start)
+ break
+
+ if transfer.phase == types.ImageTransferPhase.FINALIZING_SUCCESS:
+ if time.time() > start + timeout:
+ raise RuntimeError("timed out waiting for transfer "
+ "to finalize")
+ continue
+
+ raise RuntimeError("Unexpected transfer phase while
finalizing "
+ "upload %r" % transfer.phase)
except sdk.NotFoundError:
pass
--
[libnbd PATCH 0/6] new APIs: aio_in_flight, aio_FOO_notify
by Eric Blake
I still need to wire the *_notify functions into nbdkit to prove
whether they make the code any faster or easier to maintain, but at
least the added example shows one good use case for the new API.
Eric Blake (6):
api: Add nbd_aio_in_flight
generator: Allow DEAD state actions to run
generator: Allow Int64 in callbacks
states: Prepare for aio notify callback
api: Add new nbd_aio_FOO_notify functions
examples: New example for strict read validations
.gitignore | 1 +
docs/libnbd.pod | 22 +-
examples/Makefile.am | 14 +
examples/batched-read-write.c | 17 +-
examples/strict-structured-reads.c | 270 +++++++++++++
generator/generator | 375 ++++++++++++++++--
generator/states-connect.c | 24 +-
generator/states-issue-command.c | 4 +-
generator/states-magic.c | 6 +-
generator/states-newstyle-opt-export-name.c | 8 +-
generator/states-newstyle-opt-go.c | 20 +-
.../states-newstyle-opt-set-meta-context.c | 24 +-
generator/states-newstyle-opt-starttls.c | 18 +-
.../states-newstyle-opt-structured-reply.c | 10 +-
generator/states-newstyle.c | 6 +-
generator/states-oldstyle.c | 4 +-
generator/states-reply-simple.c | 2 +-
generator/states-reply-structured.c | 58 +--
generator/states-reply.c | 16 +-
generator/states.c | 33 +-
lib/aio.c | 9 +
lib/internal.h | 6 +-
lib/rw.c | 105 ++++-
tests/aio-parallel-load.c | 29 +-
tests/aio-parallel.c | 15 +-
tests/server-death.c | 17 +-
26 files changed, 925 insertions(+), 188 deletions(-)
create mode 100644 examples/strict-structured-reads.c
--
2.20.1
[PATCH libnbd v2 0/4] api: Implement concurrent writer.
by Richard W.M. Jones
v1:
https://www.redhat.com/archives/libguestfs/2019-June/msg00014.html
I pushed a few bits which are uncontroversial. The main
changes since v1 are:
An extra patch removes the want_to_send / check for nbd_aio_is_ready
in examples/threaded-reads-and-writes.c. This logic has been wrong
since commit 6af72b87, as Eric pointed out in his review. The comments
and structure of examples/concurrent-writes.c have been updated to
match.
Callbacks now return int instead of void. In some cases we ignore
this return value.
I added a lot more commentary in the commit message for the main patch
(now patch 3 in this series).
Rich.
[libnbd] How close are we to declaring a stable API?
by Richard W.M. Jones
As the subject says, how close are we to being able to declare a
stable API for libnbd?
I believe these are the main topics:
* Do we need to have an extra thread for writing? I'm unclear about
whether b92392b717 (which allows the state machine to break during
reply processing) means we definitely don't need threads. I imagine
that two threads doing simultaneous send(2) and recv(2) calls could
still improve performance (eg. having two cores copying skbs from
userspace to and from kernel).
* Should ‘nbd_shutdown’ take an extra parameter to indicate that it
should be delayed until all commands in the queue are retired?
Is there anything else?
We could also consider doing a "soft stable API" release where we bump
the version up to 0.9.x, announce that we're going to make the API
stable soon, have a much higher bar for breaking the API, but don't
actually prevent API breaks in cases where it's necessary.
Rich.
--
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming and virtualization blog: http://rwmj.wordpress.com
Fedora Windows cross-compiler. Compile Windows programs, test, and
build Windows installers. Over 100 libraries supported.
http://fedoraproject.org/wiki/MinGW
[libnbd PATCH] tests: Enhance errors test
by Eric Blake
Let's check for quite a few more errors. Among other things, this
adds some coverage for a few things I've patched recently.
---
And these enhancements set me up for my next fix: making NBD_CMD_DISC
prevent future commands.
tests/errors.c | 167 +++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 154 insertions(+), 13 deletions(-)
diff --git a/tests/errors.c b/tests/errors.c
index 99d5820..415c378 100644
--- a/tests/errors.c
+++ b/tests/errors.c
@@ -27,12 +27,40 @@
#include <libnbd.h>
+#define MAXSIZE (65 * 1024 * 1024) /* Oversize on purpose */
+
+static char *progname;
+static char buf[MAXSIZE];
+
+static void
+check (int experr, const char *prefix)
+{
+ const char *msg = nbd_get_error ();
+ int errnum = nbd_get_errno ();
+
+ printf ("error: \"%s\"\n", msg);
+ printf ("errno: %d (%s)\n", errnum, strerror (errnum));
+ if (strncmp (msg, prefix, strlen (prefix)) != 0) {
+ fprintf (stderr, "%s: test failed: missing context prefix: %s\n",
+ progname, msg);
+ exit (EXIT_FAILURE);
+ }
+ if (errnum != experr) {
+ fprintf (stderr, "%s: test failed: "
+ "expected errno = %d (%s), but got %d\n",
+ progname, experr, strerror (experr), errnum);
+ exit (EXIT_FAILURE);
+ }
+}
+
int
main (int argc, char *argv[])
{
struct nbd_handle *nbd;
- const char *msg;
- int errnum;
+ const char *cmd[] = { "nbdkit", "-s", "--exit-with-parent", "memory",
+ "size=128m", NULL };
+
+ progname = argv[0];
nbd = nbd_create ();
if (nbd == NULL) {
@@ -47,23 +75,136 @@ main (int argc, char *argv[])
argv[0]);
exit (EXIT_FAILURE);
}
- msg = nbd_get_error ();
- errnum = nbd_get_errno ();
- printf ("error: \"%s\"\n", msg);
- printf ("errno: %d (%s)\n", errnum, strerror (errnum));
- if (strncmp (msg, "nbd_pread: ", strlen ("nbd_pread: ")) != 0) {
- fprintf (stderr, "%s: test failed: missing context prefix: %s\n",
- argv[0], msg);
+ check (ENOTCONN, "nbd_pread: ");
+
+ /* Request a name that is too long. */
+ memset (buf, 'a', 4999);
+ buf[4999] = '\0';
+ if (nbd_set_export_name (nbd, buf) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_set_export_name did not reject large name\n",
+ argv[0]);
+ exit (EXIT_FAILURE);
+ }
+ check (ENAMETOOLONG, "nbd_set_export_name: ");
+
+ /* Poll while there is no fd. */
+ if (nbd_aio_get_fd (nbd) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_aio_get_fd did not fail prior to connection\n",
+ argv[0]);
+ }
+ check (EINVAL, "nbd_aio_get_fd: ");
+ if (nbd_poll (nbd, 1000) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_poll did not fail prior to connection\n",
+ argv[0]);
+ }
+ check (EINVAL, "nbd_poll: ");
+
+ /* Connect to a working server, then try to connect again. */
+ if (nbd_connect_command (nbd, (char **) cmd) == -1) {
+ fprintf (stderr, "%s: %s\n", argv[0], nbd_get_error ());
+ exit (EXIT_FAILURE);
+ }
+ if (nbd_connect_command (nbd, (char **) cmd) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_connect_command did not reject repeat attempt\n",
+ argv[0]);
+ exit (EXIT_FAILURE);
+ }
+ check (EINVAL, "nbd_connect_command: ");
+
+ /* Try to notify that writes are ready when we aren't blocked on POLLOUT */
+ if (nbd_aio_notify_write (nbd) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_aio_notify_write in wrong state did not fail\n",
+ argv[0]);
+ exit (EXIT_FAILURE);
+ }
+ check (EINVAL, "nbd_aio_notify_write: ");
+
+ /* Check for status of a bogus handle */
+ if (nbd_aio_command_completed (nbd, 0) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_aio_command_completed on bogus handle did not fail\n",
+ argv[0]);
exit (EXIT_FAILURE);
}
- if (errnum != ENOTCONN) {
+ check (EINVAL, "nbd_aio_command_completed: ");
+
+ /* Read from an invalid offset */
+ if (nbd_pread (nbd, NULL, 0, -1, 0) != -1) {
fprintf (stderr, "%s: test failed: "
- "expected errno = ENOTCONN, but got %d\n",
- argv[0], errnum);
+ "nbd_pread did not fail with bogus offset\n",
+ argv[0]);
exit (EXIT_FAILURE);
}
+ check (EINVAL, "nbd_pread: ");
- /* XXX Test some more stuff here. */
+ /* Use unknown command flags */
+ if (nbd_pread (nbd, NULL, 0, 0, -1) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_pread did not fail with bogus flags\n",
+ argv[0]);
+ exit (EXIT_FAILURE);
+ }
+ check (EINVAL, "nbd_pread: ");
+
+ /* Check that oversized requests are rejected */
+ if (nbd_pread (nbd, buf, MAXSIZE, 0, 0) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_pread did not fail with oversize request\n",
+ argv[0]);
+ exit (EXIT_FAILURE);
+ }
+ check (ERANGE, "nbd_pread: ");
+ if (nbd_aio_pwrite (nbd, buf, MAXSIZE, 0, 0) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_aio_pwrite did not fail with oversize request\n",
+ argv[0]);
+ exit (EXIT_FAILURE);
+ }
+ check (ERANGE, "nbd_aio_pwrite: ");
+
+ /* Queue up a write command so large that we block on POLLIN, then queue
+ * multiple disconnects. XXX The last one should fail.
+ */
+ if (nbd_aio_pwrite (nbd, buf, 2 * 1024 * 1024, 0, 0) == -1) {
+ fprintf (stderr, "%s: %s\n", argv[0], nbd_get_error ());
+ exit (EXIT_FAILURE);
+ }
+ if ((nbd_aio_get_direction (nbd) & LIBNBD_AIO_DIRECTION_WRITE) == 0) {
+ fprintf (stderr, "%s: test failed: "
+ "expect to be blocked on write\n",
+ argv[0]);
+ exit (EXIT_FAILURE);
+ }
+ if (nbd_aio_disconnect (nbd, 0) == -1) {
+ fprintf (stderr, "%s: %s\n", argv[0], nbd_get_error ());
+ exit (EXIT_FAILURE);
+ }
+ if (nbd_aio_disconnect (nbd, 0) == -1) { /* XXX */
+ fprintf (stderr, "%s: %s\n", argv[0], nbd_get_error ());
+ exit (EXIT_FAILURE);
+ }
+
+ /* Flush the queue (whether this one fails is a race with how fast
+ * the server shuts down, so don't enforce status), then try to send
+ * another command while CLOSED/DEAD
+ */
+ if (nbd_shutdown (nbd) == -1) {
+ fprintf (stderr, "%s: ignoring %s\n", argv[0], nbd_get_error ());
+ }
+ else
+ fprintf (stderr, "%s: shutdown completed successfully\n", argv[0]);
+ if (nbd_pread (nbd, NULL, 0, 0, 0) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "nbd_pread did not fail on non-connected handle\n",
+ argv[0]);
+ exit (EXIT_FAILURE);
+ }
+ check (EINVAL, "nbd_pread: ");
nbd_close (nbd);
exit (EXIT_SUCCESS);
--
2.20.1
Guestfish command - "copy-out" not working for symbolic links
by Chintan Patel
Hi,
I'm trying to use the copy-out command to copy files from a remote disk, but it's not working if the file is a symbolic link.
copy-out gives an error like the one below.
libguestfs: error: '/etc/resolv.conf' is not a file or directory
The file is there, but it's a symbolic link.
Is there any other way to copy remote files that are symbolic links?
Thanks,
Chintan
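One possible workaround, sketched here without having verified it
against this particular image (the resolved target path is made up for
illustration): resolve the symlink inside the guest first, then
download the target file instead of the link.

  # find out where the symlink points inside the guest
  guestfish --ro -a disk.img -i realpath /etc/resolv.conf

  # suppose it prints /run/NetworkManager/resolv.conf (hypothetical);
  # copy that real file out instead of the symlink
  guestfish --ro -a disk.img -i download /run/NetworkManager/resolv.conf /tmp/resolv.conf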
[libnbd PATCH] disconnect: Prevent any further commands
by Eric Blake
Once the client has requested NBD_CMD_DISC, the protocol states that
it must not send any further information to the server (further writes
may still be needed for a clean TLS shutdown, but that's a different
matter requiring more states).
Our state machine can prevent some of this if we have moved to CLOSED,
but that's not foolproof because we can queue commands that can't be
written right away and thus may not be in CLOSED yet. Solve this by
instead tracking when we queue a disconnect request, and rejecting all
commands after that point even while still allowing replies from the
server for existing in-flight commands.
The protocol also recommends that NBD_CMD_DISC not be sent until there
are no other pending in-flight commands, but at the moment, we place
that burden on the client. Perhaps we should add a knob to
nbd_shutdown and/or add a new API nbd_aio_in_flight returning the
number of in-flight commands, to make things easier?
---
lib/disconnect.c | 6 ++++--
lib/internal.h | 2 ++
lib/rw.c | 5 +++++
tests/errors.c | 9 ++++++---
4 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/lib/disconnect.c b/lib/disconnect.c
index 95e9a37..53de386 100644
--- a/lib/disconnect.c
+++ b/lib/disconnect.c
@@ -29,8 +29,9 @@
int
nbd_unlocked_shutdown (struct nbd_handle *h)
{
- if (nbd_internal_is_state_ready (get_next_state (h)) ||
- nbd_internal_is_state_processing (get_next_state (h))) {
+ if (!h->disconnect_request &&
+ (nbd_internal_is_state_ready (get_next_state (h)) ||
+ nbd_internal_is_state_processing (get_next_state (h)))) {
if (nbd_unlocked_aio_disconnect (h, 0) == -1)
return -1;
}
@@ -57,6 +58,7 @@ nbd_unlocked_aio_disconnect (struct nbd_handle *h, uint32_t flags)
id = nbd_internal_command_common (h, 0, NBD_CMD_DISC, 0, 0, NULL, NULL);
if (id == -1)
return -1;
+ h->disconnect_request = true;
/* This will leave the command on the in-flight list. Is this a
* problem? Probably it isn't. If it is, we could add a flag to
diff --git a/lib/internal.h b/lib/internal.h
index 88ad703..11e0db6 100644
--- a/lib/internal.h
+++ b/lib/internal.h
@@ -191,6 +191,8 @@ struct nbd_handle {
struct command_in_flight *cmds_to_issue, *cmds_in_flight, *cmds_done;
/* Current command during a REPLY cycle */
struct command_in_flight *reply_cmd;
+
+ bool disconnect_request; /* True if we've sent NBD_CMD_DISC */
};
struct meta_context {
diff --git a/lib/rw.c b/lib/rw.c
index 2dc60de..6b57f11 100644
--- a/lib/rw.c
+++ b/lib/rw.c
@@ -163,6 +163,11 @@ nbd_internal_command_common (struct nbd_handle *h,
{
struct command_in_flight *cmd, *prev_cmd;
+ if (h->disconnect_request) {
+ set_error (EINVAL, "cannot request more commands after NBD_CMD_DISC");
+ return -1;
+ }
+
switch (type) {
/* Commands which send or receive data are limited to MAX_REQUEST_SIZE. */
case NBD_CMD_READ:
diff --git a/tests/errors.c b/tests/errors.c
index 415c378..faa1488 100644
--- a/tests/errors.c
+++ b/tests/errors.c
@@ -168,7 +168,7 @@ main (int argc, char *argv[])
check (ERANGE, "nbd_aio_pwrite: ");
/* Queue up a write command so large that we block on POLLIN, then queue
- * multiple disconnects. XXX The last one should fail.
+ * multiple disconnects.
*/
if (nbd_aio_pwrite (nbd, buf, 2 * 1024 * 1024, 0, 0) == -1) {
fprintf (stderr, "%s: %s\n", argv[0], nbd_get_error ());
@@ -184,10 +184,13 @@ main (int argc, char *argv[])
fprintf (stderr, "%s: %s\n", argv[0], nbd_get_error ());
exit (EXIT_FAILURE);
}
- if (nbd_aio_disconnect (nbd, 0) == -1) { /* XXX */
- fprintf (stderr, "%s: %s\n", argv[0], nbd_get_error ());
+ if (nbd_aio_disconnect (nbd, 0) != -1) {
+ fprintf (stderr, "%s: test failed: "
+ "no diagnosis that nbd_aio_disconnect prevents new commands\n",
+ argv[0]);
exit (EXIT_FAILURE);
}
+ check (EINVAL, "nbd_aio_disconnect: ");
/* Flush the queue (whether this one fails is a race with how fast
* the server shuts down, so don't enforce status), then try to send
--
2.20.1