Similar to the previous patch that cached the export size, we want to
avoid calling into the plugin more than once per connection for any of
the flag determination callbacks.
The following script demonstrates the speedup, where we avoid repeated
calls into a slow can_fua. Pre-patch:
$ cat script
case "$1" in
get_size) echo 1m;;
can_fua) sleep 1; echo native;;
can_write | can_zero | pwrite | zero) ;;
*) exit 2 ;;
esac
$ /bin/time -f %e ./nbdkit --filter=blocksize sh script maxlen=4k \
--run 'qemu-io -f raw -c "w -z -f 0 16k" $nbd'
wrote 16384/16384 bytes at offset 0
16 KiB, 1 ops; 0:00:05.07 (3.157 KiB/sec and 0.1973 ops/sec)
6.14
there are six executions (one to determine eflags, one by the
blocksize filter to determine whether to pass the flag down or call
flush, and four by each .zero call over the split range from
blocksize). After:
$ /bin/time -f %e ./nbdkit --filter=blocksize sh script maxlen=4k \
--run 'qemu-io -f raw -c "w -z -f 0 16k" $nbd'
wrote 16384/16384 bytes at offset 0
16 KiB, 1 ops; 00.03 sec (585.570 KiB/sec and 36.5981 ops/sec)
1.13
we've reduced things to a single call, and everything else using the
cache.
Note that the cache of can_zero answers the filter semantics (whether
we advertise zero support in eflags) and not the plugin semantics
(whether we should attempt .zero or just automatically fall back to
.pwrite); that will be improved in the next patch.
In the cow filter, adding a call to next_ops->can_cache in .prepare
means that .cache no longer has to worry about can_cache failing (the
can_FOO methods do not guarantee easy access to a sane errno value).
Signed-off-by: Eric Blake <eblake@redhat.com>
---
docs/nbdkit-filter.pod | 9 +--
docs/nbdkit-plugin.pod | 6 +-
server/internal.h | 19 +++---
server/backend.c | 75 ++++++++++++++++++++---
server/connections.c | 3 +-
server/plugins.c | 7 ++-
server/protocol-handshake.c | 87 +++++++++++----------------
server/protocol.c | 110 +++++++++++++++++++++-------------
filters/blocksize/blocksize.c | 2 -
filters/cache/cache.c | 1 -
filters/cow/cow.c | 16 +++--
11 files changed, 204 insertions(+), 131 deletions(-)
diff --git a/docs/nbdkit-filter.pod b/docs/nbdkit-filter.pod
index 1e2fe99c..3333d6b5 100644
--- a/docs/nbdkit-filter.pod
+++ b/docs/nbdkit-filter.pod
@@ -415,10 +415,11 @@ on to only the final sub-request, or by dropping the flag and ending
with a direct call to C<next_ops-E<gt>flush>).
If there is an error, the callback should call C<nbdkit_error> with an
-error message and return C<-1>. If these functions are called more
-than once for the same connection, they should return the same value;
-similarly, the filter may cache the results of each counterpart in
-C<next_ops> for a given connection rather than repeating calls.
+error message and return C<-1>. These functions are only called once
+per connection and cached by nbdkit. Similarly, repeated calls to any
+of the C<next_ops> counterparts will return a cached value; by calling
+into the plugin during C<.prepare>, you can ensure that later use of
+the cached values during data commands like C<.pwrite> will not fail.
=head2 C<.pread>
diff --git a/docs/nbdkit-plugin.pod b/docs/nbdkit-plugin.pod
index bc3d9749..17239a5c 100644
--- a/docs/nbdkit-plugin.pod
+++ b/docs/nbdkit-plugin.pod
@@ -150,10 +150,8 @@ A new client has connected.
These are called during option negotiation with the client, but before
any data is served. These callbacks may return different values
-across different C<.open> calls, but within a single connection, must
-always return the same value; other code in nbdkit may cache the
-per-connection value returned rather than using the callback a second
-time.
+across different C<.open> calls, but within a single connection, they
+are called at most once and cached by nbdkit for that connection.
=item C<.pread>, C<.pwrite> and other data serving callbacks
diff --git a/server/internal.h b/server/internal.h
index ec8a894c..ddb79623 100644
--- a/server/internal.h
+++ b/server/internal.h
@@ -152,6 +152,15 @@ struct b_conn_handle {
void *handle;
uint64_t exportsize;
+ int can_write;
+ int can_flush;
+ int is_rotational;
+ int can_trim;
+ int can_zero;
+ int can_fua;
+ int can_multi_conn;
+ int can_cache;
+ int can_extents;
};
struct connection {
@@ -169,16 +178,6 @@ struct connection {
uint32_t cflags;
uint16_t eflags;
- bool readonly;
- bool can_flush;
- bool is_rotational;
- bool can_trim;
- bool can_zero;
- bool can_fua;
- bool can_multi_conn;
- bool can_cache;
- bool emulate_cache;
- bool can_extents;
bool using_tls;
bool structured_replies;
bool meta_context_base_allocation;
diff --git a/server/backend.c b/server/backend.c
index 374d8540..196b48e4 100644
--- a/server/backend.c
+++ b/server/backend.c
@@ -144,73 +144,130 @@ backend_get_size (struct backend *b, struct connection *conn)
int
backend_can_write (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+
debug ("%s: can_write", b->name);
- return b->can_write (b, conn);
+ if (h->can_write == -1)
+ h->can_write = b->can_write (b, conn);
+ return h->can_write;
}
int
backend_can_flush (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+
debug ("%s: can_flush", b->name);
- return b->can_flush (b, conn);
+ if (h->can_flush == -1)
+ h->can_flush = b->can_flush (b, conn);
+ return h->can_flush;
}
int
backend_is_rotational (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+
debug ("%s: is_rotational", b->name);
- return b->is_rotational (b, conn);
+ if (h->is_rotational == -1)
+ h->is_rotational = b->is_rotational (b, conn);
+ return h->is_rotational;
}
int
backend_can_trim (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+ int r;
+
debug ("%s: can_trim", b->name);
- return b->can_trim (b, conn);
+ if (h->can_trim == -1) {
+ r = backend_can_write (b, conn);
+ if (r != 1) {
+ h->can_trim = 0;
+ return r;
+ }
+ h->can_trim = b->can_trim (b, conn);
+ }
+ return h->can_trim;
}
int
backend_can_zero (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+ int r;
+
debug ("%s: can_zero", b->name);
- return b->can_zero (b, conn);
+ if (h->can_zero == -1) {
+ r = backend_can_write (b, conn);
+ if (r != 1) {
+ h->can_zero = 0;
+ return r;
+ }
+ h->can_zero = b->can_zero (b, conn);
+ }
+ return h->can_zero;
}
int
backend_can_extents (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+
debug ("%s: can_extents", b->name);
- return b->can_extents (b, conn);
+ if (h->can_extents == -1)
+ h->can_extents = b->can_extents (b, conn);
+ return h->can_extents;
}
int
backend_can_fua (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+ int r;
+
debug ("%s: can_fua", b->name);
- return b->can_fua (b, conn);
+ if (h->can_fua == -1) {
+ r = backend_can_write (b, conn);
+ if (r != 1) {
+ h->can_fua = NBDKIT_FUA_NONE;
+ return r;
+ }
+ h->can_fua = b->can_fua (b, conn);
+ }
+ return h->can_fua;
}
int
backend_can_multi_conn (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+
debug ("%s: can_multi_conn", b->name);
- return b->can_multi_conn (b, conn);
+ if (h->can_multi_conn == -1)
+ h->can_multi_conn = b->can_multi_conn (b, conn);
+ return h->can_multi_conn;
}
int
backend_can_cache (struct backend *b, struct connection *conn)
{
+ struct b_conn_handle *h = &conn->handles[b->i];
+
debug ("%s: can_cache", b->name);
- return b->can_cache (b, conn);
+ if (h->can_cache == -1)
+ h->can_cache = b->can_cache (b, conn);
+ return h->can_cache;
}
int
diff --git a/server/connections.c b/server/connections.c
index 2a5150cd..7609e9a7 100644
--- a/server/connections.c
+++ b/server/connections.c
@@ -274,8 +274,9 @@ new_connection (int sockin, int sockout, int nworkers)
return NULL;
}
conn->nr_handles = backend->i + 1;
+ memset (conn->handles, -1, conn->nr_handles * sizeof *conn->handles);
for_each_backend (b)
- conn->handles[b->i].exportsize = -1;
+ conn->handles[b->i].handle = NULL;
conn->status = 1;
conn->nworkers = nworkers;
diff --git a/server/plugins.c b/server/plugins.c
index d1654f8d..c8f4af90 100644
--- a/server/plugins.c
+++ b/server/plugins.c
@@ -509,7 +509,7 @@ plugin_pwrite (struct backend *b, struct connection *conn,
assert (connection_get_handle (conn, 0));
- if (fua && plugin_can_fua (b, conn) != NBDKIT_FUA_NATIVE) {
+ if (fua && backend_can_fua (b, conn) != NBDKIT_FUA_NATIVE) {
flags &= ~NBDKIT_FLAG_FUA;
need_flush = true;
}
@@ -541,7 +541,7 @@ plugin_trim (struct backend *b, struct connection *conn,
assert (connection_get_handle (conn, 0));
- if (fua && plugin_can_fua (b, conn) != NBDKIT_FUA_NATIVE) {
+ if (fua && backend_can_fua (b, conn) != NBDKIT_FUA_NATIVE) {
flags &= ~NBDKIT_FLAG_FUA;
need_flush = true;
}
@@ -574,13 +574,14 @@ plugin_zero (struct backend *b, struct connection *conn,
assert (connection_get_handle (conn, 0));
- if (fua && plugin_can_fua (b, conn) != NBDKIT_FUA_NATIVE) {
+ if (fua && backend_can_fua (b, conn) != NBDKIT_FUA_NATIVE) {
flags &= ~NBDKIT_FLAG_FUA;
need_flush = true;
}
if (!count)
return 0;
if (p->plugin.can_zero) {
+ /* Calling backend_can_zero would answer the wrong question */
can_zero = p->plugin.can_zero (connection_get_handle (conn, 0));
assert (can_zero != -1);
}
diff --git a/server/protocol-handshake.c b/server/protocol-handshake.c
index 4d12b3dc..16261c34 100644
--- a/server/protocol-handshake.c
+++ b/server/protocol-handshake.c
@@ -51,87 +51,72 @@ protocol_compute_eflags (struct connection *conn, uint16_t *flags)
{
uint16_t eflags = NBD_FLAG_HAS_FLAGS;
int fl;
+ bool can_write = true;
fl = backend_can_write (backend, conn);
if (fl == -1)
return -1;
if (readonly || !fl) {
eflags |= NBD_FLAG_READ_ONLY;
- conn->readonly = true;
+ can_write = false;
}
- if (!conn->readonly) {
- fl = backend_can_zero (backend, conn);
- if (fl == -1)
- return -1;
- if (fl) {
- eflags |= NBD_FLAG_SEND_WRITE_ZEROES;
- conn->can_zero = true;
- }
- fl = backend_can_trim (backend, conn);
- if (fl == -1)
- return -1;
- if (fl) {
- eflags |= NBD_FLAG_SEND_TRIM;
- conn->can_trim = true;
- }
+ /* Check all flags even if they won't be advertised, to prime the
+ * cache and thus simplify later EINVAL handling of a client that
+ * makes a non-compliant request that did not match eflags.
+ */
+ fl = backend_can_zero (backend, conn);
+ if (fl == -1)
+ return -1;
+ if (fl && can_write)
+ eflags |= NBD_FLAG_SEND_WRITE_ZEROES;
- fl = backend_can_fua (backend, conn);
- if (fl == -1)
- return -1;
- if (fl) {
- eflags |= NBD_FLAG_SEND_FUA;
- conn->can_fua = true;
- }
- }
+ fl = backend_can_trim (backend, conn);
+ if (fl == -1)
+ return -1;
+ if (fl && can_write)
+ eflags |= NBD_FLAG_SEND_TRIM;
+
+ fl = backend_can_fua (backend, conn);
+ if (fl == -1)
+ return -1;
+ if (fl && can_write)
+ eflags |= NBD_FLAG_SEND_FUA;
fl = backend_can_flush (backend, conn);
if (fl == -1)
return -1;
- if (fl) {
+ if (fl)
eflags |= NBD_FLAG_SEND_FLUSH;
- conn->can_flush = true;
- }
fl = backend_is_rotational (backend, conn);
if (fl == -1)
return -1;
- if (fl) {
+ if (fl)
eflags |= NBD_FLAG_ROTATIONAL;
- conn->is_rotational = true;
- }
- /* multi-conn is useless if parallel connections are not allowed */
- if (backend->thread_model (backend) >
- NBDKIT_THREAD_MODEL_SERIALIZE_CONNECTIONS) {
- fl = backend_can_multi_conn (backend, conn);
- if (fl == -1)
- return -1;
- if (fl) {
- eflags |= NBD_FLAG_CAN_MULTI_CONN;
- conn->can_multi_conn = true;
- }
- }
+ /* multi-conn is useless if parallel connections are not allowed. */
+ fl = backend_can_multi_conn (backend, conn);
+ if (fl == -1)
+ return -1;
+ if (fl && (backend->thread_model (backend) >
+ NBDKIT_THREAD_MODEL_SERIALIZE_CONNECTIONS))
+ eflags |= NBD_FLAG_CAN_MULTI_CONN;
fl = backend_can_cache (backend, conn);
if (fl == -1)
return -1;
- if (fl) {
+ if (fl)
eflags |= NBD_FLAG_SEND_CACHE;
- conn->can_cache = true;
- conn->emulate_cache = fl == NBDKIT_CACHE_EMULATE;
- }
- /* The result of this is not returned to callers here (or at any
- * time during the handshake). However it makes sense to do it once
- * per connection and store the result in the handle anyway. This
- * protocol_compute_eflags function is a bit misnamed XXX.
+ /* The result of this is not directly advertised as part of the
+ * handshake, but priming the cache here makes BLOCK_STATUS handling
+ * not have to worry about errors, and makes test-layers easier to
+ * write.
*/
fl = backend_can_extents (backend, conn);
if (fl == -1)
return -1;
- if (fl)
- conn->can_extents = true;
if (conn->structured_replies)
eflags |= NBD_FLAG_SEND_DF;
diff --git a/server/protocol.c b/server/protocol.c
index 06f1ee15..0ecf0b5c 100644
--- a/server/protocol.c
+++ b/server/protocol.c
@@ -64,14 +64,19 @@ validate_request (struct connection *conn,
uint16_t cmd, uint16_t flags, uint64_t offset, uint32_t count,
uint32_t *error)
{
+ int r;
+
/* Readonly connection? */
- if (conn->readonly &&
- (cmd == NBD_CMD_WRITE || cmd == NBD_CMD_TRIM ||
- cmd == NBD_CMD_WRITE_ZEROES)) {
- nbdkit_error ("invalid request: %s: write request on readonly connection",
- name_of_nbd_cmd (cmd));
- *error = EROFS;
- return false;
+ if (cmd == NBD_CMD_WRITE || cmd == NBD_CMD_TRIM ||
+ cmd == NBD_CMD_WRITE_ZEROES) {
+ r = backend_can_write (backend, conn);
+ assert (r >= 0); /* Guaranteed by eflags computation */
+ if (!r) {
+      nbdkit_error ("invalid request: %s: write request on readonly connection",
+ name_of_nbd_cmd (cmd));
+ *error = EROFS;
+ return false;
+ }
}
/* Validate cmd, offset, count. */
@@ -142,10 +147,14 @@ validate_request (struct connection *conn,
*error = EINVAL;
return false;
}
- if (!conn->can_fua && (flags & NBD_CMD_FLAG_FUA)) {
- nbdkit_error ("invalid request: FUA flag not supported");
- *error = EINVAL;
- return false;
+ if (flags & NBD_CMD_FLAG_FUA) {
+ r = backend_can_fua (backend, conn);
+ assert (r >= 0); /* Guaranteed by eflags computation */
+ if (!r) {
+ nbdkit_error ("invalid request: FUA flag not supported");
+ *error = EINVAL;
+ return false;
+ }
}
/* Refuse over-large read and write requests. */
@@ -159,35 +168,51 @@ validate_request (struct connection *conn,
}
/* Flush allowed? */
- if (!conn->can_flush && cmd == NBD_CMD_FLUSH) {
- nbdkit_error ("invalid request: %s: flush operation not supported",
- name_of_nbd_cmd (cmd));
- *error = EINVAL;
- return false;
+ if (cmd == NBD_CMD_FLUSH) {
+ r = backend_can_flush (backend, conn);
+ assert (r >= 0); /* Guaranteed by eflags computation */
+ if (!r) {
+ nbdkit_error ("invalid request: %s: flush operation not supported",
+ name_of_nbd_cmd (cmd));
+ *error = EINVAL;
+ return false;
+ }
}
/* Trim allowed? */
- if (!conn->can_trim && cmd == NBD_CMD_TRIM) {
- nbdkit_error ("invalid request: %s: trim operation not supported",
- name_of_nbd_cmd (cmd));
- *error = EINVAL;
- return false;
+ if (cmd == NBD_CMD_TRIM) {
+ r = backend_can_trim (backend, conn);
+ assert (r >= 0); /* Guaranteed by eflags computation */
+ if (!r) {
+ nbdkit_error ("invalid request: %s: trim operation not supported",
+ name_of_nbd_cmd (cmd));
+ *error = EINVAL;
+ return false;
+ }
}
/* Zero allowed? */
- if (!conn->can_zero && cmd == NBD_CMD_WRITE_ZEROES) {
- nbdkit_error ("invalid request: %s: write zeroes operation not supported",
- name_of_nbd_cmd (cmd));
- *error = EINVAL;
- return false;
+ if (cmd == NBD_CMD_WRITE_ZEROES) {
+ r = backend_can_zero (backend, conn);
+ assert (r >= 0); /* Guaranteed by eflags computation */
+ if (!r) {
+      nbdkit_error ("invalid request: %s: write zeroes operation not supported",
+ name_of_nbd_cmd (cmd));
+ *error = EINVAL;
+ return false;
+ }
}
/* Cache allowed? */
- if (!conn->can_cache && cmd == NBD_CMD_CACHE) {
- nbdkit_error ("invalid request: %s: cache operation not supported",
- name_of_nbd_cmd (cmd));
- *error = EINVAL;
- return false;
+ if (cmd == NBD_CMD_CACHE) {
+ r = backend_can_cache (backend, conn);
+ assert (r >= 0); /* Guaranteed by eflags computation */
+ if (!r) {
+ nbdkit_error ("invalid request: %s: cache operation not supported",
+ name_of_nbd_cmd (cmd));
+ *error = EINVAL;
+ return false;
+ }
}
/* Block status allowed? */
@@ -232,8 +257,8 @@ handle_request (struct connection *conn,
void *buf, struct nbdkit_extents *extents)
{
uint32_t f = 0;
- bool fua = conn->can_fua && (flags & NBD_CMD_FLAG_FUA);
int err = 0;
+ int r;
/* Clear the error, so that we know if the plugin calls
* nbdkit_set_error() or relied on errno. */
@@ -246,7 +271,7 @@ handle_request (struct connection *conn,
break;
case NBD_CMD_WRITE:
- if (fua)
+ if (flags & NBD_CMD_FLAG_FUA)
f |= NBDKIT_FLAG_FUA;
if (backend_pwrite (backend, conn, buf, count, offset, f, &err) == -1)
return err;
@@ -258,14 +283,16 @@ handle_request (struct connection *conn,
break;
case NBD_CMD_TRIM:
- if (fua)
+ if (flags & NBD_CMD_FLAG_FUA)
f |= NBDKIT_FLAG_FUA;
if (backend_trim (backend, conn, count, offset, f, &err) == -1)
return err;
break;
case NBD_CMD_CACHE:
- if (conn->emulate_cache) {
+ r = backend_can_cache (backend, conn);
+ assert (r > 0); /* Guaranteed by validate_request */
+ if (r == NBDKIT_CACHE_EMULATE) {
static char buf[MAX_REQUEST_SIZE]; /* data sink, never read */
uint32_t limit;
@@ -284,7 +311,7 @@ handle_request (struct connection *conn,
case NBD_CMD_WRITE_ZEROES:
if (!(flags & NBD_CMD_FLAG_NO_HOLE))
f |= NBDKIT_FLAG_MAY_TRIM;
- if (fua)
+ if (flags & NBD_CMD_FLAG_FUA)
f |= NBDKIT_FLAG_FUA;
if (backend_zero (backend, conn, count, offset, f, &err) == -1)
return err;
@@ -293,10 +320,13 @@ handle_request (struct connection *conn,
case NBD_CMD_BLOCK_STATUS:
/* The other backend methods don't check can_*. That is because
* those methods are implicitly suppressed by returning eflags to
- * the client. However there is no eflag for extents so we must
- * check it here.
+ * the client (see validate_request), but there is no eflag for
+ * extents. We did prime the cache earlier, but must check here
+ * in order to perform a fallback when needed.
*/
- if (conn->can_extents) {
+ r = backend_can_extents (backend, conn);
+ assert (r >= 0); /* Guaranteed during eflags computation */
+ if (r) {
if (flags & NBD_CMD_FLAG_REQ_ONE)
f |= NBDKIT_FLAG_REQ_ONE;
if (backend_extents (backend, conn, count, offset, f,
@@ -304,8 +334,6 @@ handle_request (struct connection *conn,
return err;
}
else {
- int r;
-
/* By default it is safe assume that everything in the range is
* allocated.
*/
diff --git a/filters/blocksize/blocksize.c b/filters/blocksize/blocksize.c
index 0978887f..0fa05301 100644
--- a/filters/blocksize/blocksize.c
+++ b/filters/blocksize/blocksize.c
@@ -138,8 +138,6 @@ blocksize_config_complete (nbdkit_next_config_complete *next, void *nxdata)
"maxdata=<SIZE> Maximum size for read/write (default 64M).\n" \
"maxlen=<SIZE>          Maximum size for trim/zero (default 4G-minblock)."
-/* TODO: Should we have a .prepare to cache per-connection FUA mode? */
-
/* Round size down to avoid issues at end of file. */
static int64_t
blocksize_get_size (struct nbdkit_next_ops *next_ops, void *nxdata,
diff --git a/filters/cache/cache.c b/filters/cache/cache.c
index b5dbccd2..e5f18d9b 100644
--- a/filters/cache/cache.c
+++ b/filters/cache/cache.c
@@ -239,7 +239,6 @@ cache_prepare (struct nbdkit_next_ops *next_ops, void *nxdata,
r = cache_get_size (next_ops, nxdata, handle);
if (r < 0)
return -1;
- /* TODO: cache per-connection FUA mode? */
return 0;
}
diff --git a/filters/cow/cow.c b/filters/cow/cow.c
index 9d91d432..e4330bf3 100644
--- a/filters/cow/cow.c
+++ b/filters/cow/cow.c
@@ -127,7 +127,7 @@ cow_get_size (struct nbdkit_next_ops *next_ops, void *nxdata,
return size;
}
-/* Force an early call to cow_get_size, consequently truncating the
+/* Force early calls to populate nbdkit's cache, and truncate the
* overlay to the correct size.
*/
static int
@@ -137,7 +137,14 @@ cow_prepare (struct nbdkit_next_ops *next_ops, void *nxdata,
int64_t r;
r = cow_get_size (next_ops, nxdata, handle);
- return r >= 0 ? 0 : -1;
+ if (r == -1)
+ return -1;
+
+ r = next_ops->can_cache (nxdata);
+ if (r == -1)
+ return -1;
+
+ return 0;
}
/* Whatever the underlying plugin can or can't do, we can write, we
@@ -427,7 +434,7 @@ cow_cache (struct nbdkit_next_ops *next_ops, void *nxdata,
uint64_t blknum, blkoffs;
int r;
uint64_t remaining = count; /* Rounding out could exceed 32 bits */
- enum cache_mode mode; /* XXX Cache this per connection? */
+ enum cache_mode mode;
switch (next_ops->can_cache (nxdata)) {
case NBDKIT_CACHE_NONE:
@@ -440,8 +447,7 @@ cow_cache (struct nbdkit_next_ops *next_ops, void *nxdata,
mode = BLK_CACHE_PASSTHROUGH;
break;
default:
- *err = EINVAL;
- return -1;
+ abort (); /* .prepare populated the cache */
}
if (cow_on_cache)
mode = BLK_CACHE_COW;
--
2.21.0