Support a server giving us a 64-bit extent. Note that the protocol
says a server should not give a 64-bit answer when extended headers
are not negotiated; we can handle that by reporting EPROTO but
otherwise accepting the information. Conversely, when extended
headers are in effect, the server must respond with the 64-bit extent
type, even if the extent length and flags both fit in 32 bits; a
32-bit reply in that case is flagged with the same EPROTO. Since
we already store 64-bit extents internally, the user's 32-bit callback
doesn't have to care which reply chunk the server uses (the shim takes
care of that, and an upcoming patch adds new APIs to let the client
use a 64-bit callback). Of course, until a later patch enables
negotiation of extended headers, no compliant server will trigger the
new code here.
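
For reference, the two block status chunk types carry the same
information at different widths; the length checks below (at least 12
bytes and a multiple of 8 past the 4-byte context ID, versus at least
24 bytes and a multiple of 16 past an 8-byte header) follow from wire
layouts roughly like this. This is only an illustrative sketch: the
struct names are made up, and everything is big-endian on the wire.

  #include <stdint.h>

  /* NBD_REPLY_TYPE_BLOCK_STATUS payload (hypothetical layout sketch) */
  struct bs_desc_32 { uint32_t length; uint32_t status_flags; };
  struct bs_payload_32 {
    uint32_t context_id;
    struct bs_desc_32 desc[];   /* (length-4) must be a multiple of 8 */
  };

  /* NBD_REPLY_TYPE_BLOCK_STATUS_EXT payload (hypothetical layout sketch) */
  struct bs_desc_64 { uint64_t length; uint64_t status_flags; };
  struct bs_payload_64 {
    uint32_t context_id;
    uint32_t count;             /* must match the descriptors present */
    struct bs_desc_64 desc[];   /* (length-8) must be a multiple of 16 */
  };
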
Implementation-wise, 'normal' and 'wide' are two different types that
happen to share the same underlying size; keeping both names makes it
easier to reason about whether a given value is still in network byte
order as received from the server, or has already been converted to
native endianness for local processing.
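
Concretely, 'narrow', 'normal' and 'wide' are just different typed
views of the same malloc'd storage; here is a rough sketch of the two
64-bit views, with field names taken from the code below rather than
the exact header definitions:

  #include <stdint.h>

  /* 'wide': one entry exactly as it arrives off the wire, still in
   * network byte order (approximates struct nbd_block_descriptor_ext).
   */
  struct wire_extent {
    uint64_t length;
    uint64_t status_flags;
  };

  /* 'normal': our preferred internal form, already in host byte order
   * (approximates the nbd_extent type).
   */
  struct host_extent {
    uint64_t length;
    uint64_t flags;
  };

Because both views are 16 bytes per entry, a 64-bit reply can be
byte-swapped in place with a forward loop, while a 32-bit reply grows
from 8 to 16 bytes per entry and must be expanded in reverse order so
that unread wire data is not overwritten.
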
Signed-off-by: Eric Blake <eblake@redhat.com>
---
lib/internal.h | 3 +
generator/states-reply-structured.c | 94 +++++++++++++++++++++++------
2 files changed, 78 insertions(+), 19 deletions(-)
diff --git a/lib/internal.h b/lib/internal.h
index 11186e24..e5423245 100644
--- a/lib/internal.h
+++ b/lib/internal.h
@@ -249,6 +249,7 @@ struct nbd_handle {
struct nbd_structured_reply_offset_data offset_data;
struct nbd_structured_reply_offset_hole offset_hole;
struct nbd_structured_reply_block_status_hdr bs_hdr;
+ struct nbd_structured_reply_block_status_ext_hdr bs_ext_hdr;
struct {
struct nbd_structured_reply_error error;
char msg[NBD_MAX_STRING]; /* Common to all error types */
@@ -307,6 +308,8 @@ struct nbd_handle {
char *storage; /* malloc's view */
nbd_extent *normal; /* Our 64-bit preferred internal form; n slots */
uint32_t *narrow; /* 32-bit NBD_REPLY_TYPE_BLOCK_STATUS form; n*2 slots */
+ struct nbd_block_descriptor_ext *wide;
+ /* 64-bit NBD_REPLY_TYPE_BLOCK_STATUS_EXT; n slots */
} bs_entries;
/* Commands which are waiting to be issued [meaning the request
diff --git a/generator/states-reply-structured.c b/generator/states-reply-structured.c
index 784adb78..55d6bb2d 100644
--- a/generator/states-reply-structured.c
+++ b/generator/states-reply-structured.c
@@ -22,6 +22,8 @@
#include <stdint.h>
#include <inttypes.h>
+#include "minmax.h"
+
/* Structured reply must be completely inside the bounds of the
* requesting command.
*/
@@ -144,9 +146,34 @@ REPLY.STRUCTURED_REPLY.CHECK:
length < 12 || ((length-4) & 7) != 0)
goto resync;
assert (CALLBACK_IS_NOT_NULL (cmd->cb.fn.extent));
+ if (h->extended_headers) {
+ debug (h, "unexpected 32-bit block status with extended headers, "
+ "this is probably a server bug");
+ if (cmd->error == 0)
+ cmd->error = EPROTO;
+ }
/* Start by reading the context ID. */
h->rbuf = &h->sbuf.reply.payload.bs_hdr;
h->rlen = sizeof h->sbuf.reply.payload.bs_hdr;
+ h->sbuf.reply.payload.bs_ext_hdr.count = 0;
+ SET_NEXT_STATE (%RECV_BS_HEADER);
+ break;
+
+ case NBD_REPLY_TYPE_BLOCK_STATUS_EXT:
+ if (cmd->type != NBD_CMD_BLOCK_STATUS ||
+ length < 24 ||
+ (length-8) % sizeof (struct nbd_block_descriptor_ext))
+ goto resync;
+ assert (CALLBACK_IS_NOT_NULL (cmd->cb.fn.extent));
+ if (!h->extended_headers) {
+ debug (h, "unexpected 64-bit block status without extended headers, "
+ "this is probably a server bug");
+ if (cmd->error == 0)
+ cmd->error = EPROTO;
+ }
+ /* Start by reading the context ID. */
+ h->rbuf = &h->sbuf.reply.payload.bs_ext_hdr;
+ h->rlen = sizeof h->sbuf.reply.payload.bs_ext_hdr;
SET_NEXT_STATE (%RECV_BS_HEADER);
break;
@@ -437,6 +464,7 @@ REPLY.STRUCTURED_REPLY.RECV_BS_HEADER:
struct command *cmd = h->reply_cmd;
uint32_t length;
uint32_t count;
+ uint16_t type;
switch (recv_into_rbuf (h)) {
case -1: SET_NEXT_STATE (%.DEAD); return 0;
@@ -446,24 +474,43 @@ REPLY.STRUCTURED_REPLY.RECV_BS_HEADER:
return 0;
case 0:
length = h->sbuf.reply.hdr.structured.length; /* normalized in CHECK */
+ type = be16toh (h->sbuf.reply.hdr.structured.type);
assert (cmd); /* guaranteed by CHECK */
assert (cmd->type == NBD_CMD_BLOCK_STATUS);
- assert (length >= 12);
- length -= sizeof h->sbuf.reply.payload.bs_hdr;
- count = length / (2 * sizeof (uint32_t));
- /* Read raw data into a subset of h->bs_entries, then expand it
+ if (type == NBD_REPLY_TYPE_BLOCK_STATUS) {
+ assert (length >= 12);
+ length -= sizeof h->sbuf.reply.payload.bs_hdr;
+ count = length / (2 * sizeof (uint32_t));
+ }
+ else {
+ assert (type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT);
+ assert (length >= 24);
+ length -= sizeof h->sbuf.reply.payload.bs_ext_hdr;
+ count = length / sizeof (struct nbd_block_descriptor_ext);
+ if (count != be32toh (h->sbuf.reply.payload.bs_ext_hdr.count)) {
+ h->rbuf = NULL;
+ h->rlen = length;
+ SET_NEXT_STATE (%RESYNC);
+ return 0;
+ }
+ }
+ /* Normalize count for later use. */
+ h->sbuf.reply.payload.bs_ext_hdr.count = count;
+
+ /* Read raw data into an overlap of h->bs_entries, then move it
* into place later during byte-swapping.
*/
free (h->bs_entries.storage);
- h->bs_entries.storage = malloc (count * sizeof *h->bs_entries.normal);
+ h->bs_entries.storage = malloc (MAX (count * sizeof *h->bs_entries.normal,
+ length));
if (h->bs_entries.storage == NULL) {
SET_NEXT_STATE (%.DEAD);
set_error (errno, "malloc");
return 0;
}
- h->rbuf = h->bs_entries.narrow;
+ h->rbuf = h->bs_entries.storage;
h->rlen = length;
SET_NEXT_STATE (%RECV_BS_ENTRIES);
}
@@ -471,11 +518,10 @@ REPLY.STRUCTURED_REPLY.RECV_BS_HEADER:
REPLY.STRUCTURED_REPLY.RECV_BS_ENTRIES:
struct command *cmd = h->reply_cmd;
- uint32_t length;
uint32_t count;
size_t i;
uint32_t context_id;
- uint32_t *raw;
+ uint16_t type;
switch (recv_into_rbuf (h)) {
case -1: SET_NEXT_STATE (%.DEAD); return 0;
@@ -484,25 +530,35 @@ REPLY.STRUCTURED_REPLY.RECV_BS_ENTRIES:
SET_NEXT_STATE (%.READY);
return 0;
case 0:
- length = h->sbuf.reply.hdr.structured.length; /* normalized in CHECK */
+ type = be16toh (h->sbuf.reply.hdr.structured.type);
+ count = h->sbuf.reply.payload.bs_ext_hdr.count; /* normalized in BS_HEADER */
assert (cmd); /* guaranteed by CHECK */
assert (cmd->type == NBD_CMD_BLOCK_STATUS);
assert (CALLBACK_IS_NOT_NULL (cmd->cb.fn.extent));
- assert (h->bs_entries.normal);
- assert (length >= 12);
+ assert (h->bs_entries.normal && count);
assert (h->meta_valid);
- count = (length - sizeof h->sbuf.reply.payload.bs_hdr) /
- (2 * sizeof (uint32_t));
/* Need to byte-swap the entries returned, but apart from that we
- * don't validate them. Reverse order is essential, since we are
- * expanding in-place from narrow to wider type.
+ * don't validate them.
*/
- raw = h->bs_entries.narrow;
- for (i = count; i-- > 0; ) {
- h->bs_entries.normal[i].flags = be32toh (raw[i * 2 + 1]);
- h->bs_entries.normal[i].length = be32toh (raw[i * 2]);
+ if (type == NBD_REPLY_TYPE_BLOCK_STATUS) {
+ uint32_t *raw = h->bs_entries.narrow;
+
+ /* Expanding in-place from narrow to wide, must use reverse order. */
+ for (i = count; i-- > 0; ) {
+ h->bs_entries.normal[i].flags = be32toh (raw[i * 2 + 1]);
+ h->bs_entries.normal[i].length = be32toh (raw[i * 2]);
+ }
+ }
+ else {
+ struct nbd_block_descriptor_ext *wide = h->bs_entries.wide;
+
+ assert (type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT);
+ for (i = 0; i < count; i++) {
+ h->bs_entries.normal[i].length = be64toh (wide[i].length);
+ h->bs_entries.normal[i].flags = be64toh (wide[i].status_flags);
+ }
}
/* Look up the context ID. */
--
2.40.1