@@ -1346,22 +1346,29 @@ int nbd_disconnect(int fd)
 
 int nbd_send_request(QIOChannel *ioc, NBDRequest *request)
 {
-    uint8_t buf[NBD_REQUEST_SIZE];
+    uint8_t buf[NBD_EXTENDED_REQUEST_SIZE];
+    size_t len;
 
-    assert(request->mode <= NBD_MODE_STRUCTURED); /* TODO handle extended */
-    assert(request->len <= UINT32_MAX);
     trace_nbd_send_request(request->from, request->len, request->cookie,
                            request->flags, request->type,
                            nbd_cmd_lookup(request->type));
 
-    stl_be_p(buf, NBD_REQUEST_MAGIC);
     stw_be_p(buf + 4, request->flags);
     stw_be_p(buf + 6, request->type);
     stq_be_p(buf + 8, request->cookie);
     stq_be_p(buf + 16, request->from);
-    stl_be_p(buf + 24, request->len);
+    if (request->mode >= NBD_MODE_EXTENDED) {
+        stl_be_p(buf, NBD_EXTENDED_REQUEST_MAGIC);
+        stq_be_p(buf + 24, request->len);
+        len = NBD_EXTENDED_REQUEST_SIZE;
+    } else {
+        assert(request->len <= UINT32_MAX);
+        stl_be_p(buf, NBD_REQUEST_MAGIC);
+        stl_be_p(buf + 24, request->len);
+        len = NBD_REQUEST_SIZE;
+    }
 
-    return nbd_write(ioc, buf, sizeof(buf), NULL);
+    return nbd_write(ioc, buf, len, NULL);
 }
 
 /* nbd_receive_simple_reply
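As a standalone illustration of the two request layouts the hunk above chooses between at send time: a sketch, not part of the patch. The sizes (28-byte compact, 32-byte extended) and magic values follow the NBD protocol's extended-headers extension; the SKETCH_ prefix marks them as local copies rather than QEMU identifiers.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_REQUEST_MAGIC           0x25609513u  /* 28-byte compact form */
#define SKETCH_EXTENDED_REQUEST_MAGIC  0x21e41c71u  /* 32-byte extended form */

static void put_be16(uint8_t *p, uint16_t v)
{
    p[0] = v >> 8; p[1] = (uint8_t)v;
}

static void put_be32(uint8_t *p, uint32_t v)
{
    put_be16(p, v >> 16); put_be16(p + 2, (uint16_t)v);
}

static void put_be64(uint8_t *p, uint64_t v)
{
    put_be32(p, v >> 32); put_be32(p + 4, (uint32_t)v);
}

/* Returns the number of bytes to transmit: 28 (compact) or 32 (extended).
 * Both forms agree on every field up through the 64-bit offset; only the
 * magic and the width of the final length field differ. */
static size_t encode_request(uint8_t buf[32], bool extended, uint16_t flags,
                             uint16_t type, uint64_t cookie, uint64_t from,
                             uint64_t len)
{
    put_be32(buf, extended ? SKETCH_EXTENDED_REQUEST_MAGIC
                           : SKETCH_REQUEST_MAGIC);
    put_be16(buf + 4, flags);
    put_be16(buf + 6, type);
    put_be64(buf + 8, cookie);
    put_be64(buf + 16, from);
    if (extended) {
        put_be64(buf + 24, len);      /* length widens to 64 bits */
        return 32;
    }
    assert(len <= UINT32_MAX);        /* compact form cannot express more */
    put_be32(buf + 24, (uint32_t)len);
    return 28;
}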
@@ -1388,30 +1395,36 @@ static int nbd_receive_simple_reply(QIOChannel *ioc, NBDSimpleReply *reply,
     return 0;
 }
 
-/* nbd_receive_structured_reply_chunk
+/* nbd_receive_reply_chunk_header
  * Read structured reply chunk except magic field (which should be already
- * read).
+ * read). Normalize into the compact form.
  * Payload is not read.
  */
-static int nbd_receive_structured_reply_chunk(QIOChannel *ioc,
-                                              NBDStructuredReplyChunk *chunk,
-                                              Error **errp)
+static int nbd_receive_reply_chunk_header(QIOChannel *ioc, NBDReply *chunk,
+                                          Error **errp)
 {
     int ret;
+    size_t len;
+    uint64_t payload_len;
 
-    assert(chunk->magic == NBD_STRUCTURED_REPLY_MAGIC);
+    if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {
+        len = sizeof(chunk->structured);
+    } else {
+        assert(chunk->magic == NBD_EXTENDED_REPLY_MAGIC);
+        len = sizeof(chunk->extended);
+    }
 
     ret = nbd_read(ioc, (uint8_t *)chunk + sizeof(chunk->magic),
-                   sizeof(*chunk) - sizeof(chunk->magic), "structured chunk",
+                   len - sizeof(chunk->magic), "structured chunk",
                    errp);
     if (ret < 0) {
         return ret;
     }
 
-    chunk->flags = be16_to_cpu(chunk->flags);
-    chunk->type = be16_to_cpu(chunk->type);
-    chunk->cookie = be64_to_cpu(chunk->cookie);
-    chunk->length = be32_to_cpu(chunk->length);
+    /* flags, type, and cookie occupy same space between forms */
+    chunk->structured.flags = be16_to_cpu(chunk->structured.flags);
+    chunk->structured.type = be16_to_cpu(chunk->structured.type);
+    chunk->structured.cookie = be64_to_cpu(chunk->structured.cookie);
 
     /*
      * Because we use BLOCK_STATUS with REQ_ONE, and cap READ requests
@@ -1419,11 +1432,20 @@ static int nbd_receive_structured_reply_chunk(QIOChannel *ioc,
      * this.  Even if we stopped using REQ_ONE, sane servers will cap
      * the number of extents they return for block status.
      */
-    if (chunk->length > NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData)) {
+    if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {
+        payload_len = be32_to_cpu(chunk->structured.length);
+    } else {
+        /* For now, we are ignoring the extended header offset. */
+        payload_len = be64_to_cpu(chunk->extended.length);
+        chunk->magic = NBD_STRUCTURED_REPLY_MAGIC;
+    }
+    if (payload_len > NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData)) {
         error_setg(errp, "server chunk %" PRIu32 " (%s) payload is too long",
-                   chunk->type, nbd_rep_lookup(chunk->type));
+                   chunk->structured.type,
+                   nbd_rep_lookup(chunk->structured.type));
         return -EINVAL;
     }
+    chunk->structured.length = payload_len;
 
     return 0;
 }
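Taken together, the two hunks above let nbd_receive_reply_chunk_header() read whichever chunk header width the server sent and then squash the wide form into the compact one, so everything downstream handles a single layout. A minimal standalone sketch of that normalization, assuming simplified host-order mirrors of the two wire forms (the real NBDReply is a union over packed structs; SKETCH_ constants again stand in for the spec's values):

#include <assert.h>
#include <stdint.h>

#define SKETCH_STRUCTURED_REPLY_MAGIC 0x668e33efu
#define SKETCH_EXTENDED_REPLY_MAGIC   0x6e8a278cu
#define SKETCH_MAX_PAYLOAD            (32u * 1024 * 1024)

struct compact_chunk_hdr {   /* 20 bytes on the wire, 32-bit length */
    uint32_t magic;
    uint16_t flags;
    uint16_t type;
    uint64_t cookie;
    uint32_t length;
};

struct extended_chunk_hdr { /* 32 bytes on the wire, 64-bit length */
    uint32_t magic;
    uint16_t flags;
    uint16_t type;
    uint64_t cookie;
    uint64_t offset;        /* echoed request offset; ignored for now */
    uint64_t length;
};

/* Squash an extended chunk header into compact form.  Returns 0 on
 * success, -1 if the payload length is implausibly large; rejecting
 * oversized payloads first is what makes the 64->32-bit narrowing of
 * the length field safe. */
static int normalize_chunk_hdr(const struct extended_chunk_hdr *ext,
                               struct compact_chunk_hdr *out)
{
    assert(ext->magic == SKETCH_EXTENDED_REPLY_MAGIC);
    if (ext->length > SKETCH_MAX_PAYLOAD) {
        return -1;
    }
    out->magic = SKETCH_STRUCTURED_REPLY_MAGIC;
    out->flags = ext->flags;
    out->type = ext->type;
    out->cookie = ext->cookie;
    out->length = (uint32_t)ext->length;  /* safe: checked against the cap */
    return 0;
}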
@@ -1470,19 +1492,21 @@ nbd_read_eof(BlockDriverState *bs, QIOChannel *ioc, void *buffer, size_t size,
 
 /* nbd_receive_reply
  *
- * Decreases bs->in_flight while waiting for a new reply. This yield is where
- * we wait indefinitely and the coroutine must be able to be safely reentered
- * for nbd_client_attach_aio_context().
+ * Wait for a new reply. If this yields, the coroutine must be able to be
+ * safely reentered for nbd_client_attach_aio_context(). @mode determines
+ * which reply magic we are expecting, although this normalizes the result
+ * so that the caller only has to work with compact headers.
 *
 * Returns 1 on success
- *         0 on eof, when no data was read (errp is not set)
- *         negative errno on failure (errp is set)
+ *         0 on eof, when no data was read
+ *         negative errno on failure
 */
 int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
-                                   NBDReply *reply, Error **errp)
+                                   NBDReply *reply, NBDMode mode, Error **errp)
 {
     int ret;
     const char *type;
+    uint32_t expected;
 
     ret = nbd_read_eof(bs, ioc, &reply->magic, sizeof(reply->magic), errp);
     if (ret <= 0) {
@@ -1491,34 +1515,44 @@ int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
 
     reply->magic = be32_to_cpu(reply->magic);
 
+    /* Diagnose but accept wrong-width header */
     switch (reply->magic) {
     case NBD_SIMPLE_REPLY_MAGIC:
+        if (mode >= NBD_MODE_EXTENDED) {
+            trace_nbd_receive_wrong_header(reply->magic,
+                                           nbd_mode_lookup(mode));
+        }
         ret = nbd_receive_simple_reply(ioc, &reply->simple, errp);
         if (ret < 0) {
-            break;
+            return ret;
         }
         trace_nbd_receive_simple_reply(reply->simple.error,
                                        nbd_err_lookup(reply->simple.error),
                                        reply->cookie);
         break;
     case NBD_STRUCTURED_REPLY_MAGIC:
-        ret = nbd_receive_structured_reply_chunk(ioc, &reply->structured, errp);
+    case NBD_EXTENDED_REPLY_MAGIC:
+        expected = mode >= NBD_MODE_EXTENDED ? NBD_EXTENDED_REPLY_MAGIC
+                                             : NBD_STRUCTURED_REPLY_MAGIC;
+        if (reply->magic != expected) {
+            trace_nbd_receive_wrong_header(reply->magic,
+                                           nbd_mode_lookup(mode));
+        }
+        ret = nbd_receive_reply_chunk_header(ioc, reply, errp);
         if (ret < 0) {
-            break;
+            return ret;
         }
         type = nbd_reply_type_lookup(reply->structured.type);
-        trace_nbd_receive_structured_reply_chunk(reply->structured.flags,
-                                                 reply->structured.type, type,
-                                                 reply->structured.cookie,
-                                                 reply->structured.length);
+        trace_nbd_receive_reply_chunk_header(reply->structured.flags,
+                                             reply->structured.type, type,
+                                             reply->structured.cookie,
+                                             reply->structured.length);
         break;
     default:
+        trace_nbd_receive_wrong_header(reply->magic, nbd_mode_lookup(mode));
         error_setg(errp, "invalid magic (got 0x%" PRIx32 ")", reply->magic);
         return -EINVAL;
     }
-    if (ret < 0) {
-        return ret;
-    }
 
     return 1;
 }
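Because the result is normalized, a caller only ever dispatches on the compact magics. A hypothetical receive loop under that assumption; names such as s->ioc, s->info.mode, and local_err are illustrative stand-ins, not taken from this patch:

NBDReply reply;
Error *local_err = NULL;
int ret;

/* Pass the negotiated mode so the expected header width is known. */
ret = nbd_receive_reply(bs, s->ioc, &reply, s->info.mode, &local_err);
if (ret <= 0) {
    /* 0 means clean EOF with no data read; negative is a hard error. */
    return ret ? ret : -EIO;
}
switch (reply.magic) {
case NBD_SIMPLE_REPLY_MAGIC:
    /* Entire reply consumed; status is in reply.simple.error. */
    break;
case NBD_STRUCTURED_REPLY_MAGIC:
    /* Chunk header consumed (an extended header has already been squashed
     * into this form); reply.structured.length payload bytes are still
     * pending on the channel. NBD_EXTENDED_REPLY_MAGIC never appears here. */
    break;
}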