author     John Levon <john.levon@nutanix.com>    2021-06-02 16:08:14 +0100
committer  GitHub <noreply@github.com>            2021-06-02 16:08:14 +0100
commit     57684de8240fce4a277301a86a803842338762af (patch)
tree       ea1066e2ae4de34bd7b77f9fb7a26af40848b52f /lib/tran_sock.c
parent     b8234a75d9ec2c95cb889c0cef27927f34ad9cbc (diff)
replace max_msg_size with max_data_xfer_size (#541)
The previously specified max_msg_size had one major issue: it implied a
(way too small) limit on the size of dirty bitmaps that could be
requested by a client, and as a result a hard limit on memory region
size. It seemed awkward to attempt to split up an unmap request instead.

Instead, let most requests and replies be limited by their "natural"
limits; for example, the number of booleans in VFIO_USER_SET_IRQS is
limited by MSI-X count.

For the requests that solicit or provide data - that is,
VFIO_USER_DMA_READ/WRITE and VFIO_USER_REGION_READ/WRITE - we negotiate
a new max_data_xfer_size value. These are much easier to split up into
separate requests at the client side, so they should not present an
implementation problem. For our server, chunking is implemented in
vfu_dma_read/vfu_dma_write().

Signed-off-by: John Levon <john.levon@nutanix.com>
Reviewed-by: Swapnil Ingle <swapnil.ingle@nutanix.com>
Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
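As a rough sketch of the chunking the commit message describes (illustrative
only, not the library's actual vfu_dma_read()/vfu_dma_write() bodies), a
transfer larger than the negotiated limit can be split into a loop of bounded
requests. Here dma_read_chunk() is a hypothetical stand-in for one
VFIO_USER_DMA_READ round trip; vfu_ctx_t and client_max_data_xfer_size are
taken from the diff below:

    static int
    dma_read_chunked(vfu_ctx_t *vfu_ctx, uint64_t addr, void *buf, size_t count)
    {
        char *data = buf;
        size_t max = vfu_ctx->client_max_data_xfer_size;

        while (count > 0) {
            /* Never request more than the peer advertised. */
            size_t len = count < max ? count : max;
            /* One VFIO_USER_DMA_READ request/reply exchange (hypothetical helper). */
            int ret = dma_read_chunk(vfu_ctx, addr, data, len);

            if (ret < 0) {
                return ret;
            }

            addr += len;
            data += len;
            count -= len;
        }
        return 0;
    }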
Diffstat (limited to 'lib/tran_sock.c')
-rw-r--r--  lib/tran_sock.c  38
1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/lib/tran_sock.c b/lib/tran_sock.c
index 12b3321..f8b7004 100644
--- a/lib/tran_sock.c
+++ b/lib/tran_sock.c
@@ -63,7 +63,7 @@ tran_sock_send_iovec(int sock, uint16_t msg_id, bool is_reply,
int *fds, int count, int err)
{
int ret;
- struct vfio_user_header hdr = {.msg_id = msg_id};
+ struct vfio_user_header hdr = { .msg_id = msg_id };
struct msghdr msg;
size_t i;
size_t size = count * sizeof(*fds);
@@ -250,6 +250,10 @@ tran_sock_recv_fds(int sock, struct vfio_user_header *hdr, bool is_reply,
}
}
+ if (hdr->msg_size < sizeof(*hdr) || hdr->msg_size > SERVER_MAX_MSG_SIZE) {
+ return ERROR_INT(EINVAL);
+ }
+
if (len != NULL && *len > 0 && hdr->msg_size > sizeof(*hdr)) {
ret = recv(sock, data, MIN(hdr->msg_size - sizeof(*hdr), *len),
MSG_WAITALL);
@@ -276,8 +280,6 @@ tran_sock_recv(int sock, struct vfio_user_header *hdr, bool is_reply,
/*
* Like tran_sock_recv(), but will automatically allocate reply data.
- *
- * FIXME: this does an unconstrained alloc of client-supplied data.
*/
int
tran_sock_recv_alloc(int sock, struct vfio_user_header *hdr, bool is_reply,
@@ -294,6 +296,7 @@ tran_sock_recv_alloc(int sock, struct vfio_user_header *hdr, bool is_reply,
}
assert(hdr->msg_size >= sizeof(*hdr));
+ assert(hdr->msg_size <= SERVER_MAX_MSG_SIZE);
len = hdr->msg_size - sizeof(*hdr);
@@ -464,6 +467,7 @@ tran_sock_get_poll_fd(vfu_ctx_t *vfu_ctx)
* {
* "capabilities": {
* "max_msg_fds": 32,
+ * "max_data_xfer_size": 1048576
* "migration": {
* "pgsize": 4096
* }
@@ -474,8 +478,8 @@ tran_sock_get_poll_fd(vfu_ctx_t *vfu_ctx)
* available in newer library versions, so we don't use it.
*/
int
-tran_parse_version_json(const char *json_str,
- int *client_max_fdsp, size_t *pgsizep)
+tran_parse_version_json(const char *json_str, int *client_max_fdsp,
+ size_t *client_max_data_xfer_sizep, size_t *pgsizep)
{
struct json_object *jo_caps = NULL;
struct json_object *jo_top = NULL;
@@ -508,6 +512,18 @@ tran_parse_version_json(const char *json_str,
}
}
+ if (json_object_object_get_ex(jo_caps, "max_data_xfer_size", &jo)) {
+ if (json_object_get_type(jo) != json_type_int) {
+ goto out;
+ }
+
+ errno = 0;
+ *client_max_data_xfer_sizep = (int)json_object_get_int64(jo);
+
+ if (errno != 0) {
+ goto out;
+ }
+ }
if (json_object_object_get_ex(jo_caps, "migration", &jo)) {
struct json_object *jo2 = NULL;
@@ -581,6 +597,7 @@ recv_version(vfu_ctx_t *vfu_ctx, int sock, uint16_t *msg_idp,
}
vfu_ctx->client_max_fds = 1;
+ vfu_ctx->client_max_data_xfer_size = VFIO_USER_DEFAULT_MAX_DATA_XFER_SIZE;
if (vlen > sizeof(*cversion)) {
const char *json_str = (const char *)cversion->data;
@@ -594,6 +611,7 @@ recv_version(vfu_ctx_t *vfu_ctx, int sock, uint16_t *msg_idp,
}
ret = tran_parse_version_json(json_str, &vfu_ctx->client_max_fds,
+ &vfu_ctx->client_max_data_xfer_size,
&pgsize);
if (ret < 0) {
@@ -656,20 +674,20 @@ send_version(vfu_ctx_t *vfu_ctx, int sock, uint16_t msg_id,
"{"
"\"capabilities\":{"
"\"max_msg_fds\":%u,"
- "\"max_msg_size\":%u"
+ "\"max_data_xfer_size\":%u"
"}"
- "}", SERVER_MAX_FDS, SERVER_MAX_MSG_SIZE);
+ "}", SERVER_MAX_FDS, SERVER_MAX_DATA_XFER_SIZE);
} else {
slen = snprintf(server_caps, sizeof(server_caps),
"{"
"\"capabilities\":{"
"\"max_msg_fds\":%u,"
- "\"max_msg_size\":%u,"
+ "\"max_data_xfer_size\":%u,"
"\"migration\":{"
"\"pgsize\":%zu"
"}"
"}"
- "}", SERVER_MAX_FDS, SERVER_MAX_MSG_SIZE,
+ "}", SERVER_MAX_FDS, SERVER_MAX_DATA_XFER_SIZE,
migration_get_pgsize(vfu_ctx->migration));
}
@@ -787,6 +805,8 @@ tran_sock_recv_body(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
ts = vfu_ctx->tran_data;
+ assert(msg->in_size <= SERVER_MAX_MSG_SIZE);
+
msg->in_data = malloc(msg->in_size);
if (msg->in_data == NULL) {