author    Greg Kurz <groug@kaod.org>    2020-01-20 15:11:39 +0100
committer Greg Kurz <groug@kaod.org>    2020-01-20 15:11:39 +0100
commit    16724a173049ac29c7b5ade741da93a0f46edff7 (patch)
tree      1585a3ff4437d15c176b36a1be9f2b5ed45e583f /hw
parent    846cf408a4c8055063f4a5a71ccf7ed030cdad30 (diff)
9p: init_in_iov_from_pdu can truncate the size
init_in_iov_from_pdu might not be able to allocate the full buffer size requested, which comes from the client and could be larger than the transport has available at the time of the request. Specifically, this can happen with read operations, when the client requests a read up to the maximum allowed size, which might be more than the transport can provide at that moment.

Today both the virtio and Xen implementations of init_in_iov_from_pdu throw an error in this case.

Instead, change the V9fsTransport interface so that the size becomes a pointer and can be limited by the implementation of init_in_iov_from_pdu. Change both the Xen and virtio implementations to set the size to the size of the buffer they managed to allocate, instead of throwing an error. However, if the allocated buffer is smaller than P9_IOHDRSZ (the size of the reply header), still throw an error, as that case cannot be handled.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
CC: groug@kaod.org
CC: anthony.perard@citrix.com
CC: roman@zededa.com
CC: qemu_oss@crudebyte.com
[groug: fix 32-bit build]
Signed-off-by: Greg Kurz <groug@kaod.org>
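For readers skimming the patch, a minimal standalone sketch of the new contract may help (illustrative code, not part of the patch): the transport now receives the requested reply size by pointer, clamps it to whatever its ring currently has available, and fails only when even the 9p reply header would not fit. The fake_transport_t type, the fake_init_in_iov_from_pdu() helper and the P9_IOHDRSZ value used below are assumptions made for this example.

    #include <stdio.h>
    #include <stddef.h>

    #define P9_IOHDRSZ 24                 /* assumed header size, for illustration only */

    /* Hypothetical transport state: how many bytes its ring can supply right now. */
    typedef struct {
        size_t avail;
    } fake_transport_t;

    /*
     * Models the new init_in_iov_from_pdu contract: *size is an in/out
     * parameter and may be reduced to what the transport can actually
     * provide.  Fails only when even the reply header would not fit.
     */
    static int fake_init_in_iov_from_pdu(fake_transport_t *t, size_t *size)
    {
        if (t->avail < P9_IOHDRSZ) {
            return -1;                    /* unhandleable: no room for the header */
        }
        if (t->avail < *size) {
            *size = t->avail;             /* truncate instead of erroring out */
        }
        return 0;
    }

    int main(void)
    {
        fake_transport_t t = { .avail = 4096 };
        size_t size = 8192;               /* client asked for more than is available */

        if (fake_init_in_iov_from_pdu(&t, &size) == 0) {
            printf("read truncated to %zu bytes\n", size);
        }
        return 0;
    }

The callers in the patch below (v9fs_read, v9fs_write, v9fs_xattr_read) then shrink their count to the truncated size, so the client simply gets a short read or write rather than a transport error.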
Diffstat (limited to 'hw')
-rw-r--r--  hw/9pfs/9p.c               | 33
-rw-r--r--  hw/9pfs/9p.h               |  2
-rw-r--r--  hw/9pfs/virtio-9p-device.c | 11
-rw-r--r--  hw/9pfs/xen-9p-backend.c   | 13
4 files changed, 38 insertions(+), 21 deletions(-)
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 520177f..8b247b9 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -2090,22 +2090,29 @@ out_nofid:
* with qemu_iovec_destroy().
*/
static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
- size_t skip, size_t size,
+ size_t skip, size_t *size,
bool is_write)
{
QEMUIOVector elem;
struct iovec *iov;
unsigned int niov;
+ size_t alloc_size = *size + skip;
if (is_write) {
- pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
+ pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, alloc_size);
} else {
- pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
+ pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, &alloc_size);
+ }
+
+ if (alloc_size < skip) {
+ *size = 0;
+ } else {
+ *size = alloc_size - skip;
}
qemu_iovec_init_external(&elem, iov, niov);
qemu_iovec_init(qiov, niov);
- qemu_iovec_concat(qiov, &elem, skip, size);
+ qemu_iovec_concat(qiov, &elem, skip, *size);
}
static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
@@ -2113,15 +2120,14 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
{
ssize_t err;
size_t offset = 7;
- uint64_t read_count;
+ size_t read_count;
QEMUIOVector qiov_full;
if (fidp->fs.xattr.len < off) {
read_count = 0;
- } else {
+ } else if (fidp->fs.xattr.len - off < max_count) {
read_count = fidp->fs.xattr.len - off;
- }
- if (read_count > max_count) {
+ } else {
read_count = max_count;
}
err = pdu_marshal(pdu, offset, "d", read_count);
@@ -2130,7 +2136,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
}
offset += err;
- v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
+ v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &read_count, false);
err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
((char *)fidp->fs.xattr.value) + off,
read_count);
@@ -2259,9 +2265,11 @@ static void coroutine_fn v9fs_read(void *opaque)
QEMUIOVector qiov_full;
QEMUIOVector qiov;
int32_t len;
+ size_t size = max_count;
- v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
+ v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, &size, false);
qemu_iovec_init(&qiov, qiov_full.niov);
+ max_count = size;
do {
qemu_iovec_reset(&qiov);
qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
@@ -2504,6 +2512,7 @@ static void coroutine_fn v9fs_write(void *opaque)
int32_t len = 0;
int32_t total = 0;
size_t offset = 7;
+ size_t size;
V9fsFidState *fidp;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
@@ -2516,7 +2525,9 @@ static void coroutine_fn v9fs_write(void *opaque)
return;
}
offset += err;
- v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
+ size = count;
+ v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &size, true);
+ count = size;
trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
fidp = get_fid(pdu, fid);
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index 3904f82..8d07a0b 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -425,7 +425,7 @@ struct V9fsTransport {
ssize_t (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
va_list ap);
void (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
- unsigned int *pniov, size_t size);
+ unsigned int *pniov, size_t *size);
void (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
unsigned int *pniov, size_t size);
void (*push_and_notify)(V9fsPDU *pdu);
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index b5a7c03..991e175 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -147,19 +147,22 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
}
static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
- unsigned int *pniov, size_t size)
+ unsigned int *pniov, size_t *size)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = v->elems[pdu->idx];
size_t buf_size = iov_size(elem->in_sg, elem->in_num);
- if (buf_size < size) {
+ if (buf_size < P9_IOHDRSZ) {
VirtIODevice *vdev = VIRTIO_DEVICE(v);
virtio_error(vdev,
- "VirtFS reply type %d needs %zu bytes, buffer has %zu",
- pdu->id + 1, size, buf_size);
+ "VirtFS reply type %d needs %zu bytes, buffer has %zu, less than minimum",
+ pdu->id + 1, *size, buf_size);
+ }
+ if (buf_size < *size) {
+ *size = buf_size;
}
*piov = elem->in_sg;
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 71eebe1..18fe5b7 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -187,7 +187,7 @@ static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
struct iovec **piov,
unsigned int *pniov,
- size_t size)
+ size_t *size)
{
Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
@@ -197,16 +197,19 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
g_free(ring->sg);
ring->sg = g_new0(struct iovec, 2);
- xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
+ xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
buf_size = iov_size(ring->sg, num);
- if (buf_size < size) {
+ if (buf_size < P9_IOHDRSZ) {
xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d"
- "needs %zu bytes, buffer has %zu\n", pdu->id, size,
- buf_size);
+ "needs %zu bytes, buffer has %zu, less than minimum\n",
+ pdu->id, *size, buf_size);
xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
xen_9pfs_disconnect(&xen_9pfs->xendev);
}
+ if (buf_size < *size) {
+ *size = buf_size;
+ }
*piov = ring->sg;
*pniov = num;