author     Peter Maydell <peter.maydell@linaro.org>    2017-06-29 11:45:01 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2017-06-29 11:45:01 +0100
commit     464588675455afda2899e20a0b120e4075de50c7
tree       4c2d66d238db7c061bfd6105b8885a288cad9040
parent     82991bed738783d68c9950840b2e98d84b9f0621
parent     3284fad7283596033cb810b4788fd1bb43312dbd
Merge remote-tracking branch 'remotes/sstabellini/tags/xen-20170627-tag' into staging
Xen 2017/06/27
# gpg: Signature made Tue 27 Jun 2017 23:02:43 BST
# gpg: using RSA key 0x894F8F4870E1AE90
# gpg: Good signature from "Stefano Stabellini <stefano.stabellini@eu.citrix.com>"
# gpg: aka "Stefano Stabellini <sstabellini@kernel.org>"
# Primary key fingerprint: D04E 33AB A51F 67BA 07D3 0AEA 894F 8F48 70E1 AE90
* remotes/sstabellini/tags/xen-20170627-tag:
xen-disk: add support for multi-page shared rings
xen-disk: only advertize feature-persistent if grant copy is not available
xen/disk: don't leak stack data via response ring
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
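
The first patch above lets the frontend negotiate a shared ring of up to 2^MAX_RING_PAGE_ORDER pages through the ring-page-order xenstore key, and the backend then derives its maximum number of in-flight requests from the ring size instead of the old hard-coded limit of 32. A rough sketch of that arithmetic follows; the 64-byte ring header and 112-byte slot size are assumptions for the native 64-bit blkif ABI (the patch itself uses the __CONST_RING_SIZE macro from the Xen ring headers), so treat the numbers as illustrative only.

    #include <stdio.h>

    #define XC_PAGE_SIZE      4096
    #define SRING_HEADER_SIZE 64    /* assumed size of the shared-ring header */
    #define SRING_SLOT_SIZE   112   /* assumed size of one request/response slot */

    /* Largest power of two that fits, mirroring what __CONST_RING_SIZE computes. */
    static unsigned int ring_slots(unsigned int ring_size)
    {
        unsigned int raw = (ring_size - SRING_HEADER_SIZE) / SRING_SLOT_SIZE;
        unsigned int slots = 1;

        while (slots * 2 <= raw) {
            slots *= 2;
        }
        return slots;
    }

    int main(void)
    {
        unsigned int order;

        for (order = 0; order <= 4; order++) {
            unsigned int ring_size = XC_PAGE_SIZE * (1u << order);

            printf("ring-page-order %u -> %2u page(s), %u request slots\n",
                   order, 1u << order, ring_slots(ring_size));
        }
        /* order 0 yields 32 slots, matching the old static max_requests. */
        return 0;
    }

With order 4 the backend can keep many more requests in flight than before, which is why max_requests moves from a file-scope constant into struct XenBlkDev in the diff below.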
-rw-r--r--  hw/block/xen_disk.c | 184
1 file changed, 133 insertions(+), 51 deletions(-)
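
Of the three patches, the stack-leak fix is visible in blk_send_response_one() in the diff below: the old code filled in a blkif_response_t on the stack and memcpy'd the whole struct, padding bytes included, into the guest-visible ring. The snippet below is a minimal, self-contained illustration of why that can leak stack contents; the struct layout is an assumption chosen to resemble the response layout (a 64-bit id, an 8-bit operation, a 16-bit status), not the actual Xen definition.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct fake_response {      /* illustrative layout only */
        uint64_t id;
        uint8_t  operation;
        int16_t  status;        /* compiler inserts padding around this */
    };

    int main(void)
    {
        struct fake_response r; /* deliberately not zero-initialised */
        unsigned char slot[sizeof(r)];

        r.id = 1;
        r.operation = 0;
        r.status = 0;

        /* Copies sizeof(r) bytes, including padding whose content is whatever
         * happened to be on the stack; that is what would become guest-visible. */
        memcpy(slot, &r, sizeof(r));

        printf("struct size %zu bytes, named fields only %zu bytes\n",
               sizeof(r),
               sizeof(r.id) + sizeof(r.operation) + sizeof(r.status));
        return 0;
    }

The fix in the patch writes the three named fields directly into the response slot that already lives in the shared page, so no uninitialised padding is copied across.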
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 3a22805..d42ed70 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -36,8 +36,6 @@
 static int batch_maps = 0;
-static int max_requests = 32;
-
 /* ------------------------------------------------------------- */
 #define BLOCK_SIZE 512
@@ -84,6 +82,8 @@ struct ioreq {
     BlockAcctCookie acct;
 };
+#define MAX_RING_PAGE_ORDER 4
+
 struct XenBlkDev {
     struct XenDevice xendev; /* must be first */
     char *params;
@@ -94,7 +94,8 @@ struct XenBlkDev {
     bool directiosafe;
     const char *fileproto;
     const char *filename;
-    int ring_ref;
+    unsigned int ring_ref[1 << MAX_RING_PAGE_ORDER];
+    unsigned int nr_ring_ref;
     void *sring;
     int64_t file_blk;
     int64_t file_size;
@@ -110,6 +111,7 @@ struct XenBlkDev {
     int requests_total;
     int requests_inflight;
     int requests_finished;
+    unsigned int max_requests;
     /* Persistent grants extension */
     gboolean feature_discard;
@@ -199,7 +201,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
     struct ioreq *ioreq = NULL;
     if (QLIST_EMPTY(&blkdev->freelist)) {
-        if (blkdev->requests_total >= max_requests) {
+        if (blkdev->requests_total >= blkdev->max_requests) {
             goto out;
         }
         /* allocate new struct */
@@ -769,31 +771,30 @@ static int blk_send_response_one(struct ioreq *ioreq)
     struct XenBlkDev *blkdev = ioreq->blkdev;
     int send_notify = 0;
     int have_requests = 0;
-    blkif_response_t resp;
-    void *dst;
-
-    resp.id = ioreq->req.id;
-    resp.operation = ioreq->req.operation;
-    resp.status = ioreq->status;
+    blkif_response_t *resp;
     /* Place on the response ring for the relevant domain. */
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
-        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
+        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.native,
+                                                      blkdev->rings.native.rsp_prod_pvt);
         break;
     case BLKIF_PROTOCOL_X86_32:
-        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
-                                blkdev->rings.x86_32_part.rsp_prod_pvt);
+        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
+                                                      blkdev->rings.x86_32_part.rsp_prod_pvt);
         break;
     case BLKIF_PROTOCOL_X86_64:
-        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
-                                blkdev->rings.x86_64_part.rsp_prod_pvt);
+        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
+                                                      blkdev->rings.x86_64_part.rsp_prod_pvt);
         break;
     default:
-        dst = NULL;
         return 0;
     }
-    memcpy(dst, &resp, sizeof(resp));
+
+    resp->id = ioreq->req.id;
+    resp->operation = ioreq->req.operation;
+    resp->status = ioreq->status;
+
     blkdev->rings.common.rsp_prod_pvt++;
     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
@@ -905,7 +906,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
         ioreq_runio_qemu_aio(ioreq);
     }
-    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
+    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
         qemu_bh_schedule(blkdev->bh);
     }
 }
@@ -918,15 +919,6 @@ static void blk_bh(void *opaque)
     blk_handle_requests(blkdev);
 }
-/*
- * We need to account for the grant allocations requiring contiguous
- * chunks; the worst case number would be
- * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
- * but in order to keep things simple just use
- * 2 * max_req * max_seg.
- */
-#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
-
 static void blk_alloc(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
@@ -938,11 +930,6 @@ static void blk_alloc(struct XenDevice *xendev)
     if (xen_mode != XEN_EMULATE) {
         batch_maps = 1;
     }
-    if (xengnttab_set_max_grants(xendev->gnttabdev,
-            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
-        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
-                      strerror(errno));
-    }
 }
 static void blk_parse_discard(struct XenBlkDev *blkdev)
@@ -1023,13 +1010,23 @@ static int blk_init(struct XenDevice *xendev)
     blkdev->file_blk = BLOCK_SIZE;
+    blkdev->feature_grant_copy =
+                (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);
+
+    xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
+                  blkdev->feature_grant_copy ? "enabled" : "disabled");
+
     /* fill info
      * blk_connect supplies sector-size and sectors
      */
     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
-    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
+    xenstore_write_be_int(&blkdev->xendev, "feature-persistent",
+                          !blkdev->feature_grant_copy);
     xenstore_write_be_int(&blkdev->xendev, "info", info);
+    xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
+                          MAX_RING_PAGE_ORDER);
+
     blk_parse_discard(blkdev);
     g_free(directiosafe);
@@ -1051,12 +1048,25 @@ out_error:
     return -1;
 }
+/*
+ * We need to account for the grant allocations requiring contiguous
+ * chunks; the worst case number would be
+ * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
+ * but in order to keep things simple just use
+ * 2 * max_req * max_seg.
+ */
+#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
+
 static int blk_connect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
     int pers, index, qflags;
     bool readonly = true;
     bool writethrough = true;
+    int order, ring_ref;
+    unsigned int ring_size, max_grants;
+    unsigned int i;
+    uint32_t *domids;
     /* read-only ? */
     if (blkdev->directiosafe) {
@@ -1131,9 +1141,42 @@ static int blk_connect(struct XenDevice *xendev)
     xenstore_write_be_int64(&blkdev->xendev, "sectors",
                             blkdev->file_size / blkdev->file_blk);
-    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
+    if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
+                             &order) == -1) {
+        blkdev->nr_ring_ref = 1;
+
+        if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
+                                 &ring_ref) == -1) {
+            return -1;
+        }
+        blkdev->ring_ref[0] = ring_ref;
+
+    } else if (order >= 0 && order <= MAX_RING_PAGE_ORDER) {
+        blkdev->nr_ring_ref = 1 << order;
+
+        for (i = 0; i < blkdev->nr_ring_ref; i++) {
+            char *key;
+
+            key = g_strdup_printf("ring-ref%u", i);
+            if (!key) {
+                return -1;
+            }
+
+            if (xenstore_read_fe_int(&blkdev->xendev, key,
+                                     &ring_ref) == -1) {
+                g_free(key);
+                return -1;
+            }
+            blkdev->ring_ref[i] = ring_ref;
+
+            g_free(key);
+        }
+    } else {
+        xen_pv_printf(xendev, 0, "invalid ring-page-order: %d\n",
+                      order);
         return -1;
     }
+
     if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                              &blkdev->xendev.remote_port) == -1) {
         return -1;
@@ -1156,41 +1199,85 @@ static int blk_connect(struct XenDevice *xendev)
         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
     }
-    blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
-                                            blkdev->xendev.dom,
-                                            blkdev->ring_ref,
-                                            PROT_READ | PROT_WRITE);
+    ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
+    switch (blkdev->protocol) {
+    case BLKIF_PROTOCOL_NATIVE:
+    {
+        blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
+        break;
+    }
+    case BLKIF_PROTOCOL_X86_32:
+    {
+        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
+        break;
+    }
+    case BLKIF_PROTOCOL_X86_64:
+    {
+        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
+        break;
+    }
+    default:
+        return -1;
+    }
+
+    /* Calculate the maximum number of grants needed by ioreqs */
+    max_grants = MAX_GRANTS(blkdev->max_requests,
+                            BLKIF_MAX_SEGMENTS_PER_REQUEST);
+    /* Add on the number needed for the ring pages */
+    max_grants += blkdev->nr_ring_ref;
+
+    if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
+        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
+                      strerror(errno));
+        return -1;
+    }
+
+    domids = g_malloc0_n(blkdev->nr_ring_ref, sizeof(uint32_t));
+    for (i = 0; i < blkdev->nr_ring_ref; i++) {
+        domids[i] = blkdev->xendev.dom;
+    }
+
+    blkdev->sring = xengnttab_map_grant_refs(blkdev->xendev.gnttabdev,
+                                             blkdev->nr_ring_ref,
+                                             domids,
+                                             blkdev->ring_ref,
+                                             PROT_READ | PROT_WRITE);
+
+    g_free(domids);
+
     if (!blkdev->sring) {
         return -1;
     }
+
     blkdev->cnt_map++;
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
         blkif_sring_t *sring_native = blkdev->sring;
-        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
+        BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
         break;
     }
     case BLKIF_PROTOCOL_X86_32:
     {
         blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
-        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
+        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, ring_size);
         break;
     }
     case BLKIF_PROTOCOL_X86_64:
     {
         blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
-        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
+        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, ring_size);
         break;
     }
     }
     if (blkdev->feature_persistent) {
         /* Init persistent grants */
-        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
+        blkdev->max_grants = blkdev->max_requests *
+                             BLKIF_MAX_SEGMENTS_PER_REQUEST;
         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                   NULL, NULL, batch_maps ?
@@ -1202,15 +1289,9 @@ static int blk_connect(struct XenDevice *xendev)
     xen_be_bind_evtchn(&blkdev->xendev);
-    blkdev->feature_grant_copy =
-                (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);
-
-    xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
-                  blkdev->feature_grant_copy ? "enabled" : "disabled");
-
-    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
+    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
                   "remote port %d, local port %d\n",
-                  blkdev->xendev.protocol, blkdev->ring_ref,
+                  blkdev->xendev.protocol, blkdev->nr_ring_ref,
                   blkdev->xendev.remote_port, blkdev->xendev.local_port);
     return 0;
 }
@@ -1227,7 +1308,8 @@ static void blk_disconnect(struct XenDevice *xendev)
         xen_pv_unbind_evtchn(&blkdev->xendev);
     if (blkdev->sring) {
-        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
+        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
+                        blkdev->nr_ring_ref);
         blkdev->cnt_map--;
         blkdev->sring = NULL;
     }
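
For reference, the MAX_GRANTS() comment that the last hunks move into blk_connect() gives the exact worst case for contiguous grant allocations, max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1, and then deliberately over-approximates it as 2 * max_req * max_seg. A quick check of the numbers, assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11 and the 32 requests of a single-page native ring (both values are assumptions for illustration):

    #include <stdio.h>

    /* Same simplification as the patch. */
    #define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

    int main(void)
    {
        unsigned int max_req = 32;      /* one-page native ring (assumed) */
        unsigned int max_seg = 11;      /* BLKIF_MAX_SEGMENTS_PER_REQUEST (assumed) */
        unsigned int nr_ring_ref = 1;   /* single ring page */

        unsigned int exact = max_req * max_seg
                             + (max_req - 1) * (max_seg - 1) + 1; /* 663 */
        unsigned int bound = MAX_GRANTS(max_req, max_seg);        /* 704 */

        printf("exact worst case      : %u grants\n", exact);
        printf("simplified upper bound: %u grants\n", bound);
        printf("with ring pages added : %u grants\n", bound + nr_ring_ref);
        return 0;
    }

The patch also adds blkdev->nr_ring_ref to the limit passed to xengnttab_set_max_grants(), since the ring pages themselves are now mapped through the same grant table handle.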