diff options
author | John Levon <john.levon@nutanix.com> | 2022-05-27 17:29:32 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2022-05-27 17:29:32 +0100 |
commit | 538d6063c9f8d395e1d38285ddfe405c3fcd7619 (patch) | |
tree | 96afbcf64f70916966744d3422a113b81e1c21ba /lib | |
parent | 065c33e7dc7bbd1d5964a5a3af173a69ad3ee931 (diff) | |
download | libvfio-user-538d6063c9f8d395e1d38285ddfe405c3fcd7619.zip libvfio-user-538d6063c9f8d395e1d38285ddfe405c3fcd7619.tar.gz libvfio-user-538d6063c9f8d395e1d38285ddfe405c3fcd7619.tar.bz2 |
remove maps list from DMA controller (#674)
->maps existed so that, if a consumer called vfu_map_sg() before dirty page
tracking was enabled, those already-mapped writeable pages would still be marked
dirty once tracking started; without the list they would be missed and data
could potentially be lost.
Now that we require quiesce and the use of either vfu_unmap_sg() or
vfu_sg_mark_dirty(), there's no need to have this list any more.
Signed-off-by: John Levon <john.levon@nutanix.com>
Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/dma.c | 21 | ||||
-rw-r--r-- | lib/dma.h | 28 |
2 files changed, 7 insertions, 42 deletions
@@ -99,7 +99,6 @@ dma_controller_create(vfu_ctx_t *vfu_ctx, size_t max_regions, size_t max_size) dma->nregions = 0; memset(dma->regions, 0, max_regions * sizeof(dma->regions[0])); dma->dirty_pgsize = 0; - LIST_INIT(&dma->maps); return dma; } @@ -468,22 +467,6 @@ out: return cnt; } -static void -dma_mark_dirty_sgs(dma_controller_t *dma) -{ - struct dma_sg *sg; - - if (dma->dirty_pgsize == 0) { - return; - } - - LIST_FOREACH(sg, &dma->maps, entry) { - if (sg->writeable) { - _dma_mark_dirty(dma, &dma->regions[sg->region], sg); - } - } -} - int dma_controller_dirty_page_logging_start(dma_controller_t *dma, size_t pgsize) { @@ -523,8 +506,6 @@ dma_controller_dirty_page_logging_start(dma_controller_t *dma, size_t pgsize) } dma->dirty_pgsize = pgsize; - dma_mark_dirty_sgs(dma); - vfu_log(dma->vfu_ctx, LOG_DEBUG, "dirty pages: started logging"); return 0; @@ -628,8 +609,6 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr, #endif memset(region->dirty_bitmap, 0, size); - dma_mark_dirty_sgs(dma); - return 0; } @@ -89,7 +89,6 @@ struct dma_sg { uint64_t length; uint64_t offset; bool writeable; - LIST_ENTRY(dma_sg) entry; }; typedef struct { @@ -105,7 +104,6 @@ typedef struct dma_controller { int nregions; struct vfu_ctx *vfu_ctx; size_t dirty_pgsize; // Dirty page granularity - LIST_HEAD(, dma_sg) maps; dma_memory_region_t regions[0]; } dma_controller_t; @@ -245,10 +243,6 @@ dma_map_sg(dma_controller_t *dma, dma_sg_t *sg, struct iovec *iov, return ERROR_INT(EFAULT); } - if (sg->writeable) { - LIST_INSERT_HEAD(&dma->maps, sg, entry); - } - vfu_log(dma->vfu_ctx, LOG_DEBUG, "map %p-%p", sg->dma_addr + sg->offset, sg->dma_addr + sg->offset + sg->length); @@ -294,30 +288,22 @@ dma_mark_sg_dirty(dma_controller_t *dma, dma_sg_t *sg, int cnt) static inline void dma_unmap_sg(dma_controller_t *dma, dma_sg_t *sg, int cnt) { + dma_memory_region_t *region; + assert(dma != NULL); assert(sg != NULL); assert(cnt > 0); do { - dma_memory_region_t *r; - /* - * FIXME 
this double loop will be removed if we replace the array with - * tfind(3) - */ - for (r = dma->regions; - r < dma->regions + dma->nregions && - r->info.iova.iov_base != sg->dma_addr; - r++); - if (r > dma->regions + dma->nregions) { - /* bad region */ - continue; + if (sg->region >= dma->nregions) { + return; } - if (sg->writeable) { - LIST_REMOVE(sg, entry); + region = &dma->regions[sg->region]; + if (sg->writeable) { if (dma->dirty_pgsize > 0) { - _dma_mark_dirty(dma, r, sg); + _dma_mark_dirty(dma, region, sg); } } |