about | summary | refs | log | tree | commit | diff
path: root/lib/dma.c
diff options
context:
space:
mode:
authorJohn Levon <john.levon@nutanix.com>2022-05-30 09:41:32 +0100
committerGitHub <noreply@github.com>2022-05-30 09:41:32 +0100
commite036ac145acea1a5aa77879e978ac2fff909a657 (patch)
tree1f0837b4c79feb97aa642d4e505e3d64012896d7 /lib/dma.c
parent79e83e482d4eb0b7a07cfa207506d33edf05d04b (diff)
downloadlibvfio-user-e036ac145acea1a5aa77879e978ac2fff909a657.zip
libvfio-user-e036ac145acea1a5aa77879e978ac2fff909a657.tar.gz
libvfio-user-e036ac145acea1a5aa77879e978ac2fff909a657.tar.bz2
allow concurrent dirty bitmap get (#677)
Use atomic operations to allow concurrent bitmap updates with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP operations. Dirtying clients can race against each other, so we must use an atomic OR when marking dirty: we do this byte by byte. When reading the dirty bitmap, we must be careful not to race and lose any set bits within the same byte. If we miss an update, we'll catch it the next time around, presuming that before the final pass we'll have quiesced all I/O. Signed-off-by: John Levon <john.levon@nutanix.com> Reviewed-by: Raphael Norwitz <raphael.norwitz@nutanix.com> Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
Diffstat (limited to 'lib/dma.c')
-rw-r--r--lib/dma.c34
1 file changed, 28 insertions, 6 deletions
diff --git a/lib/dma.c b/lib/dma.c
index 5ca897f..ac3ddfe 100644
--- a/lib/dma.c
+++ b/lib/dma.c
@@ -31,7 +31,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
-#include <stdio.h>
#include <sys/param.h>
#include <stddef.h>
@@ -281,7 +280,8 @@ dirty_page_logging_start_on_region(dma_memory_region_t *region, size_t pgsize)
if (size < 0) {
return size;
}
- region->dirty_bitmap = calloc(size, sizeof(char));
+
+ region->dirty_bitmap = calloc(size, 1);
if (region->dirty_bitmap == NULL) {
return ERROR_INT(errno);
}
@@ -553,10 +553,11 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
uint64_t len, size_t pgsize, size_t size,
char *bitmap)
{
- int ret;
+ dma_memory_region_t *region;
ssize_t bitmap_size;
dma_sg_t sg;
- dma_memory_region_t *region;
+ size_t i;
+ int ret;
assert(dma != NULL);
assert(bitmap != NULL);
@@ -599,11 +600,32 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
return ERROR_INT(EINVAL);
}
- memcpy(bitmap, region->dirty_bitmap, size);
+ for (i = 0; i < (size_t)bitmap_size; i++) {
+ uint8_t val = region->dirty_bitmap[i];
+ uint8_t *outp = (uint8_t *)&bitmap[i];
+
+ /*
+ * If no bits are dirty, avoid the atomic exchange. This is obviously
+ * racy, but it's OK: if we miss a dirty bit being set, we'll catch it
+ * the next time around.
+ *
+ * Otherwise, atomically exchange the dirty bits with zero: as we use
+ * atomic or in _dma_mark_dirty(), this cannot lose set bits - we might
+ * miss a bit being set after, but again, we'll catch that next time
+ * around.
+ */
+ if (val == 0) {
+ *outp = 0;
+ } else {
+ uint8_t zero = 0;
+ __atomic_exchange(&region->dirty_bitmap[i], &zero,
+ outp, __ATOMIC_SEQ_CST);
+ }
+ }
+
#ifdef DEBUG
log_dirty_bitmap(dma->vfu_ctx, region, bitmap, size);
#endif
- memset(region->dirty_bitmap, 0, size);
return 0;
}