diff options
| author | Hollin <hollinisme@gmail.com> | 2026-04-17 04:36:41 +0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2026-04-16 21:36:41 +0100 |
| commit | 4d9f663450fa80ff375612dbbafe073700e3d3d8 (patch) | |
| tree | 9b83eeaecae8a43c03628713c3c8d0d97b852f5e | |
| parent | 2311cde4a933dc62ee65b4fe8f408e37bf2bb390 (diff) | |
| download | libvfio-user-master.tar.gz libvfio-user-master.tar.bz2 libvfio-user-master.zip | |
The dirty bitmap is allocated when dirty tracking is enabled for a DMA
region, but it was never released during region teardown.
This could lead to a memory leak over time with repeated DMA map/unmap
operations while dirty page logging is active.
Signed-off-by: liuhaolin <hollinisme@gmail.com>
| -rw-r--r-- | lib/dma.c | 50 | ||||
| -rw-r--r-- | test/py/test_dirty_pages.py | 86 |
2 files changed, 116 insertions, 20 deletions
@@ -66,6 +66,32 @@ fd_get_blocksize(int fd) return st.st_blksize; } +static int +dirty_page_logging_start_on_region(dma_memory_region_t *region, size_t pgsize) +{ + assert(region->fd != -1); + + ssize_t size = get_bitmap_size(region->info.iova.iov_len, pgsize); + if (size < 0) { + return size; + } + + region->dirty_bitmap = calloc(size, 1); + if (region->dirty_bitmap == NULL) { + return ERROR_INT(errno); + } + return 0; +} + +static void +dirty_page_logging_stop_on_region(dma_memory_region_t *region) +{ + if (region->dirty_bitmap != NULL) { + free(region->dirty_bitmap); + region->dirty_bitmap = NULL; + } +} + dma_controller_t * dma_controller_create(vfu_ctx_t *vfu_ctx, size_t max_regions, size_t max_size) { @@ -135,6 +161,8 @@ MOCK_DEFINE(dma_controller_unmap_region)(dma_controller_t *dma, assert(region->fd != -1); + dirty_page_logging_stop_on_region(region); + err = fd_cache_put(®ion->fd); assert(err == 0); } @@ -258,23 +286,6 @@ dma_map_region(dma_controller_t *dma, dma_memory_region_t *region) return 0; } -static int -dirty_page_logging_start_on_region(dma_memory_region_t *region, size_t pgsize) -{ - assert(region->fd != -1); - - ssize_t size = get_bitmap_size(region->info.iova.iov_len, pgsize); - if (size < 0) { - return size; - } - - region->dirty_bitmap = calloc(size, 1); - if (region->dirty_bitmap == NULL) { - return ERROR_INT(errno); - } - return 0; -} - dma_memory_region_t * MOCK_DEFINE(dma_controller_add_region)(dma_controller_t *dma, vfu_dma_addr_t dma_addr, uint64_t size, @@ -413,7 +424,7 @@ rollback: if (region->info.vaddr != NULL) { dma_controller_unmap_region(dma, region); } - free(region->dirty_bitmap); + dirty_page_logging_stop_on_region(region); free(region); } err = fd_cache_put(&fd); @@ -475,8 +486,7 @@ dma_controller_dirty_page_logging_reset(dma_controller_t *dma) for (btree_iter_init(&dma->regions, 0, &iter); (region = btree_iter_get(&iter, NULL)) != NULL; btree_iter_next(&iter)) { - free(region->dirty_bitmap); - region->dirty_bitmap = NULL; + 
dirty_page_logging_stop_on_region(region); } dma->dirty_pgsize = 0; } diff --git a/test/py/test_dirty_pages.py b/test/py/test_dirty_pages.py index 6abea1c..b4f2468 100644 --- a/test/py/test_dirty_pages.py +++ b/test/py/test_dirty_pages.py @@ -412,4 +412,90 @@ def test_dirty_pages_uninitialised_dma(): vfu_destroy_ctx(ctx) + +def test_dirty_pages_ctx_no_stop(): + """ + Test that dirty page tracking resources are properly cleaned up even when: + 1. DMA regions are unmapped while dirty page logging is active + 2. The device context is destroyed without explicitly stopping dirty page + logging first + + This validates that there are no memory leaks in the dirty bitmap handling + paths during both explicit DMA unmap operations and implicit cleanup during + context destruction. + """ + global ctx, client + + # Create context and initialize device + ctx = vfu_create_ctx(flags=LIBVFIO_USER_FLAG_ATTACH_NB) + assert ctx is not None + + ret = vfu_pci_init(ctx) + assert ret == 0 + + vfu_setup_device_quiesce_cb(ctx, quiesce_cb=quiesce_cb) + + ret = vfu_setup_device_dma(ctx, MAX_DMA_REGIONS, + dma_register, + dma_unregister) + assert ret == 0 + + ret = vfu_realize_ctx(ctx) + assert ret == 0 + + client = connect_client(ctx) + + # Create two separate DMA regions + f1 = tempfile.TemporaryFile() + f1.truncate(0x10 << PAGE_SHIFT) + f2 = tempfile.TemporaryFile() + f2.truncate(0x10 << PAGE_SHIFT) + + # Map first region at 0x100000 + payload = vfio_user_dma_map(argsz=len(vfio_user_dma_map()), + flags=(VFIO_USER_F_DMA_REGION_READ | VFIO_USER_F_DMA_REGION_WRITE), + offset=0, addr=0x10 << PAGE_SHIFT, size=0x10 << PAGE_SHIFT) + msg(ctx, client.sock, VFIO_USER_DMA_MAP, payload, fds=[f1.fileno()]) + + # Map second region at 0x300000 + payload = vfio_user_dma_map(argsz=len(vfio_user_dma_map()), + flags=(VFIO_USER_F_DMA_REGION_READ | VFIO_USER_F_DMA_REGION_WRITE), + offset=0, addr=0x30 << PAGE_SHIFT, size=0x10 << PAGE_SHIFT) + msg(ctx, client.sock, VFIO_USER_DMA_MAP, payload, fds=[f2.fileno()]) 
+ + # Setup migration support + ret = vfu_setup_device_migration_callbacks(ctx) + assert ret == 0 + + # Start dirty page logging + start_logging() + + # Modify pages in both regions + write_to_page(ctx, 0x10, 1, get_bitmap=False) + write_to_page(ctx, 0x30, 1, get_bitmap=False) + + # Unmap the first DMA region + payload = vfio_user_dma_unmap(argsz=len(vfio_user_dma_unmap()), + addr=0x10 << PAGE_SHIFT, size=0x10 << PAGE_SHIFT) + msg(ctx, client.sock, VFIO_USER_DMA_UNMAP, payload) + + # Verify first region is actually unmapped (should fail) + get_dirty_page_bitmap(addr=0x10 << PAGE_SHIFT, + length=0x10 << PAGE_SHIFT, + expect=errno.ENOENT) + + # Verify second region still works and dirty bit is correctly set + bitmap = get_dirty_page_bitmap(addr=0x30 << PAGE_SHIFT, + length=0x10 << PAGE_SHIFT) + # First page of second region should be marked dirty + assert bitmap == 0b1 + + # Don't unmap the second DMA region - let context destruction clean it up + # This tests that the destroy path properly frees remaining dirty bitmaps + + # Cleanup + client.disconnect(ctx) + vfu_destroy_ctx(ctx) + + # ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: |
