aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--include/libvfio-user.h31
-rw-r--r--lib/dma.c47
-rw-r--r--lib/dma.h40
-rw-r--r--lib/libvfio-user.c110
-rw-r--r--samples/server.c8
-rw-r--r--test/py/libvfio_user.py20
-rw-r--r--test/py/test_dirty_pages.py94
-rw-r--r--test/unit-tests.c2
8 files changed, 244 insertions, 108 deletions
diff --git a/include/libvfio-user.h b/include/libvfio-user.h
index e05cacf..cd470a9 100644
--- a/include/libvfio-user.h
+++ b/include/libvfio-user.h
@@ -42,6 +42,7 @@
#include <sys/uio.h>
#include <unistd.h>
#include <syslog.h>
+#include <sys/queue.h>
#include "pci_caps/dsn.h"
#include "pci_caps/msi.h"
@@ -61,17 +62,18 @@ extern "C" {
/* DMA addresses cannot be directly de-referenced. */
typedef void *vfu_dma_addr_t;
-typedef struct {
- vfu_dma_addr_t dma_addr;
- int region;
- uint64_t length;
- uint64_t offset;
- bool mappable;
-} dma_sg_t;
+struct dma_sg;
+typedef struct dma_sg dma_sg_t;
typedef struct vfu_ctx vfu_ctx_t;
/*
+ * Returns the size, in bytes, of dma_sg_t.
+ */
+size_t
+dma_sg_size(void);
+
+/*
* Attaching to the transport is non-blocking.
* The caller must then manually call vfu_attach_ctx(),
* which is non-blocking, as many times as necessary.
@@ -649,16 +651,19 @@ vfu_addr_to_sg(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr, size_t len,
* vfu_setup_device_dma().
*
* @vfu_ctx: the libvfio-user context
- * @sg: array of scatter/gather entries returned by vfu_addr_to_sg
+ * @sg: array of scatter/gather entries returned by vfu_addr_to_sg. These
+ * entries must not be modified and the array must not be deallocated
+ * until vfu_unmap_sg() has been called for each entry.
* @iov: array of iovec structures (defined in <sys/uio.h>) to receive each
* mapping
* @cnt: number of scatter/gather entries to map
+ * @flags: must be 0
*
* @returns 0 on success, -1 on failure. Sets errno.
*/
int
-vfu_map_sg(vfu_ctx_t *vfu_ctx, const dma_sg_t *sg,
- struct iovec *iov, int cnt);
+vfu_map_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt,
+ int flags);
/**
* Unmaps a list of scatter/gather entries (previously mapped by vfu_map_sg()) from
@@ -670,8 +675,7 @@ vfu_map_sg(vfu_ctx_t *vfu_ctx, const dma_sg_t *sg,
* @cnt: number of scatter/gather entries to unmap
*/
void
-vfu_unmap_sg(vfu_ctx_t *vfu_ctx, const dma_sg_t *sg,
- struct iovec *iov, int cnt);
+vfu_unmap_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt);
/**
* Read from the dma region exposed by the client. This can be used as an
@@ -876,6 +880,9 @@ size_t
vfu_pci_find_next_capability(vfu_ctx_t *vfu_ctx, bool extended,
size_t pos, int cap_id);
+bool
+vfu_sg_is_mappable(vfu_ctx_t *vfu_ctx, dma_sg_t *sg);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/dma.c b/lib/dma.c
index b76c18c..9270114 100644
--- a/lib/dma.c
+++ b/lib/dma.c
@@ -44,6 +44,17 @@
#include "dma.h"
#include "private.h"
+size_t
+dma_sg_size(void)
+{
+ return sizeof(dma_sg_t);
+}
+
+bool
+dma_sg_is_mappable(const dma_controller_t *dma, const dma_sg_t *sg) {
+ return dma->regions[sg->region].info.vaddr != NULL;
+}
+
static inline ssize_t
fd_get_blocksize(int fd)
{
@@ -88,6 +99,7 @@ dma_controller_create(vfu_ctx_t *vfu_ctx, size_t max_regions, size_t max_size)
dma->nregions = 0;
memset(dma->regions, 0, max_regions * sizeof(dma->regions[0]));
dma->dirty_pgsize = 0;
+ LIST_INIT(&dma->maps);
return dma;
}
@@ -463,7 +475,24 @@ out:
return cnt;
}
-int dma_controller_dirty_page_logging_start(dma_controller_t *dma, size_t pgsize)
+static void
+dma_mark_dirty_sgs(dma_controller_t *dma)
+{
+ struct dma_sg *sg;
+
+ if (dma->dirty_pgsize == 0) {
+ return;
+ }
+
+ LIST_FOREACH(sg, &dma->maps, entry) {
+ if (sg->writeable) {
+ _dma_mark_dirty(dma, &dma->regions[sg->region], sg);
+ }
+ }
+}
+
+int
+dma_controller_dirty_page_logging_start(dma_controller_t *dma, size_t pgsize)
{
size_t i;
@@ -495,6 +524,9 @@ int dma_controller_dirty_page_logging_start(dma_controller_t *dma, size_t pgsize
}
}
dma->dirty_pgsize = pgsize;
+
+ dma_mark_dirty_sgs(dma);
+
return 0;
}
@@ -519,7 +551,7 @@ dma_controller_dirty_page_logging_stop(dma_controller_t *dma)
int
dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
uint64_t len, size_t pgsize, size_t size,
- char **data)
+ char *bitmap)
{
int ret;
ssize_t bitmap_size;
@@ -527,7 +559,7 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
dma_memory_region_t *region;
assert(dma != NULL);
- assert(data != NULL);
+ assert(bitmap != NULL);
/*
* FIXME for now we support IOVAs that match exactly the DMA region. This
@@ -562,7 +594,14 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
region = &dma->regions[sg.region];
- *data = region->dirty_bitmap;
+ /*
+ * TODO race condition between resetting bitmap and user calling
+ * vfu_map_sg/vfu_unmap_sg().
+ */
+ memcpy(bitmap, region->dirty_bitmap, size);
+ memset(region->dirty_bitmap, 0, size);
+
+ dma_mark_dirty_sgs(dma);
return 0;
}
diff --git a/lib/dma.h b/lib/dma.h
index 082ca46..29809d3 100644
--- a/lib/dma.h
+++ b/lib/dma.h
@@ -73,6 +73,7 @@
#include <stdlib.h>
#include <limits.h>
#include <errno.h>
+#include <sys/queue.h>
#include "libvfio-user.h"
#include "common.h"
@@ -82,6 +83,15 @@
struct vfu_ctx;
+struct dma_sg {
+ vfu_dma_addr_t dma_addr;
+ int region;
+ uint64_t length;
+ uint64_t offset;
+ bool writeable;
+ LIST_ENTRY(dma_sg) entry;
+};
+
typedef struct {
vfu_dma_info_t info;
int fd; // File descriptor to mmap
@@ -96,6 +106,7 @@ typedef struct dma_controller {
int nregions;
struct vfu_ctx *vfu_ctx;
size_t dirty_pgsize; // Dirty page granularity
+ LIST_HEAD(, dma_sg) maps;
dma_memory_region_t regions[0];
} dma_controller_t;
@@ -132,14 +143,6 @@ _dma_addr_sg_split(const dma_controller_t *dma,
vfu_dma_addr_t dma_addr, uint64_t len,
dma_sg_t *sg, int max_sg, int prot);
-static bool
-_dma_should_mark_dirty(const dma_controller_t *dma, int prot)
-{
- assert(dma != NULL);
-
- return (prot & PROT_WRITE) == PROT_WRITE && dma->dirty_pgsize > 0;
-}
-
static void
_dma_mark_dirty(const dma_controller_t *dma, const dma_memory_region_t *region,
dma_sg_t *sg)
@@ -172,10 +175,7 @@ dma_init_sg(const dma_controller_t *dma, dma_sg_t *sg, vfu_dma_addr_t dma_addr,
sg->region = region_index;
sg->offset = dma_addr - region->info.iova.iov_base;
sg->length = len;
- if (_dma_should_mark_dirty(dma, prot)) {
- _dma_mark_dirty(dma, region, sg);
- }
- sg->mappable = (region->info.vaddr != NULL);
+ sg->writeable = prot & PROT_WRITE;
return 0;
}
@@ -225,7 +225,7 @@ dma_addr_to_sg(const dma_controller_t *dma,
}
static inline int
-dma_map_sg(dma_controller_t *dma, const dma_sg_t *sg, struct iovec *iov,
+dma_map_sg(dma_controller_t *dma, dma_sg_t *sg, struct iovec *iov,
int cnt)
{
dma_memory_region_t *region;
@@ -245,6 +245,12 @@ dma_map_sg(dma_controller_t *dma, const dma_sg_t *sg, struct iovec *iov,
return ERROR_INT(EFAULT);
}
+ if (sg->writeable) {
+ if (dma->dirty_pgsize > 0) {
+ _dma_mark_dirty(dma, region, sg);
+ }
+ LIST_INSERT_HEAD(&dma->maps, &sg[i], entry);
+ }
vfu_log(dma->vfu_ctx, LOG_DEBUG, "map %p-%p",
sg->dma_addr + sg->offset,
sg->dma_addr + sg->offset + sg->length);
@@ -276,6 +282,9 @@ dma_unmap_sg(dma_controller_t *dma, const dma_sg_t *sg,
/* bad region */
continue;
}
+ if (sg->writeable) {
+ LIST_REMOVE(sg, entry);
+ }
vfu_log(dma->vfu_ctx, LOG_DEBUG, "unmap %p-%p",
sg[i].dma_addr + sg[i].offset,
sg[i].dma_addr + sg[i].offset + sg[i].length);
@@ -292,7 +301,10 @@ dma_controller_dirty_page_logging_stop(dma_controller_t *dma);
int
dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
uint64_t len, size_t pgsize, size_t size,
- char **data);
+ char *bitmap);
+bool
+dma_sg_is_mappable(const dma_controller_t *dma, const dma_sg_t *sg);
+
#endif /* LIB_VFIO_USER_DMA_H */
diff --git a/lib/libvfio-user.c b/lib/libvfio-user.c
index 40eb010..bbef62a 100644
--- a/lib/libvfio-user.c
+++ b/lib/libvfio-user.c
@@ -544,7 +544,6 @@ handle_dma_unmap(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg,
struct vfio_user_dma_unmap *dma_unmap)
{
int ret;
- char *bitmap = NULL;
char rstr[1024];
assert(vfu_ctx != NULL);
@@ -583,22 +582,14 @@ handle_dma_unmap(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg,
* temporary anyway since we're moving dirty page tracking out of
* the DMA controller.
*/
- ret = dma_controller_dirty_page_get(vfu_ctx->dma,
- (vfu_dma_addr_t)dma_unmap->addr,
- dma_unmap->size,
- dma_unmap->bitmap->pgsize,
- dma_unmap->bitmap->size,
- &bitmap);
- if (ret < 0) {
- vfu_log(vfu_ctx, LOG_ERR, "failed to get dirty page bitmap: %m");
- return -1;
- }
msg->out_size += sizeof(*dma_unmap->bitmap) + dma_unmap->bitmap->size;
} else if (dma_unmap->flags != 0) {
vfu_log(vfu_ctx, LOG_ERR, "bad flags=%#x", dma_unmap->flags);
return ERROR_INT(ENOTSUP);
}
+
+
msg->out_data = malloc(msg->out_size);
if (msg->out_data == NULL) {
return ERROR_INT(ENOMEM);
@@ -607,7 +598,16 @@ handle_dma_unmap(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg,
if (dma_unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
memcpy(msg->out_data + sizeof(*dma_unmap), dma_unmap->bitmap, sizeof(*dma_unmap->bitmap));
- memcpy(msg->out_data + sizeof(*dma_unmap) + sizeof(*dma_unmap->bitmap), bitmap, dma_unmap->bitmap->size);
+ ret = dma_controller_dirty_page_get(vfu_ctx->dma,
+ (vfu_dma_addr_t)dma_unmap->addr,
+ dma_unmap->size,
+ dma_unmap->bitmap->pgsize,
+ dma_unmap->bitmap->size,
+ msg->out_data + sizeof(*dma_unmap) + sizeof(*dma_unmap->bitmap));
+ if (ret < 0) {
+ vfu_log(vfu_ctx, LOG_ERR, "failed to get dirty page bitmap: %m");
+ return -1;
+ }
}
ret = dma_controller_remove_region(vfu_ctx->dma,
@@ -655,64 +655,58 @@ handle_dirty_pages_get(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
struct vfio_user_dirty_pages *dirty_pages_out;
struct vfio_user_bitmap_range *range_in;
struct vfio_user_bitmap_range *range_out;
- char *bitmap;
size_t argsz;
int ret;
- if (msg->in_size < sizeof(*dirty_pages_in) + sizeof(*range_in)) {
- vfu_log(vfu_ctx, LOG_ERR, "invalid message size %zu", msg->in_size);
- return ERROR_INT(EINVAL);
- }
dirty_pages_in = msg->in_data;
- range_in = msg->in_data + sizeof(*dirty_pages_in);
- ret = dma_controller_dirty_page_get(vfu_ctx->dma,
- (vfu_dma_addr_t)range_in->iova,
- range_in->size, range_in->bitmap.pgsize,
- range_in->bitmap.size, &bitmap);
- if (ret != 0) {
- vfu_log(vfu_ctx, LOG_WARNING,
- "failed to get dirty bitmap from DMA controller: %m");
- return -1;
+ if (msg->in_size < sizeof(*dirty_pages_in) + sizeof(*range_in)
+ || dirty_pages_in->argsz < sizeof(*dirty_pages_out)) {
+ vfu_log(vfu_ctx, LOG_ERR, "invalid message size=%zu argsz=%u",
+ msg->in_size, dirty_pages_in->argsz);
+ return ERROR_INT(EINVAL);
}
+ range_in = msg->in_data + sizeof(*dirty_pages_in);
+
/* NB: this is bound by MAX_DMA_SIZE. */
argsz = sizeof(*dirty_pages_out) + sizeof(*range_out) +
range_in->bitmap.size;
-
- /*
- * If the reply doesn't fit, reply with just the dirty pages header, giving
- * the needed argsz. Typically this shouldn't happen, as the client knows
- * the needed reply size and has already provided the correct bitmap size.
- */
- if (dirty_pages_in->argsz >= argsz) {
- msg->out_size = argsz;
- } else {
- msg->out_size = sizeof(*dirty_pages_out);
- }
-
+ msg->out_size = MIN(dirty_pages_in->argsz, argsz);
msg->out_data = malloc(msg->out_size);
-
if (msg->out_data == NULL) {
return -1;
}
-
dirty_pages_out = msg->out_data;
- memcpy(dirty_pages_out, dirty_pages_in, sizeof (*dirty_pages_out));
+ memcpy(dirty_pages_out, dirty_pages_in, sizeof(*dirty_pages_out));
dirty_pages_out->argsz = argsz;
+ /*
+ * If the reply doesn't fit, reply with just the dirty pages header, giving
+ * the needed argsz. Typically this shouldn't happen, as the client knows
+ * the needed reply size and has already provided the correct bitmap size.
+ */
if (dirty_pages_in->argsz >= argsz) {
- char *bitmap_out;
-
+ void *bitmap_out = msg->out_data + sizeof(*dirty_pages_out)
+ + sizeof(*range_out);
range_out = msg->out_data + sizeof(*dirty_pages_out);
- memcpy(range_out, range_in, sizeof (*range_out));
-
- bitmap_out = msg->out_data + sizeof(*dirty_pages_out)
- + sizeof(*range_out);
- memcpy(bitmap_out, bitmap, range_in->bitmap.size);
+ memcpy(range_out, range_in, sizeof(*range_out));
+ ret = dma_controller_dirty_page_get(vfu_ctx->dma,
+ (vfu_dma_addr_t)range_in->iova,
+ range_in->size,
+ range_in->bitmap.pgsize,
+ range_in->bitmap.size, bitmap_out);
+ if (ret != 0) {
+ ret = errno;
+ vfu_log(vfu_ctx, LOG_WARNING,
+ "failed to get dirty bitmap from DMA controller: %m");
+ free(msg->out_data);
+ msg->out_data = NULL;
+ msg->out_size = 0;
+ return ERROR_INT(ret);
+ }
}
-
return 0;
}
@@ -1588,12 +1582,12 @@ vfu_addr_to_sg(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr,
}
EXPORT int
-vfu_map_sg(vfu_ctx_t *vfu_ctx, const dma_sg_t *sg,
- struct iovec *iov, int cnt)
+vfu_map_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt,
+ int flags)
{
int ret;
- if (unlikely(vfu_ctx->dma_unregister == NULL)) {
+ if (unlikely(vfu_ctx->dma_unregister == NULL) || flags != 0) {
return ERROR_INT(EINVAL);
}
@@ -1606,7 +1600,7 @@ vfu_map_sg(vfu_ctx_t *vfu_ctx, const dma_sg_t *sg,
}
EXPORT void
-vfu_unmap_sg(vfu_ctx_t *vfu_ctx, const dma_sg_t *sg, struct iovec *iov, int cnt)
+vfu_unmap_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt)
{
if (unlikely(vfu_ctx->dma_unregister == NULL)) {
return;
@@ -1630,6 +1624,10 @@ vfu_dma_transfer(vfu_ctx_t *vfu_ctx, enum vfio_user_command cmd,
assert(vfu_ctx != NULL);
assert(sg != NULL);
+ if (cmd == VFIO_USER_DMA_WRITE && !sg->writeable) {
+ return ERROR_INT(EPERM);
+ }
+
rlen = sizeof(struct vfio_user_dma_region_access) +
MIN(sg->length, vfu_ctx->client_max_data_xfer_size);
@@ -1715,4 +1713,10 @@ vfu_dma_write(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, void *data)
return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_WRITE, sg, data);
}
+EXPORT bool
+vfu_sg_is_mappable(vfu_ctx_t *vfu_ctx, dma_sg_t *sg)
+{
+ return dma_sg_is_mappable(vfu_ctx->dma, sg);
+}
+
/* ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */
diff --git a/samples/server.c b/samples/server.c
index 6a31251..4da476b 100644
--- a/samples/server.c
+++ b/samples/server.c
@@ -212,13 +212,13 @@ static void do_dma_io(vfu_ctx_t *vfu_ctx, struct server_data *server_data)
unsigned char buf[count];
unsigned char md5sum1[MD5_DIGEST_LENGTH], md5sum2[MD5_DIGEST_LENGTH];
int i, ret;
- dma_sg_t sg;
+ dma_sg_t *sg = alloca(dma_sg_size());
assert(vfu_ctx != NULL);
ret = vfu_addr_to_sg(vfu_ctx,
(vfu_dma_addr_t)server_data->regions[0].iova.iov_base,
- count, &sg, 1, PROT_WRITE);
+ count, sg, 1, PROT_WRITE);
if (ret < 0) {
err(EXIT_FAILURE, "failed to map %p-%p",
server_data->regions[0].iova.iov_base,
@@ -229,7 +229,7 @@ static void do_dma_io(vfu_ctx_t *vfu_ctx, struct server_data *server_data)
get_md5sum(buf, count, md5sum1);
vfu_log(vfu_ctx, LOG_DEBUG, "%s: WRITE addr %p count %d", __func__,
server_data->regions[0].iova.iov_base, count);
- ret = vfu_dma_write(vfu_ctx, &sg, buf);
+ ret = vfu_dma_write(vfu_ctx, sg, buf);
if (ret < 0) {
err(EXIT_FAILURE, "vfu_dma_write failed");
}
@@ -237,7 +237,7 @@ static void do_dma_io(vfu_ctx_t *vfu_ctx, struct server_data *server_data)
memset(buf, 0, count);
vfu_log(vfu_ctx, LOG_DEBUG, "%s: READ addr %p count %d", __func__,
server_data->regions[0].iova.iov_base, count);
- ret = vfu_dma_read(vfu_ctx, &sg, buf);
+ ret = vfu_dma_read(vfu_ctx, sg, buf);
if (ret < 0) {
err(EXIT_FAILURE, "vfu_dma_read failed");
}
diff --git a/test/py/libvfio_user.py b/test/py/libvfio_user.py
index b276a82..1105fb5 100644
--- a/test/py/libvfio_user.py
+++ b/test/py/libvfio_user.py
@@ -389,7 +389,9 @@ class dma_sg_t(Structure):
("region", c.c_int),
("length", c.c_uint64),
("offset", c.c_uint64),
- ("mappable", c.c_bool)
+ ("writeable", c.c_bool),
+ ("le_next", c.c_void_p), # FIXME add struct for LIST_ENTRY
+ ("le_prev", c.c_void_p),
]
#
@@ -432,6 +434,10 @@ lib.vfu_setup_device_migration_callbacks.argtypes = (c.c_void_p,
c.POINTER(vfu_migration_callbacks_t), c.c_uint64)
lib.vfu_addr_to_sg.argtypes = (c.c_void_p, c.c_void_p, c.c_size_t,
c.POINTER(dma_sg_t), c.c_int, c.c_int)
+lib.vfu_map_sg.argtypes = (c.c_void_p, c.POINTER(dma_sg_t), c.POINTER(iovec_t),
+ c.c_int, c.c_int)
+lib.vfu_unmap_sg.argtypes = (c.c_void_p, c.POINTER(dma_sg_t),
+ c.POINTER(iovec_t), c.c_int)
def to_byte(val):
"""Cast an int to a byte value."""
@@ -691,4 +697,14 @@ def vfu_addr_to_sg(ctx, dma_addr, length, max_sg=1,
sg = dma_sg_t()
- return lib.vfu_addr_to_sg(ctx, dma_addr, length, sg, max_sg, prot)
+ return (lib.vfu_addr_to_sg(ctx, dma_addr, length, sg, max_sg, prot), sg)
+
+
+def vfu_map_sg(ctx, sg, iovec, cnt=1, flags=0):
+ # FIXME not sure whether cnt != 1 will work because iovec is an array
+ return lib.vfu_map_sg(ctx, sg, iovec, cnt, flags)
+
+def vfu_unmap_sg(ctx, sg, iovec, cnt=1):
+ return lib.vfu_unmap_sg(ctx, sg, iovec, cnt)
+
+# ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: #
diff --git a/test/py/test_dirty_pages.py b/test/py/test_dirty_pages.py
index 2c9be01..25a73da 100644
--- a/test/py/test_dirty_pages.py
+++ b/test/py/test_dirty_pages.py
@@ -135,7 +135,8 @@ def test_dirty_pages_start_bad_flags():
vfu_run_ctx(ctx)
get_reply(sock, expect=errno.EINVAL)
-def test_dirty_pages_start():
+
+def start_logging():
payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_START)
@@ -144,10 +145,11 @@ def test_dirty_pages_start():
vfu_run_ctx(ctx)
get_reply(sock)
- # should be idempotent
- sock.send(hdr + payload)
- vfu_run_ctx(ctx)
- get_reply(sock)
+
+def test_dirty_pages_start():
+ start_logging()
+ start_logging() # should be idempotent
+
def test_dirty_pages_get_short_read():
payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
@@ -162,9 +164,10 @@ def test_dirty_pages_get_short_read():
# This should in fact work; update when it does.
#
def test_dirty_pages_get_sub_range():
- dirty_pages = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ argsz = len(vfio_user_dirty_pages()) + len(vfio_user_bitmap_range()) + 8
+ dirty_pages = vfio_user_dirty_pages(argsz=argsz,
flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
- bitmap = vfio_user_bitmap(pgsize=0x1000, size=1)
+ bitmap = vfio_user_bitmap(pgsize=0x1000, size=8)
br = vfio_user_bitmap_range(iova=0x11000, size=0x1000, bitmap=bitmap)
hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES,
@@ -174,7 +177,8 @@ def test_dirty_pages_get_sub_range():
get_reply(sock, expect=errno.ENOTSUP)
def test_dirty_pages_get_bad_page_size():
- dirty_pages = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ argsz = len(vfio_user_dirty_pages()) + len(vfio_user_bitmap_range()) + 8
+ dirty_pages = vfio_user_dirty_pages(argsz=argsz,
flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
bitmap = vfio_user_bitmap(pgsize=0x2000, size=8)
br = vfio_user_bitmap_range(iova=0x10000, size=0x10000, bitmap=bitmap)
@@ -186,7 +190,8 @@ def test_dirty_pages_get_bad_page_size():
get_reply(sock, expect=errno.EINVAL)
def test_dirty_pages_get_bad_bitmap_size():
- dirty_pages = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ argsz = len(vfio_user_dirty_pages()) + len(vfio_user_bitmap_range()) + 8
+ dirty_pages = vfio_user_dirty_pages(argsz=argsz,
flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
bitmap = vfio_user_bitmap(pgsize=0x1000, size=1)
br = vfio_user_bitmap_range(iova=0x10000, size=0x10000, bitmap=bitmap)
@@ -247,14 +252,8 @@ def test_dirty_pages_get_unmodified():
assert br.bitmap.pgsize == 0x1000
assert br.bitmap.size == 8
-def test_dirty_pages_get_modified():
- # sufficient to mark the region dirty
- ret = vfu_addr_to_sg(ctx, dma_addr=0x10000, length=0x1000)
- assert ret == 1
-
- ret = vfu_addr_to_sg(ctx, dma_addr=0x14000, length=0x4000)
- assert ret == 1
+def get_dirty_page_bitmap():
argsz = len(vfio_user_dirty_pages()) + len(vfio_user_bitmap_range()) + 8
dirty_pages = vfio_user_dirty_pages(argsz=argsz,
@@ -270,11 +269,46 @@ def test_dirty_pages_get_modified():
dirty_pages, result = vfio_user_dirty_pages.pop_from_buffer(result)
br, result = vfio_user_bitmap_range.pop_from_buffer(result)
- bitmap = struct.unpack("Q", result)[0]
+ return struct.unpack("Q", result)[0]
+
+sg3 = None
+iovec3 = None
+def test_dirty_pages_get_modified():
+ ret, sg1 = vfu_addr_to_sg(ctx, dma_addr=0x10000, length=0x1000)
+ assert ret == 1
+ iovec1 = iovec_t()
+ ret = vfu_map_sg(ctx, sg1, iovec1)
+ assert ret == 0
+
+ ret, sg2 = vfu_addr_to_sg(ctx, dma_addr=0x11000, length=0x1000,
+ prot=mmap.PROT_READ)
+ assert ret == 1
+ iovec2 = iovec_t()
+ ret = vfu_map_sg(ctx, sg2, iovec2)
+ assert ret == 0
+
+ global sg3, iovec3
+ ret, sg3 = vfu_addr_to_sg(ctx, dma_addr=0x14000, length=0x4000)
+ assert ret == 1
+ iovec3 = iovec_t()
+ ret = vfu_map_sg(ctx, sg3, iovec3)
+ assert ret == 0
+
+ bitmap = get_dirty_page_bitmap()
assert bitmap == 0b11110001
-def test_dirty_pages_stop():
+ # unmap segment, dirty bitmap should be the same
+ vfu_unmap_sg(ctx, sg1, iovec1)
+ bitmap = get_dirty_page_bitmap()
+ assert bitmap == 0b11110001
+
+ # check again, previously unmapped segment should be clean
+ bitmap = get_dirty_page_bitmap()
+ assert bitmap == 0b11110000
+
+
+def stop_logging():
payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP)
@@ -291,6 +325,30 @@ def test_dirty_pages_stop():
vfu_run_ctx(ctx)
get_reply(sock)
+
+def test_dirty_pages_stop():
+ stop_logging()
+
+ # one segment is still mapped; start logging again and the bitmap should
+ # be dirty
+ start_logging()
+ assert get_dirty_page_bitmap() == 0b11110000
+
+ # unmap segment, bitmap should still be dirty
+ vfu_unmap_sg(ctx, sg3, iovec3)
+ assert get_dirty_page_bitmap() == 0b11110000
+
+ # bitmap should be clear after it was unmapped before previous request for
+ # dirty pages
+ assert get_dirty_page_bitmap() == 0b00000000
+
+ # FIXME we have a memory leak as we don't free dirty bitmaps when
+ # destroying the context.
+ stop_logging()
+
def test_dirty_pages_cleanup():
disconnect_client(ctx, sock)
vfu_destroy_ctx(ctx)
+
+
+# ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
diff --git a/test/unit-tests.c b/test/unit-tests.c
index 9945bac..8a41a76 100644
--- a/test/unit-tests.c
+++ b/test/unit-tests.c
@@ -407,7 +407,7 @@ test_dma_addr_to_sg(void **state UNUSED)
assert_int_equal(0x2000 - (unsigned long long)r->info.iova.iov_base,
sg.offset);
assert_int_equal(0x400, sg.length);
- assert_true(sg.mappable);
+ assert_true(vfu_sg_is_mappable(&vfu_ctx, &sg));
errno = 0;
r->info.prot = PROT_WRITE;