-rw-r--r--   docs/vfio-user.rst             |  75
-rw-r--r--   include/vfio-user.h            |  22
-rw-r--r--   lib/common.h                   |  12
-rw-r--r--   lib/dma.c                      |   8
-rw-r--r--   lib/libvfio-user.c             | 124
-rw-r--r--   lib/private.h                  |   6
-rw-r--r--   samples/client.c               |  86
-rw-r--r--   test/mocks.c                   |  31
-rw-r--r--   test/py/libvfio_user.py        | 215
-rw-r--r--   test/py/test_dirty_pages.py    | 298
-rw-r--r--   test/py/test_pci_caps.py       |   3
-rw-r--r--   test/py/test_pci_ext_caps.py   |   3
-rw-r--r--   test/unit-tests.c              |  47
13 files changed, 662 insertions, 268 deletions
diff --git a/docs/vfio-user.rst b/docs/vfio-user.rst
index 57d689b..e801859 100644
--- a/docs/vfio-user.rst
+++ b/docs/vfio-user.rst
@@ -626,7 +626,7 @@ The request payload for this message is a structure of the following format:
* *get dirty page bitmap* indicates that a dirty page bitmap must be
populated before unmapping the DMA region. The client must provide a
- `VFIO bitmap`_ structure, explained below, immediately following this
+ `VFIO Bitmap`_ structure, explained below, immediately following this
entry.
* *address* is the base DMA address of the DMA region.
@@ -639,9 +639,9 @@ The size of request message depends on whether or not the
* If set, the size of the total request message is: 16 + 24 + 16.
-.. _VFIO bitmap:
+.. _VFIO Bitmap:
-VFIO bitmap format
+VFIO Bitmap Format
""""""""""""""""""
+--------+--------+------+
@@ -665,12 +665,12 @@ portion of a DMA region is possible.
The server responds with the original DMA entry in the request. If the
*get dirty page bitmap* bit is set in flags in the request, then
-the server also includes the `VFIO bitmap`_ structure sent in the request,
+the server also includes the `VFIO Bitmap`_ structure sent in the request,
followed by the corresponding dirty page bitmap, where each bit represents
-one page of size *pgsize* in `VFIO bitmap`_ .
+one page of size *pgsize* in `VFIO Bitmap`_ .
The total size of the reply message is:
-16 + 24 + (16 + *size* in `VFIO bitmap`_ if *get dirty page bitmap* is set).
+16 + 24 + (16 + *size* in `VFIO Bitmap`_ if *get dirty page bitmap* is set).
``VFIO_USER_DEVICE_GET_INFO``
-----------------------------
@@ -1477,8 +1477,7 @@ Neither the request or reply have a payload.
This command is analogous to ``VFIO_IOMMU_DIRTY_PAGES``. It is sent by the client
to the server in order to control logging of dirty pages, usually during a live
-migration. The VFIO dirty bitmap structure is defined in ``<linux/vfio.h>``
-(``struct vfio_iommu_type1_dirty_bitmap``).
+migration.
Dirty page tracking is optional for server implementation; clients should not
rely on it.
@@ -1503,10 +1502,9 @@ Request
| | | 2 | VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP | |
| | +-----+----------------------------------------+ |
+-------+--------+-----------------------------------------+
-| data | 8 | 4 |
-+-------+--------+-----------------------------------------+
-* *argsz* is the size of the VFIO dirty bitmap info structure.
+* *argsz* is the size of the VFIO dirty pages structure for ``START/STOP``;
+  for ``GET_BITMAP``, it is the maximum size of the reply payload.
* *flags* defines the action to be performed by the server:
@@ -1517,22 +1515,22 @@ Request
* ``VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP`` instructs the server to stop logging
dirty pages.
- * ``VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP`` requests from the server to return
+ * ``VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP`` requests the server to return
the dirty bitmap for a specific IOVA range. The IOVA range is specified by
- "VFIO dirty bitmap get" structure, which must immediately follow the
- "VFIO dirty bitmap" structure, explained next. This operation is only valid
- if logging of dirty pages has been previously started. The server must
- respond the same way it does for ``VFIO_USER_DMA_UNMAP`` if
- ``VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP`` is set in the flags field of the
- table entry (``struct vfio_bitmap`` plus the bitmap must follow the
- response header).
+ a "VFIO Bitmap Range" structure, which must immediately follow this
+ "VFIO Dirty Pages" structure. See `VFIO Bitmap Range Format`_.
+ This operation is only valid if logging of dirty pages has been previously
+ started.
These flags are mutually exclusive with each other.
-* *data* unused, must be zero
+This part of the request is analogous to VFIO's ``struct
+vfio_iommu_type1_dirty_bitmap``.
+
+.. _VFIO Bitmap Range Format:
-VFIO Dirty Bitmap Get Format
-""""""""""""""""""""""""""""
+VFIO Bitmap Range Format
+""""""""""""""""""""""""
+--------+--------+------+
| Name | Offset | Size |
@@ -1548,7 +1546,10 @@ VFIO Dirty Bitmap Get Format
* *size* is the size of the IOVA region
-* *bitmap* is the VFIO bitmap explained in `VFIO bitmap`_.
+* *bitmap* is the VFIO Bitmap explained in `VFIO Bitmap`_.
+
+This part of the request is analogous to VFIO's ``struct
+vfio_iommu_type1_dirty_bitmap_get``.
Reply
^^^^^
@@ -1556,8 +1557,32 @@ Reply
For ``VFIO_IOMMU_DIRTY_PAGES_FLAG_START`` or
``VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP``, there is no reply payload.
-For ``VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP``, the reply payload is the
-corresponding set of dirty bitmaps.
+For ``VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP``, the reply payload is as follows:
+
++--------------+--------+-----------------------------------------+
+| Name | Offset | Size |
++==============+========+=========================================+
+| argsz | 0 | 4 |
++--------------+--------+-----------------------------------------+
+| flags | 4 | 4 |
++--------------+--------+-----------------------------------------+
+| | +-----+----------------------------------------+ |
+| | | Bit | Definition | |
+| | +=====+========================================+ |
+| | | 2 | VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP | |
+| | +-----+----------------------------------------+ |
++--------------+--------+-----------------------------------------+
+| bitmap range | 8 | 40 |
++--------------+--------+-----------------------------------------+
+| bitmap | 48 | variable |
++--------------+--------+-----------------------------------------+
+
+* *argsz* is the size required for the full reply payload (dirty pages structure
+ + bitmap range structure + actual bitmap)
+* *flags* is ``VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP``
+* *bitmap range* is the same bitmap range struct provided in the request, as
+ defined in `VFIO Bitmap Range Format`_.
+* *bitmap* is the actual dirty pages bitmap corresponding to the range request
VFIO Device Migration Info
--------------------------
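As a rough sketch of the GET_BITMAP wire layout documented above (not part of this change, and mirroring what samples/client.c below does), a client could build the request as follows. build_get_bitmap_request is an illustrative name; the structures and the flag come from include/vfio-user.h as modified by this patch, and sending the message over the socket is elided:

/*
 * Sketch only: build a VFIO_USER_DIRTY_PAGES GET_BITMAP request for a
 * 64 KiB IOVA range tracked at 4 KiB pages.
 */
#include <stdlib.h>
#include "vfio-user.h"   /* vfio_user_dirty_pages, vfio_user_bitmap_range */

static void *
build_get_bitmap_request(size_t *req_size)
{
    size_t bitmap_size = 8;   /* 16 x 4 KiB pages round up to one 64-bit word */
    struct vfio_user_dirty_pages *dp;
    struct vfio_user_bitmap_range *range;
    char *buf;

    /*
     * The request carries only the two headers; argsz advertises how large
     * a reply the client is prepared to accept.
     */
    *req_size = sizeof(*dp) + sizeof(*range);
    buf = calloc(1, *req_size);
    if (buf == NULL) {
        return NULL;
    }

    dp = (void *)buf;
    dp->argsz = sizeof(*dp) + sizeof(*range) + bitmap_size;
    dp->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;

    range = (void *)(buf + sizeof(*dp));
    range->iova = 0x10000;
    range->size = 0x10000;
    range->bitmap.pgsize = 0x1000;
    range->bitmap.size = bitmap_size;

    return buf;
}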
diff --git a/include/vfio-user.h b/include/vfio-user.h
index ef3159a..71112ef 100644
--- a/include/vfio-user.h
+++ b/include/vfio-user.h
@@ -155,7 +155,18 @@ struct vfio_user_irq_info {
uint32_t subindex;
} __attribute__((packed));
-/* based on struct vfio_iommu_type1_dirty_bitmap_get */
+/* Analogous to vfio_iommu_type1_dirty_bitmap. */
+struct vfio_user_dirty_pages {
+ uint32_t argsz;
+#ifndef VFIO_IOMMU_DIRTY_PAGES_FLAG_START
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
+#endif
+ uint32_t flags;
+} __attribute__((packed));
+
+/* Analogous to struct vfio_iommu_type1_dirty_bitmap_get. */
struct vfio_user_bitmap_range {
uint64_t iova;
uint64_t size;
@@ -200,15 +211,6 @@ struct vfio_device_migration_info {
};
#endif /* not a RHEL kernel */
-struct vfio_iommu_type1_dirty_bitmap {
- __u32 argsz;
- __u32 flags;
-#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0)
-#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1)
-#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
- __u8 data[];
-};
-
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0) */
#ifdef __cplusplus
diff --git a/lib/common.h b/lib/common.h
index 970e5d4..a56a0f0 100644
--- a/lib/common.h
+++ b/lib/common.h
@@ -37,6 +37,7 @@
#ifndef LIB_VFIO_USER_COMMON_H
#define LIB_VFIO_USER_COMMON_H
+#include <limits.h>
#include <stdint.h>
#define UNUSED __attribute__((unused))
@@ -56,6 +57,17 @@
#define ROUND_DOWN(x, a) ((x) & ~((a)-1))
#define ROUND_UP(x,a) ROUND_DOWN((x)+(a)-1, a)
+/*
+ * The size, in bytes, of the bitmap that represents the given range with the
+ * given page size.
+ */
+static inline size_t
+_get_bitmap_size(size_t size, size_t pgsize)
+{
+ size_t nr_pages = (size / pgsize) + (size % pgsize != 0);
+ return ROUND_UP(nr_pages, sizeof(uint64_t) * CHAR_BIT) / CHAR_BIT;
+}
+
#ifdef UNIT_TEST
#define MOCK_DEFINE(f) \
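A quick check of the arithmetic in _get_bitmap_size() above (a standalone sketch that copies the helper and the ROUND_UP macros from lib/common.h): a 0x10000-byte region tracked at a 0x1000-byte page size spans 16 pages, which round up to a single 64-bit word, i.e. an 8-byte bitmap, the value the new Python tests below use.

#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>

#define ROUND_DOWN(x, a) ((x) & ~((a)-1))
#define ROUND_UP(x,a) ROUND_DOWN((x)+(a)-1, a)

/* Copied from lib/common.h as added by this change. */
static inline size_t
_get_bitmap_size(size_t size, size_t pgsize)
{
    size_t nr_pages = (size / pgsize) + (size % pgsize != 0);
    return ROUND_UP(nr_pages, sizeof(uint64_t) * CHAR_BIT) / CHAR_BIT;
}

int main(void)
{
    assert(_get_bitmap_size(0x10000, 0x1000) == 8); /* 16 pages -> one u64 */
    assert(_get_bitmap_size(0x1000, 0x1000) == 8);  /* 1 page still one u64 */
    return 0;
}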
diff --git a/lib/dma.c b/lib/dma.c
index 125c4c8..32014f0 100644
--- a/lib/dma.c
+++ b/lib/dma.c
@@ -265,8 +265,8 @@ get_bitmap_size(size_t region_size, size_t pgsize)
if (region_size < pgsize) {
return ERROR_INT(EINVAL);
}
- size_t nr_pages = (region_size / pgsize) + (region_size % pgsize != 0);
- return ROUND_UP(nr_pages, sizeof(uint64_t) * CHAR_BIT) / CHAR_BIT;
+
+ return _get_bitmap_size(region_size, pgsize);
}
static int
@@ -544,8 +544,8 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
}
/*
- * FIXME they must be equal because this is how much data the client
- * expects to receive.
+ * They must be equal because this is how much data the client expects to
+ * receive.
*/
if (size != (size_t)bitmap_size) {
vfu_log(dma->vfu_ctx, LOG_ERR, "bad bitmap size %ld != %ld", size,
diff --git a/lib/libvfio-user.c b/lib/libvfio-user.c
index 3963312..c569714 100644
--- a/lib/libvfio-user.c
+++ b/lib/libvfio-user.c
@@ -642,73 +642,97 @@ handle_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t reason)
return do_device_reset(vfu_ctx, reason);
}
-int
-MOCK_DEFINE(handle_dirty_pages_get)(vfu_ctx_t *vfu_ctx,
- struct iovec **iovecs, size_t *nr_iovecs,
- struct vfio_user_bitmap_range *ranges,
- uint32_t size)
+static int
+handle_dirty_pages_get(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
{
- int ret = EINVAL;
- size_t i;
-
- assert(vfu_ctx != NULL);
- assert(iovecs != NULL);
- assert(nr_iovecs != NULL);
- assert(ranges != NULL);
+ struct vfio_user_dirty_pages *dirty_pages_in;
+ struct vfio_user_dirty_pages *dirty_pages_out;
+ struct vfio_user_bitmap_range *range_in;
+ struct vfio_user_bitmap_range *range_out;
+ char *bitmap;
+ size_t argsz;
+ int ret;
- if (size % sizeof(struct vfio_user_bitmap_range) != 0) {
+ if (msg->in_size < sizeof(*dirty_pages_in) + sizeof(*range_in)) {
+ vfu_log(vfu_ctx, LOG_ERR, "invalid message size %zu", msg->in_size);
return ERROR_INT(EINVAL);
}
- *nr_iovecs = size / sizeof(struct vfio_user_bitmap_range);
- *iovecs = malloc(*nr_iovecs * sizeof(struct iovec));
- if (*iovecs == NULL) {
+
+ dirty_pages_in = msg->in_data;
+ range_in = msg->in_data + sizeof(*dirty_pages_in);
+
+ ret = dma_controller_dirty_page_get(vfu_ctx->dma,
+ (vfu_dma_addr_t)range_in->iova,
+ range_in->size, range_in->bitmap.pgsize,
+ range_in->bitmap.size, &bitmap);
+ if (ret != 0) {
+ vfu_log(vfu_ctx, LOG_WARNING,
+ "failed to get dirty bitmap from DMA controller: %m");
return -1;
}
- for (i = 0; i < *nr_iovecs; i++) {
- struct vfio_user_bitmap_range *r = &ranges[i];
- ret = dma_controller_dirty_page_get(vfu_ctx->dma,
- (vfu_dma_addr_t)r->iova, r->size,
- r->bitmap.pgsize, r->bitmap.size,
- (char **)&((*iovecs)[i].iov_base));
- if (ret != 0) {
- ret = errno;
- vfu_log(vfu_ctx, LOG_WARNING,
- "failed to get dirty bitmap from DMA controller: %m");
- goto out;
- }
- (*iovecs)[i].iov_len = r->bitmap.size;
+ /*
+ * FIXME: this is unbounded until we can limit the maximum DMA region size.
+ */
+ argsz = sizeof(*dirty_pages_out) + sizeof(*range_out) +
+ range_in->bitmap.size;
+
+ /*
+ * If the reply doesn't fit, reply with just the dirty pages header, giving
+ * the needed argsz. Typically this shouldn't happen, as the client knows
+ * the needed reply size and has already provided the correct bitmap size.
+ */
+ if (dirty_pages_in->argsz >= argsz) {
+ msg->out_size = argsz;
+ } else {
+ msg->out_size = sizeof(*dirty_pages_out);
}
-out:
- if (ret != 0) {
- if (*iovecs != NULL) {
- free(*iovecs);
- *iovecs = NULL;
- }
- return ERROR_INT(ret);
+ msg->out_data = malloc(msg->out_size);
+
+ if (msg->out_data == NULL) {
+ return -1;
+ }
+
+ dirty_pages_out = msg->out_data;
+ memcpy(dirty_pages_out, dirty_pages_in, sizeof (*dirty_pages_out));
+ dirty_pages_out->argsz = argsz;
+
+ if (dirty_pages_in->argsz >= argsz) {
+ char *bitmap_out;
+
+ range_out = msg->out_data + sizeof(*dirty_pages_out);
+ memcpy(range_out, range_in, sizeof (*range_out));
+
+ bitmap_out = msg->out_data + sizeof(*dirty_pages_out)
+ + sizeof(*range_out);
+ memcpy(bitmap_out, bitmap, range_in->bitmap.size);
}
return 0;
}
-int
-MOCK_DEFINE(handle_dirty_pages)(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
+static int
+handle_dirty_pages(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
{
- struct vfio_iommu_type1_dirty_bitmap *dirty_bitmap = msg->in_data;
+ struct vfio_user_dirty_pages *dirty_pages = msg->in_data;
int ret;
assert(vfu_ctx != NULL);
assert(msg != NULL);
- // FIXME: doesn't match other in_size/argsz checks
- if (msg->in_size < sizeof(*dirty_bitmap) ||
- msg->in_size != dirty_bitmap->argsz) {
- vfu_log(vfu_ctx, LOG_ERR, "invalid header size %zu", msg->in_size);
+ if (msg->in_size < sizeof(*dirty_pages) ||
+ dirty_pages->argsz < sizeof(*dirty_pages)) {
+ vfu_log(vfu_ctx, LOG_ERR, "invalid message size %zu", msg->in_size);
return ERROR_INT(EINVAL);
}
- switch (dirty_bitmap->flags) {
+ if (vfu_ctx->migration == NULL) {
+ vfu_log(vfu_ctx, LOG_ERR, "migration not configured");
+ return ERROR_INT(ENOTSUP);
+ }
+
+ switch (dirty_pages->flags) {
case VFIO_IOMMU_DIRTY_PAGES_FLAG_START:
ret = dma_controller_dirty_page_logging_start(vfu_ctx->dma,
migration_get_pgsize(vfu_ctx->migration));
@@ -719,18 +743,12 @@ MOCK_DEFINE(handle_dirty_pages)(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
ret = 0;
break;
- case VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP: {
- struct vfio_user_bitmap_range *get;
- get = (void *)(dirty_bitmap + 1);
-
- ret = handle_dirty_pages_get(vfu_ctx, &msg->out_iovecs,
- &msg->nr_out_iovecs, get,
- msg->in_size - sizeof(*dirty_bitmap));
+ case VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP:
+ ret = handle_dirty_pages_get(vfu_ctx, msg);
break;
- }
default:
- vfu_log(vfu_ctx, LOG_ERR, "bad flags %#x", dirty_bitmap->flags);
+ vfu_log(vfu_ctx, LOG_ERR, "bad flags %#x", dirty_pages->flags);
ret = ERROR_INT(EINVAL);
break;
}
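Given the truncation behaviour above, a minimal client-side sketch (illustrative only; reply_needs_retry and its parameters are invented names, and struct vfio_user_dirty_pages comes from include/vfio-user.h):

#include <stddef.h>
#include <stdint.h>
#include "vfio-user.h"   /* struct vfio_user_dirty_pages, as modified above */

static int
reply_needs_retry(const struct vfio_user_dirty_pages *reply, size_t reply_size,
                  uint32_t offered_argsz, uint32_t *needed_argsz)
{
    /*
     * A header-only reply whose argsz exceeds what was offered means the
     * bitmap did not fit; retry with a buffer of reply->argsz bytes.
     */
    if (reply_size == sizeof(*reply) && reply->argsz > offered_argsz) {
        *needed_argsz = reply->argsz;
        return 1;
    }
    return 0;
}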
diff --git a/lib/private.h b/lib/private.h
index 4d9e562..c9a8af7 100644
--- a/lib/private.h
+++ b/lib/private.h
@@ -193,18 +193,12 @@ handle_device_get_region_info(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg);
int
handle_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t reason);
-MOCK_DECLARE(int, handle_dirty_pages, vfu_ctx_t *vfu_ctx, vfu_msg_t *msg);
-
MOCK_DECLARE(bool, cmd_allowed_when_stopped_and_copying, uint16_t cmd);
MOCK_DECLARE(bool, should_exec_command, vfu_ctx_t *vfu_ctx, uint16_t cmd);
MOCK_DECLARE(int, process_request, vfu_ctx_t *vfu_ctx);
-MOCK_DECLARE(int, handle_dirty_pages_get, vfu_ctx_t *vfu_ctx,
- struct iovec **iovecs, size_t *nr_iovecs,
- struct vfio_user_bitmap_range *ranges, uint32_t size);
-
#endif /* LIB_VFIO_USER_PRIVATE_H */
/* ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */
diff --git a/samples/client.c b/samples/client.c
index 82cb12b..71bc4ac 100644
--- a/samples/client.c
+++ b/samples/client.c
@@ -640,53 +640,43 @@ handle_dma_io(int sock, struct vfio_user_dma_map *dma_regions,
}
static void
-get_dirty_bitmaps(int sock, struct vfio_user_dma_map *dma_regions,
- UNUSED int nr_dma_regions)
+get_dirty_bitmap(int sock, struct vfio_user_dma_map *dma_region)
{
- struct vfio_iommu_type1_dirty_bitmap dirty_bitmap = { 0 };
- struct vfio_user_bitmap_range bitmaps[2] = { { 0 }, };
+ uint64_t bitmap_size = _get_bitmap_size(dma_region->size,
+ sysconf(_SC_PAGESIZE));
+ struct vfio_user_dirty_pages *dirty_pages;
+ struct vfio_user_bitmap_range *range;
+ char *bitmap;
+ size_t size;
+ void *data;
int ret;
- size_t i;
- struct iovec iovecs[4] = {
- [1] = {
- .iov_base = &dirty_bitmap,
- .iov_len = sizeof(dirty_bitmap)
- }
- };
- struct vfio_user_header hdr = {0};
- uint64_t data[ARRAY_SIZE(bitmaps)];
- assert(dma_regions != NULL);
- //FIXME: Is below assert correct?
- //assert(nr_dma_regions >= (int)ARRAY_SIZE(bitmaps));
+ size = sizeof(*dirty_pages) + sizeof(*range) + bitmap_size;
- for (i = 0; i < ARRAY_SIZE(bitmaps); i++) {
- bitmaps[i].iova = dma_regions[i].addr;
- bitmaps[i].size = dma_regions[i].size;
- bitmaps[i].bitmap.size = sizeof(uint64_t); /* FIXME calculate based on page and IOVA size, don't hardcode */
- bitmaps[i].bitmap.pgsize = sysconf(_SC_PAGESIZE);
- iovecs[(i + 2)].iov_base = &bitmaps[i]; /* FIXME the +2 is because iovecs[0] is the vfio_user_header and iovecs[1] is vfio_iommu_type1_dirty_bitmap */
- iovecs[(i + 2)].iov_len = sizeof(struct vfio_user_bitmap_range);
- }
+ data = calloc(1, size);
+ assert(data != NULL);
- /*
- * FIXME there should be at least two IOVAs. Send single message for two
- * IOVAs and ensure only one bit is set in first IOVA.
- */
- dirty_bitmap.argsz = sizeof(dirty_bitmap) + ARRAY_SIZE(bitmaps) * sizeof(struct vfio_user_bitmap_range);
- dirty_bitmap.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
- ret = tran_sock_msg_iovec(sock, 0, VFIO_USER_DIRTY_PAGES,
- iovecs, ARRAY_SIZE(iovecs),
- NULL, 0,
- &hdr, data, ARRAY_SIZE(data) * sizeof(uint64_t), NULL, 0);
+ dirty_pages = data;
+ dirty_pages->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+ dirty_pages->argsz = sizeof(*dirty_pages) + sizeof(*range) + bitmap_size;
+
+ range = data + sizeof(*dirty_pages);
+ range->iova = dma_region->addr;
+ range->size = dma_region->size;
+ range->bitmap.size = bitmap_size;
+ range->bitmap.pgsize = sysconf(_SC_PAGESIZE);
+
+ bitmap = data + sizeof(*dirty_pages) + sizeof(*range);
+
+ ret = tran_sock_msg(sock, 0x99, VFIO_USER_DIRTY_PAGES,
+ data, sizeof(*dirty_pages) + sizeof(*range),
+ NULL, data, size);
if (ret != 0) {
- err(EXIT_FAILURE, "failed to start dirty page logging");
+ err(EXIT_FAILURE, "failed to get dirty page bitmap");
}
- for (i = 0; i < ARRAY_SIZE(bitmaps); i++) {
- printf("client: %s: %#lx-%#lx\t%#lx\n", __func__, bitmaps[i].iova,
- bitmaps[i].iova + bitmaps[i].size - 1, data[i]);
- }
+ printf("client: %s: %#lx-%#lx\t%#x\n", __func__, range->iova,
+ range->iova + range->size - 1, bitmap[0]);
}
enum migration {
@@ -1073,7 +1063,7 @@ int main(int argc, char *argv[])
int server_max_fds;
size_t pgsize;
int nr_dma_regions;
- struct vfio_iommu_type1_dirty_bitmap dirty_bitmap = {0};
+ struct vfio_user_dirty_pages dirty_pages = {0};
int opt;
time_t t;
char *path_to_server = NULL;
@@ -1170,10 +1160,10 @@ int main(int argc, char *argv[])
*/
irq_fd = configure_irqs(sock);
- dirty_bitmap.argsz = sizeof(dirty_bitmap);
- dirty_bitmap.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
+ dirty_pages.argsz = sizeof(dirty_pages);
+ dirty_pages.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
ret = tran_sock_msg(sock, 0, VFIO_USER_DIRTY_PAGES,
- &dirty_bitmap, sizeof(dirty_bitmap),
+ &dirty_pages, sizeof(dirty_pages),
NULL, NULL, 0);
if (ret != 0) {
err(EXIT_FAILURE, "failed to start dirty page logging");
@@ -1194,12 +1184,14 @@ int main(int argc, char *argv[])
handle_dma_io(sock, dma_regions, nr_dma_regions, dma_region_fds);
- get_dirty_bitmaps(sock, dma_regions, nr_dma_regions);
+ for (i = 0; i < nr_dma_regions; i++) {
+ get_dirty_bitmap(sock, &dma_regions[i]);
+ }
- dirty_bitmap.argsz = sizeof(dirty_bitmap);
- dirty_bitmap.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
+ dirty_pages.argsz = sizeof(dirty_pages);
+ dirty_pages.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
ret = tran_sock_msg(sock, 0, VFIO_USER_DIRTY_PAGES,
- &dirty_bitmap, sizeof(dirty_bitmap),
+ &dirty_pages, sizeof(dirty_pages),
NULL, NULL, 0);
if (ret != 0) {
err(EXIT_FAILURE, "failed to stop dirty page logging");
diff --git a/test/mocks.c b/test/mocks.c
index 4f202d5..6e34d4a 100644
--- a/test/mocks.c
+++ b/test/mocks.c
@@ -62,11 +62,9 @@ static struct function funcs[] = {
{ .name = "dma_controller_add_region" },
{ .name = "dma_controller_remove_region" },
{ .name = "dma_controller_unmap_region" },
- { .name = "handle_dirty_pages" },
{ .name = "process_request" },
{ .name = "should_exec_command" },
{ .name = "migration_region_access_registers" },
- { .name = "handle_dirty_pages_get" },
{ .name = "handle_device_state" },
{. name = "vfio_migr_state_transition_is_valid" },
{ .name = "state_trans_notify" },
@@ -214,18 +212,6 @@ should_exec_command(vfu_ctx_t *vfu_ctx, uint16_t cmd)
return mock();
}
-int
-handle_dirty_pages(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
-{
- if (!is_patched("handle_dirty_pages")) {
- return __real_handle_dirty_pages(vfu_ctx, msg);
- }
- check_expected(vfu_ctx);
- check_expected(msg);
- errno = mock();
- return mock();
-}
-
ssize_t
migration_region_access_registers(vfu_ctx_t *vfu_ctx, char *buf, size_t count,
loff_t pos, bool is_write)
@@ -243,23 +229,6 @@ migration_region_access_registers(vfu_ctx_t *vfu_ctx, char *buf, size_t count,
return mock();
}
-int
-handle_dirty_pages_get(vfu_ctx_t *vfu_ctx,
- struct iovec **iovecs, size_t *nr_iovecs,
- struct vfio_user_bitmap_range *ranges, uint32_t size)
-{
- if (!is_patched("handle_dirty_pages_get")) {
- return __real_handle_dirty_pages_get(vfu_ctx, iovecs, nr_iovecs,
- ranges, size);
- }
- check_expected(vfu_ctx);
- check_expected(iovecs);
- check_expected(nr_iovecs);
- check_expected(ranges);
- check_expected(size);
- return mock();
-}
-
ssize_t
handle_device_state(vfu_ctx_t *vfu_ctx, struct migration *migr,
uint32_t device_state, bool notify) {
diff --git a/test/py/libvfio_user.py b/test/py/libvfio_user.py
index fb201dd..a664da0 100644
--- a/test/py/libvfio_user.py
+++ b/test/py/libvfio_user.py
@@ -35,6 +35,7 @@ from collections import namedtuple
from types import SimpleNamespace
import ctypes as c
import json
+import mmap
import os
import pathlib
import socket
@@ -146,6 +147,16 @@ VFU_REGION_FLAG_WRITE = 2
VFU_REGION_FLAG_RW = (VFU_REGION_FLAG_READ | VFU_REGION_FLAG_WRITE)
VFU_REGION_FLAG_MEM = 4
+VFIO_USER_F_DMA_REGION_READ = (1 << 0)
+VFIO_USER_F_DMA_REGION_WRITE = (1 << 1)
+VFIO_USER_F_DMA_REGION_MAPPABLE = (1 << 2)
+
+VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP = (1 << 0)
+
+VFIO_IOMMU_DIRTY_PAGES_FLAG_START = (1 << 0)
+VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP = (1 << 1)
+VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP = (1 << 2)
+
# enum vfu_dev_irq_type
VFU_DEV_INTX_IRQ = 0
VFU_DEV_MSI_IRQ = 1
@@ -169,6 +180,8 @@ VFU_CAP_FLAG_EXTENDED = (1 << 0)
VFU_CAP_FLAG_CALLBACK = (1 << 1)
VFU_CAP_FLAG_READONLY = (1 << 2)
+VFU_MIGR_CALLBACKS_VERS = 1
+
SOCK_PATH = b"/tmp/vfio-user.sock.%d" % os.getpid()
topdir = os.path.realpath(os.path.dirname(__file__) + "/../..")
@@ -195,8 +208,8 @@ class Structure(c.Structure):
class vfu_bar_t(c.Union):
_pack_ = 1
_fields_ = [
- ("mem", c.c_int),
- ("io", c.c_int)
+ ("mem", c.c_int32),
+ ("io", c.c_int32)
]
class vfu_pci_hdr_intr_t(Structure):
@@ -209,9 +222,9 @@ class vfu_pci_hdr_intr_t(Structure):
class vfu_pci_hdr_t(Structure):
_pack_ = 1
_fields_ = [
- ("id", c.c_int),
- ("cmd", c.c_short),
- ("sts", c.c_short),
+ ("id", c.c_int32),
+ ("cmd", c.c_uint16),
+ ("sts", c.c_uint16),
("rid", c.c_byte),
("cc_pi", c.c_byte),
("cc_scc", c.c_byte),
@@ -221,9 +234,9 @@ class vfu_pci_hdr_t(Structure):
("htype", c.c_byte),
("bist", c.c_byte),
("bars", vfu_bar_t * PCI_BARS_NR),
- ("ccptr", c.c_int),
- ("ss", c.c_int),
- ("erom", c.c_int),
+ ("ccptr", c.c_int32),
+ ("ss", c.c_int32),
+ ("erom", c.c_int32),
("cap", c.c_byte),
("res1", c.c_byte * 7),
("intr", vfu_pci_hdr_intr_t),
@@ -234,66 +247,143 @@ class vfu_pci_hdr_t(Structure):
class iovec_t(Structure):
_fields_ = [
("iov_base", c.c_void_p),
- ("iov_len", c.c_int)
+ ("iov_len", c.c_int32)
]
class vfio_irq_info(Structure):
+ _pack_ = 1
_fields_ = [
- ("argsz", c.c_uint),
- ("flags", c.c_uint),
- ("index", c.c_uint),
- ("count", c.c_uint),
+ ("argsz", c.c_uint32),
+ ("flags", c.c_uint32),
+ ("index", c.c_uint32),
+ ("count", c.c_uint32),
]
class vfio_irq_set(Structure):
+ _pack_ = 1
_fields_ = [
- ("argsz", c.c_uint),
- ("flags", c.c_uint),
- ("index", c.c_uint),
- ("start", c.c_uint),
- ("count", c.c_uint),
+ ("argsz", c.c_uint32),
+ ("flags", c.c_uint32),
+ ("index", c.c_uint32),
+ ("start", c.c_uint32),
+ ("count", c.c_uint32),
]
class vfio_user_device_info(Structure):
+ _pack_ = 1
_fields_ = [
- ("argsz", c.c_uint),
- ("flags", c.c_uint),
- ("num_regions", c.c_uint),
- ("num_irqs", c.c_uint),
+ ("argsz", c.c_uint32),
+ ("flags", c.c_uint32),
+ ("num_regions", c.c_uint32),
+ ("num_irqs", c.c_uint32),
]
class vfio_region_info(Structure):
+ _pack_ = 1
_fields_ = [
- ("argsz", c.c_uint),
- ("flags", c.c_uint),
- ("index", c.c_uint),
- ("cap_offset", c.c_uint),
- ("size", c.c_ulong),
- ("offset", c.c_ulong),
+ ("argsz", c.c_uint32),
+ ("flags", c.c_uint32),
+ ("index", c.c_uint32),
+ ("cap_offset", c.c_uint32),
+ ("size", c.c_uint64),
+ ("offset", c.c_uint64),
]
class vfio_region_info_cap_type(Structure):
+ _pack_ = 1
_fields_ = [
- ("id", c.c_ushort),
- ("version", c.c_ushort),
- ("next", c.c_uint),
- ("type", c.c_uint),
- ("subtype", c.c_uint),
+ ("id", c.c_uint16),
+ ("version", c.c_uint16),
+ ("next", c.c_uint32),
+ ("type", c.c_uint32),
+ ("subtype", c.c_uint32),
]
class vfio_region_info_cap_sparse_mmap(Structure):
+ _pack_ = 1
_fields_ = [
- ("id", c.c_ushort),
- ("version", c.c_ushort),
- ("next", c.c_uint),
- ("nr_areas", c.c_uint),
- ("reserved", c.c_uint),
+ ("id", c.c_uint16),
+ ("version", c.c_uint16),
+ ("next", c.c_uint32),
+ ("nr_areas", c.c_uint32),
+ ("reserved", c.c_uint32),
]
class vfio_region_sparse_mmap_area(Structure):
+ _pack_ = 1
+ _fields_ = [
+ ("offset", c.c_uint64),
+ ("size", c.c_uint64),
+ ]
+
+class vfio_user_dma_map(Structure):
+ _pack_ = 1
+ _fields_ = [
+ ("argsz", c.c_uint32),
+ ("flags", c.c_uint32),
+ ("offset", c.c_uint64),
+ ("addr", c.c_uint64),
+ ("size", c.c_uint64),
+ ]
+
+class vfu_dma_info_t(Structure):
+ _fields_ = [
+ ("iova", iovec_t),
+ ("vaddr", c.c_void_p),
+ ("mapping", iovec_t),
+ ("page_size", c.c_size_t),
+ ("prot", c.c_uint32)
+ ]
+
+class vfio_user_dirty_pages(Structure):
+ _pack_ = 1
+ _fields_ = [
+ ("argsz", c.c_uint32),
+ ("flags", c.c_uint32)
+ ]
+
+class vfio_user_bitmap(Structure):
+ _pack_ = 1
_fields_ = [
- ("offset", c.c_ulong),
- ("size", c.c_ulong),
+ ("pgsize", c.c_uint64),
+ ("size", c.c_uint64)
+ ]
+
+class vfio_user_bitmap_range(Structure):
+ _pack_ = 1
+ _fields_ = [
+ ("iova", c.c_uint64),
+ ("size", c.c_uint64),
+ ("bitmap", vfio_user_bitmap)
+ ]
+
+transition_cb_t = c.CFUNCTYPE(c.c_int, c.c_void_p, c.c_int)
+get_pending_bytes_cb_t = c.CFUNCTYPE(c.c_uint64, c.c_void_p)
+prepare_data_cb_t = c.CFUNCTYPE(c.c_void_p, c.POINTER(c.c_uint64),
+ c.POINTER(c.c_uint64))
+read_data_cb_t = c.CFUNCTYPE(c.c_ssize_t, c.c_void_p, c.c_void_p,
+ c.c_uint64, c.c_uint64)
+write_data_cb_t = c.CFUNCTYPE(c.c_ssize_t, c.c_void_p, c.c_uint64)
+data_written_cb_t = c.CFUNCTYPE(c.c_int, c.c_void_p, c.c_uint64)
+
+class vfu_migration_callbacks_t(Structure):
+ _fields_ = [
+ ("version", c.c_int),
+ ("transition", transition_cb_t),
+ ("get_pending_bytes", get_pending_bytes_cb_t),
+ ("prepare_data", prepare_data_cb_t),
+ ("read_data", read_data_cb_t),
+ ("write_data", write_data_cb_t),
+ ("data_written", data_written_cb_t),
+ ]
+
+class dma_sg_t(Structure):
+ _fields_ = [
+ ("dma_addr", c.c_void_p),
+ ("region", c.c_int),
+ ("length", c.c_uint64),
+ ("offset", c.c_uint64),
+ ("mappable", c.c_bool)
]
#
@@ -327,7 +417,15 @@ lib.vfu_pci_find_next_capability.argtypes = (c.c_void_p, c.c_bool, c.c_ulong,
c.c_int)
lib.vfu_pci_find_next_capability.restype = (c.c_ulong)
lib.vfu_irq_trigger.argtypes = (c.c_void_p, c.c_uint)
-
+vfu_dma_register_cb_t = c.CFUNCTYPE(None, c.c_void_p, c.POINTER(vfu_dma_info_t))
+vfu_dma_unregister_cb_t = c.CFUNCTYPE(c.c_int, c.c_void_p,
+ c.POINTER(vfu_dma_info_t))
+lib.vfu_setup_device_dma.argtypes = (c.c_void_p, vfu_dma_register_cb_t,
+ vfu_dma_unregister_cb_t)
+lib.vfu_setup_device_migration_callbacks.argtypes = (c.c_void_p,
+ c.POINTER(vfu_migration_callbacks_t), c.c_uint64)
+lib.vfu_addr_to_sg.argtypes = (c.c_void_p, c.c_void_p, c.c_size_t,
+ c.POINTER(dma_sg_t), c.c_int, c.c_int)
def to_byte(val):
"""Cast an int to a byte value."""
@@ -553,3 +651,38 @@ def vfu_irq_trigger(ctx, subindex):
assert ctx != None
return lib.vfu_irq_trigger(ctx, subindex)
+
+def vfu_setup_device_dma(ctx, register_cb=None, unregister_cb=None):
+ assert ctx != None
+
+ return lib.vfu_setup_device_dma(ctx, c.cast(register_cb,
+ vfu_dma_register_cb_t),
+ c.cast(unregister_cb,
+ vfu_dma_unregister_cb_t))
+
+def vfu_setup_device_migration_callbacks(ctx, cbs=None, offset=0):
+ assert ctx != None
+
+ @c.CFUNCTYPE(c.c_int)
+ def stub():
+ return 0
+
+ if not cbs:
+ cbs = vfu_migration_callbacks_t()
+ cbs.version = VFU_MIGR_CALLBACKS_VERS
+ cbs.transition = c.cast(stub, transition_cb_t)
+ cbs.get_pending_bytes = c.cast(stub, get_pending_bytes_cb_t)
+ cbs.prepare_data = c.cast(stub, prepare_data_cb_t)
+ cbs.read_data = c.cast(stub, read_data_cb_t)
+ cbs.write_data = c.cast(stub, write_data_cb_t)
+ cbs.data_written = c.cast(stub, data_written_cb_t)
+
+ return lib.vfu_setup_device_migration_callbacks(ctx, cbs, offset)
+
+def vfu_addr_to_sg(ctx, dma_addr, length, max_sg=1,
+ prot=(mmap.PROT_READ | mmap.PROT_WRITE)):
+ assert ctx != None
+
+ sg = dma_sg_t()
+
+ return lib.vfu_addr_to_sg(ctx, dma_addr, length, sg, max_sg, prot)
diff --git a/test/py/test_dirty_pages.py b/test/py/test_dirty_pages.py
new file mode 100644
index 0000000..5d4f6db
--- /dev/null
+++ b/test/py/test_dirty_pages.py
@@ -0,0 +1,298 @@
+#
+# Copyright (c) 2021 Nutanix Inc. All rights reserved.
+#
+# Authors: John Levon <john.levon@nutanix.com>
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Nutanix nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#
+
+from libvfio_user import *
+import errno
+import mmap
+import tempfile
+
+ctx = None
+
+@vfu_dma_register_cb_t
+def dma_register(ctx, info):
+ pass
+
+@vfu_dma_unregister_cb_t
+def dma_unregister(ctx, info):
+    return 0
+
+def test_dirty_pages_setup():
+ global ctx, sock
+
+ ctx = vfu_create_ctx(flags=LIBVFIO_USER_FLAG_ATTACH_NB)
+ assert ctx != None
+
+ ret = vfu_pci_init(ctx)
+ assert ret == 0
+
+ ret = vfu_setup_device_dma(ctx, dma_register, dma_unregister)
+ assert ret == 0
+
+ f = tempfile.TemporaryFile()
+ f.truncate(0x2000)
+
+ mmap_areas = [ (0x1000, 0x1000) ]
+
+ ret = vfu_setup_region(ctx, index=VFU_PCI_DEV_MIGR_REGION_IDX, size=0x2000,
+ flags=VFU_REGION_FLAG_RW, mmap_areas=mmap_areas,
+ fd=f.fileno())
+ assert ret == 0
+
+ ret = vfu_realize_ctx(ctx)
+ assert ret == 0
+
+ sock = connect_client(ctx)
+
+ f = tempfile.TemporaryFile()
+ f.truncate(0x10000)
+
+ payload = vfio_user_dma_map(argsz=len(vfio_user_dma_map()),
+ flags=(VFIO_USER_F_DMA_REGION_READ |
+ VFIO_USER_F_DMA_REGION_WRITE |
+ VFIO_USER_F_DMA_REGION_MAPPABLE),
+ offset=0, addr=0x10000, size=0x10000)
+
+ hdr = vfio_user_header(VFIO_USER_DMA_MAP, size=len(payload))
+
+ sock.sendmsg([hdr + payload], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
+ struct.pack("I", f.fileno()))])
+ vfu_run_ctx(ctx)
+ get_reply(sock)
+
+def test_dirty_pages_short_write():
+ payload = struct.pack("I", 8)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.EINVAL)
+
+def test_dirty_pages_bad_argsz():
+ payload = vfio_user_dirty_pages(argsz=4,
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_START)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.EINVAL)
+
+def test_dirty_pages_start_no_migration():
+ payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_START)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.ENOTSUP)
+
+def test_dirty_pages_start_bad_flags():
+ #
+ # This is a little cheeky, after vfu_realize_ctx(), but it works at the
+ # moment.
+ #
+ vfu_setup_device_migration_callbacks(ctx, offset=0x1000)
+
+ payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=(VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP))
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.EINVAL)
+
+ payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=(VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP))
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.EINVAL)
+
+def test_dirty_pages_start():
+ payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_START)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock)
+
+ # should be idempotent
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock)
+
+def test_dirty_pages_get_short_read():
+ payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.EINVAL)
+
+#
+# This should in fact work; update when it does.
+#
+def test_dirty_pages_get_sub_range():
+ dirty_pages = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
+ bitmap = vfio_user_bitmap(pgsize=0x1000, size=1)
+ br = vfio_user_bitmap_range(iova=0x11000, size=0x1000, bitmap=bitmap)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES,
+ size=len(dirty_pages) + len(br))
+ sock.send(hdr + dirty_pages + br)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.ENOTSUP)
+
+def test_dirty_pages_get_bad_page_size():
+ dirty_pages = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
+ bitmap = vfio_user_bitmap(pgsize=0x2000, size=8)
+ br = vfio_user_bitmap_range(iova=0x10000, size=0x10000, bitmap=bitmap)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES,
+ size=len(dirty_pages) + len(br))
+ sock.send(hdr + dirty_pages + br)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.EINVAL)
+
+def test_dirty_pages_get_bad_bitmap_size():
+ dirty_pages = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
+ bitmap = vfio_user_bitmap(pgsize=0x1000, size=1)
+ br = vfio_user_bitmap_range(iova=0x10000, size=0x10000, bitmap=bitmap)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES,
+ size=len(dirty_pages) + len(br))
+ sock.send(hdr + dirty_pages + br)
+ vfu_run_ctx(ctx)
+ get_reply(sock, expect=errno.EINVAL)
+
+def test_dirty_pages_get_short_reply():
+ dirty_pages = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
+ bitmap = vfio_user_bitmap(pgsize=0x1000, size=8)
+ br = vfio_user_bitmap_range(iova=0x10000, size=0x10000, bitmap=bitmap)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES,
+ size=len(dirty_pages) + len(br))
+ sock.send(hdr + dirty_pages + br)
+ vfu_run_ctx(ctx)
+ result = get_reply(sock)
+
+ assert len(result) == len(vfio_user_dirty_pages())
+
+ dirty_pages, _ = vfio_user_dirty_pages.pop_from_buffer(result)
+
+ argsz = len(vfio_user_dirty_pages()) + len(vfio_user_bitmap_range()) + 8
+
+ assert dirty_pages.argsz == argsz
+ assert dirty_pages.flags == VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP
+
+def test_dirty_pages_get_unmodified():
+ argsz = len(vfio_user_dirty_pages()) + len(vfio_user_bitmap_range()) + 8
+
+ dirty_pages = vfio_user_dirty_pages(argsz=argsz,
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
+ bitmap = vfio_user_bitmap(pgsize=0x1000, size=8)
+ br = vfio_user_bitmap_range(iova=0x10000, size=0x10000, bitmap=bitmap)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES,
+ size=len(dirty_pages) + len(br))
+ sock.send(hdr + dirty_pages + br)
+ vfu_run_ctx(ctx)
+ result = get_reply(sock)
+
+ assert len(result) == argsz
+
+ dirty_pages, result = vfio_user_dirty_pages.pop_from_buffer(result)
+
+ assert dirty_pages.argsz == argsz
+ assert dirty_pages.flags == VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP
+
+ br, result = vfio_user_bitmap_range.pop_from_buffer(result)
+
+ assert br.iova == 0x10000
+ assert br.size == 0x10000
+
+ assert br.bitmap.pgsize == 0x1000
+ assert br.bitmap.size == 8
+
+def test_dirty_pages_get_modified():
+ # sufficient to mark the region dirty
+ ret = vfu_addr_to_sg(ctx, dma_addr=0x10000, length=0x1000)
+ assert ret == 1
+
+ ret = vfu_addr_to_sg(ctx, dma_addr=0x14000, length=0x4000)
+ assert ret == 1
+
+ argsz = len(vfio_user_dirty_pages()) + len(vfio_user_bitmap_range()) + 8
+
+ dirty_pages = vfio_user_dirty_pages(argsz=argsz,
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP)
+ bitmap = vfio_user_bitmap(pgsize=0x1000, size=8)
+ br = vfio_user_bitmap_range(iova=0x10000, size=0x10000, bitmap=bitmap)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES,
+ size=len(dirty_pages) + len(br))
+ sock.send(hdr + dirty_pages + br)
+ vfu_run_ctx(ctx)
+ result = get_reply(sock)
+
+ dirty_pages, result = vfio_user_dirty_pages.pop_from_buffer(result)
+ br, result = vfio_user_bitmap_range.pop_from_buffer(result)
+ bitmap = struct.unpack("Q", result)[0]
+
+ assert bitmap == 0b11110001
+
+def test_dirty_pages_stop():
+ payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock)
+
+ payload = vfio_user_dirty_pages(argsz=len(vfio_user_dirty_pages()),
+ flags=VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP)
+
+ hdr = vfio_user_header(VFIO_USER_DIRTY_PAGES, size=len(payload))
+ sock.send(hdr + payload)
+ vfu_run_ctx(ctx)
+ get_reply(sock)
+
+def test_dirty_pages_cleanup():
+ disconnect_client(ctx, sock)
+ vfu_destroy_ctx(ctx)
diff --git a/test/py/test_pci_caps.py b/test/py/test_pci_caps.py
index 88a9b7c..be1914d 100644
--- a/test/py/test_pci_caps.py
+++ b/test/py/test_pci_caps.py
@@ -70,8 +70,7 @@ def test_pci_cap_bad_pos():
assert pos == -1
assert c.get_errno() == errno.EINVAL
-@c.CFUNCTYPE(c.c_int, c.c_void_p, c.POINTER(c.c_char),
- c.c_long, c.c_long, c.c_int)
+@vfu_region_access_cb_t
def pci_region_cb(ctx, buf, count, offset, is_write):
if not is_write:
return read_pci_cfg_space(ctx, buf, count, offset)
diff --git a/test/py/test_pci_ext_caps.py b/test/py/test_pci_ext_caps.py
index 70f8253..ab10e12 100644
--- a/test/py/test_pci_ext_caps.py
+++ b/test/py/test_pci_ext_caps.py
@@ -91,8 +91,7 @@ def test_pci_ext_cap_bad_pos():
assert pos == -1
assert c.get_errno() == errno.EINVAL
-@c.CFUNCTYPE(c.c_int, c.c_void_p, c.POINTER(c.c_char),
- c.c_long, c.c_long, c.c_int)
+@vfu_region_access_cb_t
def pci_region_cb(ctx, buf, count, offset, is_write):
if not is_write:
return read_pci_cfg_space(ctx, buf, count, offset, extended=True)
diff --git a/test/unit-tests.c b/test/unit-tests.c
index c083d05..d334244 100644
--- a/test/unit-tests.c
+++ b/test/unit-tests.c
@@ -448,33 +448,6 @@ typedef struct {
} tran_sock_t;
static void
-test_dirty_pages_without_dma(UNUSED void **state)
-{
- int ret;
-
- /* with DMA controller */
-
- patch("handle_dirty_pages");
-
- expect_value(handle_dirty_pages, vfu_ctx, &vfu_ctx);
- expect_any(handle_dirty_pages, msg);
- will_return(handle_dirty_pages, EREMOTEIO);
- will_return(handle_dirty_pages, -1);
-
- ret = exec_command(&vfu_ctx, mkmsg(VFIO_USER_DIRTY_PAGES, NULL, 0));
- assert_int_equal(-1, ret);
- assert_int_equal(EREMOTEIO, errno);
-
- /* without DMA controller */
-
- vfu_ctx.dma = NULL;
-
- ret = exec_command(&vfu_ctx, mkmsg(VFIO_USER_DIRTY_PAGES, NULL, 0));
- assert_int_equal(0, ret);
-
-}
-
-static void
test_migration_state_transitions(void **state UNUSED)
{
bool (*f)(uint32_t, uint32_t) = vfio_migr_state_transition_is_valid;
@@ -736,24 +709,6 @@ test_should_exec_command(UNUSED void **state)
assert_true(should_exec_command(&vfu_ctx, 0xbeef));
}
-static void
-test_dma_controller_dirty_page_get(void **state UNUSED)
-{
- dma_memory_region_t *r;
- uint64_t len = UINT32_MAX + (uint64_t)10;
- char bp[0x20008]; /* must be QWORD aligned */
-
- vfu_ctx.dma->nregions = 1;
- r = &vfu_ctx.dma->regions[0];
- r->info.iova.iov_base = (void *)0;
- r->info.iova.iov_len = len;
- r->info.vaddr = (void *)0xdeadbeef;
- vfu_ctx.dma->dirty_pgsize = 4096;
-
- assert_int_equal(0, dma_controller_dirty_page_get(vfu_ctx.dma, (void *)0,
- len, 4096, sizeof(bp), (char **)&bp));
-}
-
int
main(void)
{
@@ -769,7 +724,6 @@ main(void)
cmocka_unit_test_setup(test_dma_map_sg, setup),
cmocka_unit_test_setup(test_dma_addr_to_sg, setup),
cmocka_unit_test_setup(test_vfu_setup_device_dma, setup),
- cmocka_unit_test_setup(test_dirty_pages_without_dma, setup),
cmocka_unit_test_setup(test_migration_state_transitions, setup),
cmocka_unit_test_setup_teardown(test_setup_migration_region_size_ok,
setup_test_setup_migration_region,
@@ -789,7 +743,6 @@ main(void)
cmocka_unit_test_setup(test_device_is_stopped_and_copying, setup),
cmocka_unit_test_setup(test_cmd_allowed_when_stopped_and_copying, setup),
cmocka_unit_test_setup(test_should_exec_command, setup),
- cmocka_unit_test_setup(test_dma_controller_dirty_page_get, setup),
};
return cmocka_run_group_tests(tests, NULL, NULL);