aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Levon <john.levon@nutanix.com>2022-05-27 19:06:31 +0100
committerGitHub <noreply@github.com>2022-05-27 19:06:31 +0100
commit188cd00c520855615331d35c087a22215767b8fb (patch)
tree8bda987bcdeb1c8cf0751dbe190a28aef2609272
parent538d6063c9f8d395e1d38285ddfe405c3fcd7619 (diff)
downloadlibvfio-user-188cd00c520855615331d35c087a22215767b8fb.zip
libvfio-user-188cd00c520855615331d35c087a22215767b8fb.tar.gz
libvfio-user-188cd00c520855615331d35c087a22215767b8fb.tar.bz2
re-work SGL API (#675)
Harmonize and rename the vfu_*sg() APIs to better reflect their functionality: in our case, there is no mapping happening as part of these calls, they are merely housekeeping for range splitting, dirty tracking, and so on. Signed-off-by: John Levon <john.levon@nutanix.com> Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
-rw-r--r--include/libvfio-user.h78
-rw-r--r--lib/dma.c12
-rw-r--r--lib/dma.h52
-rw-r--r--lib/libvfio-user.c49
-rw-r--r--samples/client.c2
-rw-r--r--samples/server.c14
-rw-r--r--test/py/libvfio_user.py29
-rw-r--r--test/py/meson.build2
-rw-r--r--test/py/test_dirty_pages.py39
-rw-r--r--test/py/test_dma_map.py4
-rw-r--r--test/py/test_quiesce.py8
-rw-r--r--test/py/test_sgl_get_put.py (renamed from test/py/test_map_unmap_sg.py)28
-rw-r--r--test/unit-tests.c26
13 files changed, 175 insertions, 168 deletions
diff --git a/include/libvfio-user.h b/include/libvfio-user.h
index 3fe4b9f..ba599b5 100644
--- a/include/libvfio-user.h
+++ b/include/libvfio-user.h
@@ -349,10 +349,8 @@ typedef enum vfu_reset_type {
* Device callback for quiescing the device.
*
* vfu_run_ctx uses this callback to request from the device to quiesce its
- * operation. A quiesced device must not call the following functions:
- * - vfu_dma_read and vfu_dma_write,
- * - vfu_addr_to_sg, vfu_map_sg, and vfu_unmap_sg, unless it does so from a
- * device callback.
+ * operation. A quiesced device must not call vfu_addr_to_sgl() or vfu_sgl_*(),
+ * unless it does so from a device callback.
*
* The callback can return two values:
* 1) 0: this indicates that the device was quiesced. vfu_run_ctx then continues
@@ -370,7 +368,7 @@ typedef enum vfu_reset_type {
* the migration transition callback. These callbacks are only called after the
* device has been quiesced.
*
- * The following example demonstrates how a device can use vfu_map_sg and
+ * The following example demonstrates how a device can use the SG routines and
* friends while quiesced:
*
* A DMA region is mapped, libvfio-user calls the quiesce callback but the
@@ -382,18 +380,18 @@ typedef enum vfu_reset_type {
* }
*
* While quiescing, the device can continue to operate as normal, including
- * calling functions such as vfu_map_sg. Then, the device finishes quiescing:
+ * calling functions such as vfu_sgl_get(). Then, the device finishes quiescing:
*
* vfu_quiesce_done(vfu_ctx, 0);
*
* At this point, the device must have stopped using functions like
- * vfu_map_sg(), for example by pausing any I/O threads. libvfio-user
+ * vfu_sgl_get(), for example by pausing any I/O threads. libvfio-user
* eventually calls the dma_register device callback before vfu_quiesce_done
* returns. In this callback the device is allowed to call functions such as
- * vfu_map_sg:
+ * vfu_sgl_get():
*
* void (dma_register_cb(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info) {
- * vfu_map_sg(ctx, ...);
+ * vfu_sgl_get(ctx, ...);
* }
*
* Once vfu_quiesce_done returns, the device is unquiesced.
@@ -452,7 +450,7 @@ vfu_setup_device_reset_cb(vfu_ctx_t *vfu_ctx, vfu_reset_cb_t *reset);
*
* @iova: guest DMA range. This is the guest physical range (as we don't
* support vIOMMU) that the guest registers for DMA, via a VFIO_USER_DMA_MAP
- * message, and is the address space used as input to vfu_addr_to_sg().
+ * message, and is the address space used as input to vfu_addr_to_sgl().
* @vaddr: if the range is mapped into this process, this is the virtual address
* of the start of the region.
* @mapping: if @vaddr is non-NULL, this range represents the actual range
@@ -518,9 +516,9 @@ typedef void (vfu_dma_unregister_cb_t)(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info)
* DMA range addition or removal, these callbacks will be invoked.
*
* If this function is not called, guest DMA regions are not accessible via
- * vfu_addr_to_sg().
+ * vfu_addr_to_sgl().
*
- * To directly access this DMA memory via a local mapping with vfu_map_sg(), at
+ * To directly access this DMA memory via a local mapping with vfu_sgl_get(), at
* least @dma_unregister must be provided.
*
* @vfu_ctx: the libvfio-user context
@@ -764,33 +762,34 @@ vfu_irq_trigger(vfu_ctx_t *vfu_ctx, uint32_t subindex);
* @vfu_ctx: the libvfio-user context
* @dma_addr: the guest physical address
* @len: size of memory to be mapped
- * @sg: array that receives the scatter/gather entries to be mapped
- * @max_sg: maximum number of elements in above array
+ * @sgl: array that receives the scatter/gather entries to be mapped
+ * @max_nr_sgs: maximum number of elements in above array
* @prot: protection as defined in <sys/mman.h>
*
* @returns the number of scatter/gather entries created on success, and on
* failure:
* -1: if the GPA address span is invalid (errno=ENOENT) or
* protection violation (errno=EACCES)
- * (-x - 1): if @max_sg is too small, where x is the number of scatter/gather
+ * (-x - 1): if @max_nr_sgs is too small, where x is the number of SG
* entries necessary to complete this request (errno=0).
*/
int
-vfu_addr_to_sg(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr, size_t len,
- dma_sg_t *sg, int max_sg, int prot);
+vfu_addr_to_sgl(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr, size_t len,
+ dma_sg_t *sgl, size_t max_nr_sgs, int prot);
/**
- * Maps scatter/gather entries from the guest's physical address space to the
- * process's virtual memory. It is the caller's responsibility to remove the
- * mappings by calling vfu_unmap_sg().
+ * Populate the given iovec array (accessible in the process's virtual memory),
+ * based upon the SGL previously built via vfu_addr_to_sgl().
+ * It is the caller's responsibility to release the iovecs via
+ * vfu_sgl_put().
*
* This is only supported when a @dma_unregister callback is provided to
* vfu_setup_device_dma().
*
* @vfu_ctx: the libvfio-user context
- * @sg: array of scatter/gather entries returned by vfu_addr_to_sg. These
- * entries must not be modified and the array must not be deallocated
- * until vfu_unmap_sg() has been called.
+ * @sgl: array of scatter/gather entries returned by vfu_addr_to_sgl(). These
+ * entries must not be modified and the array must not be deallocated
+ * until vfu_sgl_put() has been called.
* @iov: array of iovec structures (defined in <sys/uio.h>) to receive each
* mapping
* @cnt: number of scatter/gather entries to map
@@ -799,38 +798,37 @@ vfu_addr_to_sg(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr, size_t len,
* @returns 0 on success, -1 on failure. Sets errno.
*/
int
-vfu_map_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt,
- int flags);
+vfu_sgl_get(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, struct iovec *iov, size_t cnt,
+ int flags);
/**
- * Mark scatter/gather entries (previously mapped by vfu_map_sg()) as dirty
- * (written to). This is only necessary if vfu_unmap_sg() is not called.
+ * Mark scatter/gather entries (previously acquired via vfu_sgl_get())
+ * as dirty (written to). This is only necessary if vfu_sgl_put() is not called.
*
* @vfu_ctx: the libvfio-user context
* @sg: array of scatter/gather entries to mark as dirty
* @cnt: number of scatter/gather entries to mark as dirty
*/
void
-vfu_mark_sg_dirty(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, int cnt);
+vfu_sgl_mark_dirty(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, size_t cnt);
/**
- * Unmaps scatter/gather entries (previously mapped by vfu_map_sg()) from
- * the process's virtual memory.
+ * Release the iovec array previously acquired by vfu_sgl_get().
*
- * This will automatically mark the sg as dirty if needed.
+ * This will automatically mark the sgl as dirty if needed.
*
* @vfu_ctx: the libvfio-user context
- * @sg: array of scatter/gather entries to unmap
+ * @sgl: array of scatter/gather entries to unmap
* @iov: array of iovec structures for each scatter/gather entry
* @cnt: number of scatter/gather entries to unmap
*/
void
-vfu_unmap_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt);
+vfu_sgl_put(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, struct iovec *iov, size_t cnt);
/**
* Read from the dma region exposed by the client. This can be used as an
- * alternative to vfu_map_sg(), if the region is not directly mappable, or DMA
- * notification callbacks have not been provided.
+ * alternative to reading from a vfu_sgl_get() mapping, if the region is not
+ * directly mappable, or DMA notification callbacks have not been provided.
*
* @vfu_ctx: the libvfio-user context
* @sg: a DMA segment obtained from dma_addr_to_sg
@@ -839,12 +837,12 @@ vfu_unmap_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt);
* @returns 0 on success, -1 on failure. Sets errno.
*/
int
-vfu_dma_read(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, void *data);
+vfu_sgl_read(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, size_t cnt, void *data);
/**
- * Write to the dma region exposed by the client. This can be used as an
- * alternative to vfu_map_sg(), if the region is not directly mappable, or DMA
- * notification callbacks have not been provided.
+ * Write to the dma region exposed by the client. This can be used as an
+ * alternative to writing to a vfu_sgl_get() mapping, if the region is not
+ * directly mappable, or DMA notification callbacks have not been provided.
*
* @vfu_ctx: the libvfio-user context
* @sg: a DMA segment obtained from dma_addr_to_sg
@@ -853,7 +851,7 @@ vfu_dma_read(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, void *data);
* @returns 0 on success, -1 on failure. Sets errno.
*/
int
-vfu_dma_write(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, void *data);
+vfu_sgl_write(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, size_t cnt, void *data);
/*
* Supported PCI regions.
diff --git a/lib/dma.c b/lib/dma.c
index daa1b58..5ca897f 100644
--- a/lib/dma.c
+++ b/lib/dma.c
@@ -417,7 +417,7 @@ MOCK_DEFINE(dma_controller_add_region)(dma_controller_t *dma,
int
_dma_addr_sg_split(const dma_controller_t *dma,
vfu_dma_addr_t dma_addr, uint64_t len,
- dma_sg_t *sg, int max_sg, int prot)
+ dma_sg_t *sg, int max_nr_sgs, int prot)
{
int idx;
int cnt = 0, ret;
@@ -433,7 +433,7 @@ _dma_addr_sg_split(const dma_controller_t *dma,
while (dma_addr >= region_start && dma_addr < region_end) {
size_t region_len = MIN((uint64_t)(region_end - dma_addr), len);
- if (cnt < max_sg) {
+ if (cnt < max_nr_sgs) {
ret = dma_init_sg(dma, &sg[cnt], dma_addr, region_len, prot, idx);
if (ret < 0) {
return ret;
@@ -460,7 +460,7 @@ out:
// There is still a region which was not found.
assert(len > 0);
return ERROR_INT(ENOENT);
- } else if (cnt > max_sg) {
+ } else if (cnt > max_nr_sgs) {
cnt = -cnt - 1;
}
errno = 0;
@@ -566,7 +566,7 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
* is purely for simplifying the implementation. We MUST allow arbitrary
* IOVAs.
*/
- ret = dma_addr_to_sg(dma, addr, len, &sg, 1, PROT_NONE);
+ ret = dma_addr_to_sgl(dma, addr, len, &sg, 1, PROT_NONE);
if (ret != 1 || sg.dma_addr != addr || sg.length != len) {
return ERROR_INT(ENOTSUP);
}
@@ -599,10 +599,6 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
return ERROR_INT(EINVAL);
}
- /*
- * TODO race condition between resetting bitmap and user calling
- * vfu_map_sg/vfu_unmap_sg().
- */
memcpy(bitmap, region->dirty_bitmap, size);
#ifdef DEBUG
log_dirty_bitmap(dma->vfu_ctx, region, bitmap, size);
diff --git a/lib/dma.h b/lib/dma.h
index aad3b9c..3fdbd65 100644
--- a/lib/dma.h
+++ b/lib/dma.h
@@ -48,13 +48,13 @@
* is registered with the DMA controllers at a unique, non-overlapping
* linear span of the DMA address space.
* - To perform DMA, the application should first build a scatter-gather
- * list (sglist) of dma_sg_t from DMA addresses. Then the sglist
- * can be mapped using dma_map_sg() into the process's virtual address space
- * as an iovec for direct access, and unmapped using dma_unmap_sg() when done.
+ * list (sgl) of dma_sg_t from DMA addresses. Then the sgl
+ * can be mapped using dma_sgl_get() into the process's virtual address space
+ * as an iovec for direct access, and unmapped using dma_sgl_put() when done.
* Every region is mapped into the application's virtual address space
* at registration time with R/W permissions.
- * dma_map_sg() ignores all protection bits and only does lookups and
- * returns pointers to the previously mapped regions. dma_unmap_sg() is
+ * dma_sgl_get() ignores all protection bits and only does lookups and
+ * returns pointers to the previously mapped regions. dma_sgl_put() is
* effectively a no-op.
*/
@@ -134,11 +134,11 @@ MOCK_DECLARE(int, dma_controller_remove_region, dma_controller_t *dma,
MOCK_DECLARE(void, dma_controller_unmap_region, dma_controller_t *dma,
dma_memory_region_t *region);
-// Helper for dma_addr_to_sg() slow path.
+// Helper for dma_addr_to_sgl() slow path.
int
_dma_addr_sg_split(const dma_controller_t *dma,
vfu_dma_addr_t dma_addr, uint64_t len,
- dma_sg_t *sg, int max_sg, int prot);
+ dma_sg_t *sg, int max_nr_sgs, int prot);
static void
_dma_mark_dirty(const dma_controller_t *dma, const dma_memory_region_t *region,
@@ -188,13 +188,13 @@ dma_init_sg(const dma_controller_t *dma, dma_sg_t *sg, vfu_dma_addr_t dma_addr,
* -1 if
* - the DMA address span is invalid
* - protection violation (errno=EACCES)
- * (-x - 1) if @max_sg is too small, where x is the number of sg entries
+ * (-x - 1) if @max_nr_sgs is too small, where x is the number of sg entries
* necessary to complete this request.
*/
static inline int
-dma_addr_to_sg(const dma_controller_t *dma,
- vfu_dma_addr_t dma_addr, size_t len,
- dma_sg_t *sg, int max_sg, int prot)
+dma_addr_to_sgl(const dma_controller_t *dma,
+ vfu_dma_addr_t dma_addr, size_t len,
+ dma_sg_t *sgl, size_t max_nr_sgs, int prot)
{
static __thread int region_hint;
int cnt, ret;
@@ -203,11 +203,11 @@ dma_addr_to_sg(const dma_controller_t *dma,
const void *region_end = iov_end(&region->info.iova);
// Fast path: single region.
- if (likely(max_sg > 0 && len > 0 &&
+ if (likely(max_nr_sgs > 0 && len > 0 &&
dma_addr >= region->info.iova.iov_base &&
dma_addr + len <= region_end &&
region_hint < dma->nregions)) {
- ret = dma_init_sg(dma, sg, dma_addr, len, prot, region_hint);
+ ret = dma_init_sg(dma, sgl, dma_addr, len, prot, region_hint);
if (ret < 0) {
return ret;
}
@@ -215,24 +215,26 @@ dma_addr_to_sg(const dma_controller_t *dma,
return 1;
}
// Slow path: search through regions.
- cnt = _dma_addr_sg_split(dma, dma_addr, len, sg, max_sg, prot);
+ cnt = _dma_addr_sg_split(dma, dma_addr, len, sgl, max_nr_sgs, prot);
if (likely(cnt > 0)) {
- region_hint = sg->region;
+ region_hint = sgl[0].region;
}
return cnt;
}
static inline int
-dma_map_sg(dma_controller_t *dma, dma_sg_t *sg, struct iovec *iov,
- int cnt)
+dma_sgl_get(dma_controller_t *dma, dma_sg_t *sgl, struct iovec *iov, size_t cnt)
{
dma_memory_region_t *region;
+ dma_sg_t *sg;
assert(dma != NULL);
- assert(sg != NULL);
+ assert(sgl != NULL);
assert(iov != NULL);
assert(cnt > 0);
+ sg = sgl;
+
do {
if (sg->region >= dma->nregions) {
return ERROR_INT(EINVAL);
@@ -257,14 +259,17 @@ dma_map_sg(dma_controller_t *dma, dma_sg_t *sg, struct iovec *iov,
}
static inline void
-dma_mark_sg_dirty(dma_controller_t *dma, dma_sg_t *sg, int cnt)
+dma_sgl_mark_dirty(dma_controller_t *dma, dma_sg_t *sgl, size_t cnt)
{
dma_memory_region_t *region;
+ dma_sg_t *sg;
assert(dma != NULL);
- assert(sg != NULL);
+ assert(sgl != NULL);
assert(cnt > 0);
+ sg = sgl;
+
do {
if (sg->region >= dma->nregions) {
return;
@@ -286,14 +291,17 @@ dma_mark_sg_dirty(dma_controller_t *dma, dma_sg_t *sg, int cnt)
}
static inline void
-dma_unmap_sg(dma_controller_t *dma, dma_sg_t *sg, int cnt)
+dma_sgl_put(dma_controller_t *dma, dma_sg_t *sgl, size_t cnt)
{
dma_memory_region_t *region;
+ dma_sg_t *sg;
assert(dma != NULL);
- assert(sg != NULL);
+ assert(sgl != NULL);
assert(cnt > 0);
+ sg = sgl;
+
do {
if (sg->region >= dma->nregions) {
return;
diff --git a/lib/libvfio-user.c b/lib/libvfio-user.c
index 7d324aa..90c4b39 100644
--- a/lib/libvfio-user.c
+++ b/lib/libvfio-user.c
@@ -2003,8 +2003,8 @@ quiesce_check_allowed(vfu_ctx_t *vfu_ctx)
}
EXPORT int
-vfu_addr_to_sg(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr,
- size_t len, dma_sg_t *sg, int max_sg, int prot)
+vfu_addr_to_sgl(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr,
+ size_t len, dma_sg_t *sgl, size_t max_nr_sgs, int prot)
{
assert(vfu_ctx != NULL);
@@ -2014,31 +2014,24 @@ vfu_addr_to_sg(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr,
quiesce_check_allowed(vfu_ctx);
- return dma_addr_to_sg(vfu_ctx->dma, dma_addr, len, sg, max_sg, prot);
+ return dma_addr_to_sgl(vfu_ctx->dma, dma_addr, len, sgl, max_nr_sgs, prot);
}
EXPORT int
-vfu_map_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt,
- int flags)
+vfu_sgl_get(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, struct iovec *iov, size_t cnt,
+ int flags)
{
- int ret;
-
if (unlikely(vfu_ctx->dma_unregister == NULL) || flags != 0) {
return ERROR_INT(EINVAL);
}
quiesce_check_allowed(vfu_ctx);
- ret = dma_map_sg(vfu_ctx->dma, sg, iov, cnt);
- if (ret < 0) {
- return -1;
- }
-
- return 0;
+ return dma_sgl_get(vfu_ctx->dma, sgl, iov, cnt);
}
EXPORT void
-vfu_mark_sg_dirty(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, int cnt)
+vfu_sgl_mark_dirty(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, size_t cnt)
{
if (unlikely(vfu_ctx->dma_unregister == NULL)) {
return;
@@ -2046,12 +2039,12 @@ vfu_mark_sg_dirty(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, int cnt)
quiesce_check_allowed(vfu_ctx);
- return dma_mark_sg_dirty(vfu_ctx->dma, sg, cnt);
+ return dma_sgl_mark_dirty(vfu_ctx->dma, sgl, cnt);
}
EXPORT void
-vfu_unmap_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg,
- struct iovec *iov UNUSED, int cnt)
+vfu_sgl_put(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl,
+ struct iovec *iov UNUSED, size_t cnt)
{
if (unlikely(vfu_ctx->dma_unregister == NULL)) {
return;
@@ -2059,7 +2052,7 @@ vfu_unmap_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg,
quiesce_check_allowed(vfu_ctx);
- return dma_unmap_sg(vfu_ctx->dma, sg, cnt);
+ return dma_sgl_put(vfu_ctx->dma, sgl, cnt);
}
static int
@@ -2156,17 +2149,29 @@ vfu_dma_transfer(vfu_ctx_t *vfu_ctx, enum vfio_user_command cmd,
}
EXPORT int
-vfu_dma_read(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, void *data)
+vfu_sgl_read(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, size_t cnt, void *data)
{
assert(vfu_ctx->pending.state == VFU_CTX_PENDING_NONE);
- return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_READ, sg, data);
+
+ /* Not currently implemented. */
+ if (cnt != 1) {
+ return ERROR_INT(ENOTSUP);
+ }
+
+ return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_READ, sgl, data);
}
EXPORT int
-vfu_dma_write(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, void *data)
+vfu_sgl_write(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, size_t cnt, void *data)
{
assert(vfu_ctx->pending.state == VFU_CTX_PENDING_NONE);
- return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_WRITE, sg, data);
+
+ /* Not currently implemented. */
+ if (cnt != 1) {
+ return ERROR_INT(ENOTSUP);
+ }
+
+ return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_WRITE, sgl, data);
}
EXPORT bool
diff --git a/samples/client.c b/samples/client.c
index 38da81d..da211b8 100644
--- a/samples/client.c
+++ b/samples/client.c
@@ -51,7 +51,7 @@
#define CLIENT_MAX_FDS (32)
-/* This is low, so we get testing of vfu_dma_read/write() chunking. */
+/* This is low, so we get testing of vfu_sgl_read/write() chunking. */
#define CLIENT_MAX_DATA_XFER_SIZE (1024)
static char const *irq_to_str[] = {
diff --git a/samples/server.c b/samples/server.c
index 1bd7710..e994ac1 100644
--- a/samples/server.c
+++ b/samples/server.c
@@ -204,9 +204,9 @@ static void do_dma_io(vfu_ctx_t *vfu_ctx, struct server_data *server_data)
assert(vfu_ctx != NULL);
- ret = vfu_addr_to_sg(vfu_ctx,
- (vfu_dma_addr_t)server_data->regions[0].iova.iov_base,
- count, sg, 1, PROT_WRITE);
+ ret = vfu_addr_to_sgl(vfu_ctx,
+ (vfu_dma_addr_t)server_data->regions[0].iova.iov_base,
+ count, sg, 1, PROT_WRITE);
if (ret < 0) {
err(EXIT_FAILURE, "failed to map %p-%p",
server_data->regions[0].iova.iov_base,
@@ -217,17 +217,17 @@ static void do_dma_io(vfu_ctx_t *vfu_ctx, struct server_data *server_data)
crc1 = rte_hash_crc(buf, count, 0);
vfu_log(vfu_ctx, LOG_DEBUG, "%s: WRITE addr %p count %d", __func__,
server_data->regions[0].iova.iov_base, count);
- ret = vfu_dma_write(vfu_ctx, sg, buf);
+ ret = vfu_sgl_write(vfu_ctx, sg, 1, buf);
if (ret < 0) {
- err(EXIT_FAILURE, "vfu_dma_write failed");
+ err(EXIT_FAILURE, "vfu_sgl_write failed");
}
memset(buf, 0, count);
vfu_log(vfu_ctx, LOG_DEBUG, "%s: READ addr %p count %d", __func__,
server_data->regions[0].iova.iov_base, count);
- ret = vfu_dma_read(vfu_ctx, sg, buf);
+ ret = vfu_sgl_read(vfu_ctx, sg, 1, buf);
if (ret < 0) {
- err(EXIT_FAILURE, "vfu_dma_read failed");
+ err(EXIT_FAILURE, "vfu_sgl_read failed");
}
crc2 = rte_hash_crc(buf, count, 0);
diff --git a/test/py/libvfio_user.py b/test/py/libvfio_user.py
index aeaefa5..4bdb761 100644
--- a/test/py/libvfio_user.py
+++ b/test/py/libvfio_user.py
@@ -611,12 +611,12 @@ lib.vfu_setup_device_dma.argtypes = (c.c_void_p, vfu_dma_register_cb_t,
lib.vfu_setup_device_migration_callbacks.argtypes = (c.c_void_p,
c.POINTER(vfu_migration_callbacks_t), c.c_uint64)
lib.dma_sg_size.restype = (c.c_size_t)
-lib.vfu_addr_to_sg.argtypes = (c.c_void_p, c.c_void_p, c.c_size_t,
- c.POINTER(dma_sg_t), c.c_int, c.c_int)
-lib.vfu_map_sg.argtypes = (c.c_void_p, c.POINTER(dma_sg_t), c.POINTER(iovec_t),
- c.c_int, c.c_int)
-lib.vfu_unmap_sg.argtypes = (c.c_void_p, c.POINTER(dma_sg_t),
- c.POINTER(iovec_t), c.c_int)
+lib.vfu_addr_to_sgl.argtypes = (c.c_void_p, c.c_void_p, c.c_size_t,
+ c.POINTER(dma_sg_t), c.c_size_t, c.c_int)
+lib.vfu_sgl_get.argtypes = (c.c_void_p, c.POINTER(dma_sg_t),
+ c.POINTER(iovec_t), c.c_size_t, c.c_int)
+lib.vfu_sgl_put.argtypes = (c.c_void_p, c.POINTER(dma_sg_t),
+ c.POINTER(iovec_t), c.c_size_t)
lib.vfu_create_ioeventfd.argtypes = (c.c_void_p, c.c_uint32, c.c_int,
c.c_size_t, c.c_uint32, c.c_uint32,
@@ -1147,21 +1147,22 @@ def dma_sg_size():
return lib.dma_sg_size()
-def vfu_addr_to_sg(ctx, dma_addr, length, max_sg=1,
- prot=(mmap.PROT_READ | mmap.PROT_WRITE)):
+def vfu_addr_to_sgl(ctx, dma_addr, length, max_nr_sgs=1,
+ prot=(mmap.PROT_READ | mmap.PROT_WRITE)):
assert ctx is not None
- sg = (dma_sg_t * max_sg)()
+ sg = (dma_sg_t * max_nr_sgs)()
- return (lib.vfu_addr_to_sg(ctx, dma_addr, length, sg, max_sg, prot), sg)
+ return (lib.vfu_addr_to_sgl(ctx, dma_addr, length,
+ sg, max_nr_sgs, prot), sg)
-def vfu_map_sg(ctx, sg, iovec, cnt=1, flags=0):
- return lib.vfu_map_sg(ctx, sg, iovec, cnt, flags)
+def vfu_sgl_get(ctx, sg, iovec, cnt=1, flags=0):
+ return lib.vfu_sgl_get(ctx, sg, iovec, cnt, flags)
-def vfu_unmap_sg(ctx, sg, iovec, cnt=1):
- return lib.vfu_unmap_sg(ctx, sg, iovec, cnt)
+def vfu_sgl_put(ctx, sg, iovec, cnt=1):
+ return lib.vfu_sgl_put(ctx, sg, iovec, cnt)
def vfu_create_ioeventfd(ctx, region_idx, fd, offset, size, flags, datamatch):
diff --git a/test/py/meson.build b/test/py/meson.build
index e8266e7..d9c97b3 100644
--- a/test/py/meson.build
+++ b/test/py/meson.build
@@ -37,7 +37,6 @@ python_tests = [
'test_dma_map.py',
'test_dma_unmap.py',
'test_irq_trigger.py',
- 'test_map_unmap_sg.py',
'test_migration.py',
'test_negotiate.py',
'test_pci_caps.py',
@@ -45,6 +44,7 @@ python_tests = [
'test_quiesce.py',
'test_request_errors.py',
'test_setup_region.py',
+ 'test_sgl_get_put.py',
'test_vfu_create_ctx.py',
'test_vfu_realize_ctx.py',
]
diff --git a/test/py/test_dirty_pages.py b/test/py/test_dirty_pages.py
index 9f892bd..6cf87fb 100644
--- a/test/py/test_dirty_pages.py
+++ b/test/py/test_dirty_pages.py
@@ -312,46 +312,45 @@ iovec3 = None
def test_dirty_pages_get_modified():
- ret, sg1 = vfu_addr_to_sg(ctx, dma_addr=0x10000, length=0x1000)
+ ret, sg1 = vfu_addr_to_sgl(ctx, dma_addr=0x10000, length=0x1000)
assert ret == 1
iovec1 = iovec_t()
- ret = vfu_map_sg(ctx, sg1, iovec1)
+ ret = vfu_sgl_get(ctx, sg1, iovec1)
assert ret == 0
# read only
- ret, sg2 = vfu_addr_to_sg(ctx, dma_addr=0x11000, length=0x1000,
- prot=mmap.PROT_READ)
+ ret, sg2 = vfu_addr_to_sgl(ctx, dma_addr=0x11000, length=0x1000,
+ prot=mmap.PROT_READ)
assert ret == 1
iovec2 = iovec_t()
- ret = vfu_map_sg(ctx, sg2, iovec2)
+ ret = vfu_sgl_get(ctx, sg2, iovec2)
assert ret == 0
- ret, sg3 = vfu_addr_to_sg(ctx, dma_addr=0x12000, length=0x1000)
+ ret, sg3 = vfu_addr_to_sgl(ctx, dma_addr=0x12000, length=0x1000)
assert ret == 1
iovec3 = iovec_t()
- ret = vfu_map_sg(ctx, sg3, iovec3)
+ ret = vfu_sgl_get(ctx, sg3, iovec3)
assert ret == 0
- ret, sg4 = vfu_addr_to_sg(ctx, dma_addr=0x14000, length=0x4000)
+ ret, sg4 = vfu_addr_to_sgl(ctx, dma_addr=0x14000, length=0x4000)
assert ret == 1
iovec4 = iovec_t()
- ret = vfu_map_sg(ctx, sg4, iovec4)
+ ret = vfu_sgl_get(ctx, sg4, iovec4)
assert ret == 0
- # not unmapped yet, dirty bitmap should be zero, but dirty maps will have
- # been marked dirty still
+ # not put yet, dirty bitmap should be zero
bitmap = get_dirty_page_bitmap()
assert bitmap == 0b00000000
- # unmap segments, dirty bitmap should be updated
- vfu_unmap_sg(ctx, sg1, iovec1)
- vfu_unmap_sg(ctx, sg4, iovec4)
+ # put SGLs, dirty bitmap should be updated
+ vfu_sgl_put(ctx, sg1, iovec1)
+ vfu_sgl_put(ctx, sg4, iovec4)
bitmap = get_dirty_page_bitmap()
assert bitmap == 0b11110001
- # after another two unmaps, should just be one dirty page
- vfu_unmap_sg(ctx, sg2, iovec2)
- vfu_unmap_sg(ctx, sg3, iovec3)
+ # after another two puts, should just be one dirty page
+ vfu_sgl_put(ctx, sg2, iovec2)
+ vfu_sgl_put(ctx, sg3, iovec3)
bitmap = get_dirty_page_bitmap()
assert bitmap == 0b00000100
@@ -393,12 +392,12 @@ def test_dirty_pages_bitmap_with_quiesce():
quiesce_errno = errno.EBUSY
- ret, sg1 = vfu_addr_to_sg(ctx, dma_addr=0x10000, length=0x1000)
+ ret, sg1 = vfu_addr_to_sgl(ctx, dma_addr=0x10000, length=0x1000)
assert ret == 1
iovec1 = iovec_t()
- ret = vfu_map_sg(ctx, sg1, iovec1)
+ ret = vfu_sgl_get(ctx, sg1, iovec1)
assert ret == 0
- vfu_unmap_sg(ctx, sg1, iovec1)
+ vfu_sgl_put(ctx, sg1, iovec1)
send_dirty_page_bitmap(busy=True)
diff --git a/test/py/test_dma_map.py b/test/py/test_dma_map.py
index f33d110..e8ce8f2 100644
--- a/test/py/test_dma_map.py
+++ b/test/py/test_dma_map.py
@@ -119,7 +119,7 @@ def test_dma_map_busy(mock_dma_register, mock_quiesce):
mock_dma_register.assert_called_once()
# check that the DMA region has been added
- count, sgs = vfu_addr_to_sg(ctx, 0x10000, 0x1000)
+ count, sgs = vfu_addr_to_sgl(ctx, 0x10000, 0x1000)
assert len(sgs) == 1
sg = sgs[0]
assert sg.dma_addr == 0x10000 and sg.region == 0 and sg.length == 0x1000 \
@@ -224,7 +224,7 @@ def test_dma_map_busy_reply_fail(mock_dma_register, mock_quiesce, mock_reset):
mock_reset.assert_called_once()
# check that the DMA region was NOT added
- count, sgs = vfu_addr_to_sg(ctx, 0x10000, 0x1000)
+ count, sgs = vfu_addr_to_sgl(ctx, 0x10000, 0x1000)
assert count == -1
assert c.get_errno() == errno.ENOENT
diff --git a/test/py/test_quiesce.py b/test/py/test_quiesce.py
index f283ccc..0e2a980 100644
--- a/test/py/test_quiesce.py
+++ b/test/py/test_quiesce.py
@@ -102,7 +102,7 @@ def test_device_quiesce_error_after_busy(mock_quiesce, mock_dma_register):
mock_dma_register.assert_not_called()
# check that the DMA region was NOT added
- count, sgs = vfu_addr_to_sg(ctx, 0x10000, 0x1000)
+ count, sgs = vfu_addr_to_sgl(ctx, 0x10000, 0x1000)
assert count == -1
assert c.get_errno() == errno.ENOENT
@@ -110,18 +110,18 @@ def test_device_quiesce_error_after_busy(mock_quiesce, mock_dma_register):
# DMA map/unmap, migration device state transition, and reset callbacks
# have the same function signature in Python
def _side_effect(ctx, _):
- count, sgs = vfu_addr_to_sg(ctx, 0x10000, 0x1000)
+ count, sgs = vfu_addr_to_sgl(ctx, 0x10000, 0x1000)
assert count == 1
sg = sgs[0]
assert sg.dma_addr == 0x10000 and sg.region == 0 \
and sg.length == 0x1000 and sg.offset == 0 and sg.writeable
iovec = iovec_t()
- ret = vfu_map_sg(ctx, sg, iovec)
+ ret = vfu_sgl_get(ctx, sg, iovec)
assert ret == 0, "%s" % c.get_errno()
assert iovec.iov_base != 0
assert iovec.iov_len == 0x1000
assert ret == 0
- vfu_unmap_sg(ctx, sg, iovec)
+ vfu_sgl_put(ctx, sg, iovec)
return 0
diff --git a/test/py/test_map_unmap_sg.py b/test/py/test_sgl_get_put.py
index fd606f4..d44dc6e 100644
--- a/test/py/test_map_unmap_sg.py
+++ b/test/py/test_sgl_get_put.py
@@ -40,7 +40,7 @@ def test_dma_sg_size():
assert size == len(dma_sg_t())
-def test_map_sg_with_invalid_region():
+def test_sgl_get_with_invalid_region():
global ctx
ctx = prepare_ctx_for_dma()
@@ -48,12 +48,12 @@ def test_map_sg_with_invalid_region():
sg = dma_sg_t()
iovec = iovec_t()
- ret = vfu_map_sg(ctx, sg, iovec)
+ ret = vfu_sgl_get(ctx, sg, iovec)
assert ret == -1
assert ctypes.get_errno() == errno.EINVAL
-def test_map_sg_without_fd():
+def test_sgl_get_without_fd():
sock = connect_client(ctx)
payload = vfio_user_dma_map(argsz=len(vfio_user_dma_map()),
@@ -64,14 +64,14 @@ def test_map_sg_without_fd():
sg = dma_sg_t()
iovec = iovec_t()
sg.region = 0
- ret = vfu_map_sg(ctx, sg, iovec)
+ ret = vfu_sgl_get(ctx, sg, iovec)
assert ret == -1
assert ctypes.get_errno() == errno.EFAULT
disconnect_client(ctx, sock)
-def test_map_multiple_sge():
+def test_get_multiple_sge():
sock = connect_client(ctx)
regions = 4
f = tempfile.TemporaryFile()
@@ -83,12 +83,12 @@ def test_map_multiple_sge():
offset=0, addr=0x1000 * i, size=4096)
msg(ctx, sock, VFIO_USER_DMA_MAP, payload, fds=[f.fileno()])
- ret, sg = vfu_addr_to_sg(ctx, dma_addr=0x1000, length=4096 * 3, max_sg=3,
- prot=mmap.PROT_READ)
+ ret, sg = vfu_addr_to_sgl(ctx, dma_addr=0x1000, length=4096 * 3,
+ max_nr_sgs=3, prot=mmap.PROT_READ)
assert ret == 3
iovec = (iovec_t * 3)()
- ret = vfu_map_sg(ctx, sg, iovec, cnt=3)
+ ret = vfu_sgl_get(ctx, sg, iovec, cnt=3)
assert ret == 0
assert iovec[0].iov_len == 4096
assert iovec[1].iov_len == 4096
@@ -97,7 +97,7 @@ def test_map_multiple_sge():
disconnect_client(ctx, sock)
-def test_unmap_sg():
+def test_sgl_put():
sock = connect_client(ctx)
regions = 4
f = tempfile.TemporaryFile()
@@ -109,19 +109,19 @@ def test_unmap_sg():
offset=0, addr=0x1000 * i, size=4096)
msg(ctx, sock, VFIO_USER_DMA_MAP, payload, fds=[f.fileno()])
- ret, sg = vfu_addr_to_sg(ctx, dma_addr=0x1000, length=4096 * 3, max_sg=3,
- prot=mmap.PROT_READ)
+ ret, sg = vfu_addr_to_sgl(ctx, dma_addr=0x1000, length=4096 * 3,
+ max_nr_sgs=3, prot=mmap.PROT_READ)
assert ret == 3
iovec = (iovec_t * 3)()
- ret = vfu_map_sg(ctx, sg, iovec, cnt=3)
+ ret = vfu_sgl_get(ctx, sg, iovec, cnt=3)
assert ret == 0
- vfu_unmap_sg(ctx, sg, iovec, cnt=3)
+ vfu_sgl_put(ctx, sg, iovec, cnt=3)
disconnect_client(ctx, sock)
-def test_map_unmap_sg_cleanup():
+def test_sgl_get_put_cleanup():
vfu_destroy_ctx(ctx)
# ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
diff --git a/test/unit-tests.c b/test/unit-tests.c
index 97b06f1..cdb88a2 100644
--- a/test/unit-tests.c
+++ b/test/unit-tests.c
@@ -310,7 +310,7 @@ test_dma_controller_remove_region_unmapped(void **state UNUSED)
}
static void
-test_dma_addr_to_sg(void **state UNUSED)
+test_dma_addr_to_sgl(void **state UNUSED)
{
dma_memory_region_t *r, *r1;
struct iovec iov[2] = { };
@@ -325,8 +325,8 @@ test_dma_addr_to_sg(void **state UNUSED)
/* fast path, region hint hit */
r->info.prot = PROT_WRITE;
- ret = dma_addr_to_sg(vfu_ctx.dma, (vfu_dma_addr_t)0x2000,
- 0x400, sg, 1, PROT_READ);
+ ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x2000,
+ 0x400, sg, 1, PROT_READ);
assert_int_equal(1, ret);
assert_int_equal(r->info.iova.iov_base, sg[0].dma_addr);
assert_int_equal(0, sg[0].region);
@@ -337,20 +337,20 @@ test_dma_addr_to_sg(void **state UNUSED)
errno = 0;
r->info.prot = PROT_WRITE;
- ret = dma_addr_to_sg(vfu_ctx.dma, (vfu_dma_addr_t)0x6000,
- 0x400, sg, 1, PROT_READ);
+ ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x6000,
+ 0x400, sg, 1, PROT_READ);
assert_int_equal(-1, ret);
assert_int_equal(ENOENT, errno);
r->info.prot = PROT_READ;
- ret = dma_addr_to_sg(vfu_ctx.dma, (vfu_dma_addr_t)0x2000,
- 0x400, sg, 1, PROT_WRITE);
+ ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x2000,
+ 0x400, sg, 1, PROT_WRITE);
assert_int_equal(-1, ret);
assert_int_equal(EACCES, errno);
r->info.prot = PROT_READ|PROT_WRITE;
- ret = dma_addr_to_sg(vfu_ctx.dma, (vfu_dma_addr_t)0x2000,
- 0x400, sg, 1, PROT_READ);
+ ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x2000,
+ 0x400, sg, 1, PROT_READ);
assert_int_equal(1, ret);
vfu_ctx.dma->nregions = 2;
@@ -359,8 +359,8 @@ test_dma_addr_to_sg(void **state UNUSED)
r1->info.iova.iov_len = 0x2000;
r1->info.vaddr = (void *)0xcafebabe;
r1->info.prot = PROT_WRITE;
- ret = dma_addr_to_sg(vfu_ctx.dma, (vfu_dma_addr_t)0x1000,
- 0x5000, sg, 2, PROT_READ);
+ ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x1000,
+ 0x5000, sg, 2, PROT_READ);
assert_int_equal(2, ret);
assert_int_equal(0x4000, sg[0].length);
assert_int_equal(r->info.iova.iov_base, sg[0].dma_addr);
@@ -374,7 +374,7 @@ test_dma_addr_to_sg(void **state UNUSED)
assert_int_equal(0, sg[1].offset);
assert_true(vfu_sg_is_mappable(&vfu_ctx, &sg[1]));
- assert_int_equal(0, dma_map_sg(vfu_ctx.dma, sg, iov, 2));
+ assert_int_equal(0, dma_sgl_get(vfu_ctx.dma, sg, iov, 2));
assert_int_equal(r->info.vaddr + sg[0].offset, iov[0].iov_base);
assert_int_equal(sg[0].length, iov[0].iov_len);
assert_int_equal(r1->info.vaddr + sg[1].offset, iov[1].iov_base);
@@ -672,7 +672,7 @@ main(void)
cmocka_unit_test_setup(test_dma_controller_add_region_no_fd, setup),
cmocka_unit_test_setup(test_dma_controller_remove_region_mapped, setup),
cmocka_unit_test_setup(test_dma_controller_remove_region_unmapped, setup),
- cmocka_unit_test_setup(test_dma_addr_to_sg, setup),
+ cmocka_unit_test_setup(test_dma_addr_to_sgl, setup),
cmocka_unit_test_setup(test_vfu_setup_device_dma, setup),
cmocka_unit_test_setup(test_migration_state_transitions, setup),
cmocka_unit_test_setup_teardown(test_setup_migration_region_size_ok,