author    John Levon <john.levon@nutanix.com>  2022-05-27 19:06:31 +0100
committer GitHub <noreply@github.com>  2022-05-27 19:06:31 +0100
commit    188cd00c520855615331d35c087a22215767b8fb (patch)
tree      8bda987bcdeb1c8cf0751dbe190a28aef2609272 /lib
parent    538d6063c9f8d395e1d38285ddfe405c3fcd7619 (diff)
re-work SGL API (#675)
Harmonize and rename the vfu_*sg() APIs to better reflect their
functionality: in our case, there is no mapping happening as part of
these calls; they are merely housekeeping for range splitting, dirty
tracking, and so on.

Signed-off-by: John Levon <john.levon@nutanix.com>
Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
Diffstat (limited to 'lib')
 lib/dma.c          | 12
 lib/dma.h          | 52
 lib/libvfio-user.c | 49
 3 files changed, 61 insertions(+), 52 deletions(-)
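
For reference, a minimal sketch of the renamed public API as device code
might use it, based on the signatures in the libvfio-user.c hunks below.
The initialized vfu_ctx, a client-registered DMA region covering
[dma_addr, dma_addr + len), and the use of the existing dma_sg_size()
helper to size the opaque dma_sg_t are assumptions of the example:

#include <err.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include "libvfio-user.h"

/* Hypothetical helper: zero a guest buffer through the renamed SGL API. */
static void
fill_guest_buffer(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr, size_t len)
{
    dma_sg_t *sgl = malloc(dma_sg_size());   /* one entry suffices here */
    struct iovec iov[1];

    if (sgl == NULL) {
        err(EXIT_FAILURE, "malloc");
    }

    /* Split the IOVA range into SG entries; no mapping happens here. */
    if (vfu_addr_to_sgl(vfu_ctx, dma_addr, len, sgl, 1, PROT_WRITE) != 1) {
        errx(EXIT_FAILURE, "vfu_addr_to_sgl");
    }

    /* Fetch iovecs pointing into the regions mapped at registration time. */
    if (vfu_sgl_get(vfu_ctx, sgl, iov, 1, 0) != 0) {
        errx(EXIT_FAILURE, "vfu_sgl_get");
    }

    memset(iov[0].iov_base, 0, iov[0].iov_len);   /* direct access */

    vfu_sgl_mark_dirty(vfu_ctx, sgl, 1);   /* dirty-tracking housekeeping */
    vfu_sgl_put(vfu_ctx, sgl, iov, 1);     /* pairs with vfu_sgl_get() */
    free(sgl);
}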
diff --git a/lib/dma.c b/lib/dma.c
index daa1b58..5ca897f 100644
--- a/lib/dma.c
+++ b/lib/dma.c
@@ -417,7 +417,7 @@ MOCK_DEFINE(dma_controller_add_region)(dma_controller_t *dma,
int
_dma_addr_sg_split(const dma_controller_t *dma,
vfu_dma_addr_t dma_addr, uint64_t len,
- dma_sg_t *sg, int max_sg, int prot)
+ dma_sg_t *sg, int max_nr_sgs, int prot)
{
int idx;
int cnt = 0, ret;
@@ -433,7 +433,7 @@ _dma_addr_sg_split(const dma_controller_t *dma,
while (dma_addr >= region_start && dma_addr < region_end) {
size_t region_len = MIN((uint64_t)(region_end - dma_addr), len);
- if (cnt < max_sg) {
+ if (cnt < max_nr_sgs) {
ret = dma_init_sg(dma, &sg[cnt], dma_addr, region_len, prot, idx);
if (ret < 0) {
return ret;
@@ -460,7 +460,7 @@ out:
// There is still a region which was not found.
assert(len > 0);
return ERROR_INT(ENOENT);
- } else if (cnt > max_sg) {
+ } else if (cnt > max_nr_sgs) {
cnt = -cnt - 1;
}
errno = 0;
@@ -566,7 +566,7 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
* is purely for simplifying the implementation. We MUST allow arbitrary
* IOVAs.
*/
- ret = dma_addr_to_sg(dma, addr, len, &sg, 1, PROT_NONE);
+ ret = dma_addr_to_sgl(dma, addr, len, &sg, 1, PROT_NONE);
if (ret != 1 || sg.dma_addr != addr || sg.length != len) {
return ERROR_INT(ENOTSUP);
}
@@ -599,10 +599,6 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
return ERROR_INT(EINVAL);
}
- /*
- * TODO race condition between resetting bitmap and user calling
- * vfu_map_sg/vfu_unmap_sg().
- */
memcpy(bitmap, region->dirty_bitmap, size);
#ifdef DEBUG
log_dirty_bitmap(dma->vfu_ctx, region, bitmap, size);
diff --git a/lib/dma.h b/lib/dma.h
index aad3b9c..3fdbd65 100644
--- a/lib/dma.h
+++ b/lib/dma.h
@@ -48,13 +48,13 @@
* is registered with the DMA controllers at a unique, non-overlapping
* linear span of the DMA address space.
* - To perform DMA, the application should first build a scatter-gather
- * list (sglist) of dma_sg_t from DMA addresses. Then the sglist
- * can be mapped using dma_map_sg() into the process's virtual address space
- * as an iovec for direct access, and unmapped using dma_unmap_sg() when done.
+ * list (sgl) of dma_sg_t from DMA addresses. Then the sgl
+ * can be mapped using dma_sgl_get() into the process's virtual address space
+ * as an iovec for direct access, and unmapped using dma_sgl_put() when done.
* Every region is mapped into the application's virtual address space
* at registration time with R/W permissions.
- * dma_map_sg() ignores all protection bits and only does lookups and
- * returns pointers to the previously mapped regions. dma_unmap_sg() is
+ * dma_sgl_get() ignores all protection bits and only does lookups and
+ * returns pointers to the previously mapped regions. dma_sgl_put() is
* effectively a no-op.
*/
@@ -134,11 +134,11 @@ MOCK_DECLARE(int, dma_controller_remove_region, dma_controller_t *dma,
MOCK_DECLARE(void, dma_controller_unmap_region, dma_controller_t *dma,
dma_memory_region_t *region);
-// Helper for dma_addr_to_sg() slow path.
+// Helper for dma_addr_to_sgl() slow path.
int
_dma_addr_sg_split(const dma_controller_t *dma,
vfu_dma_addr_t dma_addr, uint64_t len,
- dma_sg_t *sg, int max_sg, int prot);
+ dma_sg_t *sg, int max_nr_sgs, int prot);
static void
_dma_mark_dirty(const dma_controller_t *dma, const dma_memory_region_t *region,
@@ -188,13 +188,13 @@ dma_init_sg(const dma_controller_t *dma, dma_sg_t *sg, vfu_dma_addr_t dma_addr,
* -1 if
* - the DMA address span is invalid
* - protection violation (errno=EACCES)
- * (-x - 1) if @max_sg is too small, where x is the number of sg entries
+ * (-x - 1) if @max_nr_sgs is too small, where x is the number of sg entries
* necessary to complete this request.
*/
static inline int
-dma_addr_to_sg(const dma_controller_t *dma,
- vfu_dma_addr_t dma_addr, size_t len,
- dma_sg_t *sg, int max_sg, int prot)
+dma_addr_to_sgl(const dma_controller_t *dma,
+ vfu_dma_addr_t dma_addr, size_t len,
+ dma_sg_t *sgl, size_t max_nr_sgs, int prot)
{
static __thread int region_hint;
int cnt, ret;
@@ -203,11 +203,11 @@ dma_addr_to_sg(const dma_controller_t *dma,
const void *region_end = iov_end(&region->info.iova);
// Fast path: single region.
- if (likely(max_sg > 0 && len > 0 &&
+ if (likely(max_nr_sgs > 0 && len > 0 &&
dma_addr >= region->info.iova.iov_base &&
dma_addr + len <= region_end &&
region_hint < dma->nregions)) {
- ret = dma_init_sg(dma, sg, dma_addr, len, prot, region_hint);
+ ret = dma_init_sg(dma, sgl, dma_addr, len, prot, region_hint);
if (ret < 0) {
return ret;
}
@@ -215,24 +215,26 @@ dma_addr_to_sg(const dma_controller_t *dma,
return 1;
}
// Slow path: search through regions.
- cnt = _dma_addr_sg_split(dma, dma_addr, len, sg, max_sg, prot);
+ cnt = _dma_addr_sg_split(dma, dma_addr, len, sgl, max_nr_sgs, prot);
if (likely(cnt > 0)) {
- region_hint = sg->region;
+ region_hint = sgl[0].region;
}
return cnt;
}
static inline int
-dma_map_sg(dma_controller_t *dma, dma_sg_t *sg, struct iovec *iov,
- int cnt)
+dma_sgl_get(dma_controller_t *dma, dma_sg_t *sgl, struct iovec *iov, size_t cnt)
{
dma_memory_region_t *region;
+ dma_sg_t *sg;
assert(dma != NULL);
- assert(sg != NULL);
+ assert(sgl != NULL);
assert(iov != NULL);
assert(cnt > 0);
+ sg = sgl;
+
do {
if (sg->region >= dma->nregions) {
return ERROR_INT(EINVAL);
@@ -257,14 +259,17 @@ dma_map_sg(dma_controller_t *dma, dma_sg_t *sg, struct iovec *iov,
}
static inline void
-dma_mark_sg_dirty(dma_controller_t *dma, dma_sg_t *sg, int cnt)
+dma_sgl_mark_dirty(dma_controller_t *dma, dma_sg_t *sgl, size_t cnt)
{
dma_memory_region_t *region;
+ dma_sg_t *sg;
assert(dma != NULL);
- assert(sg != NULL);
+ assert(sgl != NULL);
assert(cnt > 0);
+ sg = sgl;
+
do {
if (sg->region >= dma->nregions) {
return;
@@ -286,14 +291,17 @@ dma_mark_sg_dirty(dma_controller_t *dma, dma_sg_t *sg, int cnt)
}
static inline void
-dma_unmap_sg(dma_controller_t *dma, dma_sg_t *sg, int cnt)
+dma_sgl_put(dma_controller_t *dma, dma_sg_t *sgl, size_t cnt)
{
dma_memory_region_t *region;
+ dma_sg_t *sg;
assert(dma != NULL);
- assert(sg != NULL);
+ assert(sgl != NULL);
assert(cnt > 0);
+ sg = sgl;
+
do {
if (sg->region >= dma->nregions) {
return;
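
The (-x - 1) overflow convention documented in the dma_addr_to_sgl()
hunk above is forwarded unchanged through the public vfu_addr_to_sgl().
A hedged sketch of how a caller might decode it; the realloc-and-retry
pattern and the surrounding variables are illustrative:

int ret = vfu_addr_to_sgl(vfu_ctx, dma_addr, len, sgl, max_nr_sgs, prot);

if (ret == -1) {
    /* invalid DMA span or protection violation (errno == EACCES) */
} else if (ret < -1) {
    /* (-x - 1): the request needs -ret - 1 entries; grow the SGL, retry. */
    size_t needed = (size_t)(-ret) - 1;
    dma_sg_t *bigger = realloc(sgl, needed * dma_sg_size());

    if (bigger != NULL) {
        sgl = bigger;
        ret = vfu_addr_to_sgl(vfu_ctx, dma_addr, len, sgl, needed, prot);
    }
}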
diff --git a/lib/libvfio-user.c b/lib/libvfio-user.c
index 7d324aa..90c4b39 100644
--- a/lib/libvfio-user.c
+++ b/lib/libvfio-user.c
@@ -2003,8 +2003,8 @@ quiesce_check_allowed(vfu_ctx_t *vfu_ctx)
}
EXPORT int
-vfu_addr_to_sg(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr,
- size_t len, dma_sg_t *sg, int max_sg, int prot)
+vfu_addr_to_sgl(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr,
+ size_t len, dma_sg_t *sgl, size_t max_nr_sgs, int prot)
{
assert(vfu_ctx != NULL);
@@ -2014,31 +2014,24 @@ vfu_addr_to_sg(vfu_ctx_t *vfu_ctx, vfu_dma_addr_t dma_addr,
quiesce_check_allowed(vfu_ctx);
- return dma_addr_to_sg(vfu_ctx->dma, dma_addr, len, sg, max_sg, prot);
+ return dma_addr_to_sgl(vfu_ctx->dma, dma_addr, len, sgl, max_nr_sgs, prot);
}
EXPORT int
-vfu_map_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, struct iovec *iov, int cnt,
- int flags)
+vfu_sgl_get(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, struct iovec *iov, size_t cnt,
+ int flags)
{
- int ret;
-
if (unlikely(vfu_ctx->dma_unregister == NULL) || flags != 0) {
return ERROR_INT(EINVAL);
}
quiesce_check_allowed(vfu_ctx);
- ret = dma_map_sg(vfu_ctx->dma, sg, iov, cnt);
- if (ret < 0) {
- return -1;
- }
-
- return 0;
+ return dma_sgl_get(vfu_ctx->dma, sgl, iov, cnt);
}
EXPORT void
-vfu_mark_sg_dirty(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, int cnt)
+vfu_sgl_mark_dirty(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, size_t cnt)
{
if (unlikely(vfu_ctx->dma_unregister == NULL)) {
return;
@@ -2046,12 +2039,12 @@ vfu_mark_sg_dirty(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, int cnt)
quiesce_check_allowed(vfu_ctx);
- return dma_mark_sg_dirty(vfu_ctx->dma, sg, cnt);
+ return dma_sgl_mark_dirty(vfu_ctx->dma, sgl, cnt);
}
EXPORT void
-vfu_unmap_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg,
- struct iovec *iov UNUSED, int cnt)
+vfu_sgl_put(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl,
+ struct iovec *iov UNUSED, size_t cnt)
{
if (unlikely(vfu_ctx->dma_unregister == NULL)) {
return;
@@ -2059,7 +2052,7 @@ vfu_unmap_sg(vfu_ctx_t *vfu_ctx, dma_sg_t *sg,
quiesce_check_allowed(vfu_ctx);
- return dma_unmap_sg(vfu_ctx->dma, sg, cnt);
+ return dma_sgl_put(vfu_ctx->dma, sgl, cnt);
}
static int
@@ -2156,17 +2149,29 @@ vfu_dma_transfer(vfu_ctx_t *vfu_ctx, enum vfio_user_command cmd,
}
EXPORT int
-vfu_dma_read(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, void *data)
+vfu_sgl_read(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, size_t cnt, void *data)
{
assert(vfu_ctx->pending.state == VFU_CTX_PENDING_NONE);
- return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_READ, sg, data);
+
+ /* Not currently implemented. */
+ if (cnt != 1) {
+ return ERROR_INT(ENOTSUP);
+ }
+
+ return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_READ, sgl, data);
}
EXPORT int
-vfu_dma_write(vfu_ctx_t *vfu_ctx, dma_sg_t *sg, void *data)
+vfu_sgl_write(vfu_ctx_t *vfu_ctx, dma_sg_t *sgl, size_t cnt, void *data)
{
assert(vfu_ctx->pending.state == VFU_CTX_PENDING_NONE);
- return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_WRITE, sg, data);
+
+ /* Not currently implemented. */
+ if (cnt != 1) {
+ return ERROR_INT(ENOTSUP);
+ }
+
+ return vfu_dma_transfer(vfu_ctx, VFIO_USER_DMA_WRITE, sgl, data);
}
EXPORT bool
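
Finally, vfu_sgl_read() and vfu_sgl_write() now take an explicit entry
count, but per the guards added above only cnt == 1 is supported so far.
A hedged sketch of a single-entry socket read; the 512-byte buffer and
the setup mirroring the earlier sketches are illustrative:

uint8_t buf[512];
dma_sg_t *sgl = malloc(dma_sg_size());

if (sgl == NULL ||
    vfu_addr_to_sgl(vfu_ctx, dma_addr, sizeof(buf), sgl, 1, PROT_READ) != 1) {
    /* allocation or SGL-split failure */
} else if (vfu_sgl_read(vfu_ctx, sgl, 1, buf) < 0) {
    /* cnt != 1 fails with ENOTSUP; transfer errors also land here */
}
free(sgl);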