path: root/lib/dma.h
author     John Levon <john.levon@nutanix.com>    2021-03-31 16:49:52 +0100
committer  GitHub <noreply@github.com>            2021-03-31 16:49:52 +0100
commit     14b237fc80473bf4419a2bccda4cf7e7d382c174 (patch)
tree       959e0cc1c4d2557f624df6fa5c189a8cdc92993f /lib/dma.h
parent     0ae00cbb9edcc3879b1276cd61479d668a7f1ec9 (diff)
rework DMA callbacks (#396)
This fixes a number of issues with how DMA is handled, based on some changes by Thanos Makatos:

- rename callbacks to register/unregister, as there is not necessarily any mapping
- provide the (large) page-aligned mapped start and size, the page size used, as well as the protection flags: some API users need these
- for convenience, provide the virtual address separately that corresponds to the mapped region
- we should only require a DMA controller to use vfu_addr_to_sg(), not an unregister callback
- the callbacks should return errno not -errno
- region removal was incorrectly updating the region array
- various other cleanups and clarifications

Signed-off-by: John Levon <john.levon@nutanix.com>
Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
Diffstat (limited to 'lib/dma.h')
-rw-r--r--  lib/dma.h  108
1 file changed, 32 insertions, 76 deletions
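The commit reworks the public DMA callbacks around a per-region vfu_dma_info_t, which the diff below stores directly in dma_memory_region_t. Below is a minimal sketch of what a consumer of that struct might look like: the info fields it touches (iova, vaddr, prot) and the vfu_dma_unregister_cb_t type are taken from the diff, but the callback names, the void return type (the commit message says the real callbacks return plain errno values), and how they get wired up are assumptions; libvfio-user.h is the authoritative reference.

/*
 * Sketch only: a register/unregister callback pair consuming the new
 * vfu_dma_info_t. Field names (iova, vaddr, prot) match the diff below;
 * the prototypes themselves are assumed, not taken from this patch.
 */
#include <stdio.h>

#include "libvfio-user.h"

static void
my_dma_register(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info)
{
    (void)vfu_ctx;
    /* vaddr is non-NULL only when the region is actually mappable. */
    fprintf(stderr, "DMA register: iova=%p len=%zu vaddr=%p prot=%#x\n",
            info->iova.iov_base, info->iova.iov_len, info->vaddr,
            (unsigned)info->prot);
}

static void
my_dma_unregister(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info)
{
    (void)vfu_ctx;
    /* Drop any mappings into this region before it disappears. */
    fprintf(stderr, "DMA unregister: iova=%p len=%zu\n",
            info->iova.iov_base, info->iova.iov_len);
}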
diff --git a/lib/dma.h b/lib/dma.h
index 0eb7ac3..226e758 100644
--- a/lib/dma.h
+++ b/lib/dma.h
@@ -51,8 +51,6 @@
* list (sglist) of dma_sg_t from DMA addresses. Then the sglist
* can be mapped using dma_map_sg() into the process's virtual address space
* as an iovec for direct access, and unmapped using dma_unmap_sg() when done.
- * - dma_map_addr() and dma_unmap_addr() helper functions are provided
- * for mapping DMA regions that can fit into one scatter-gather entry.
* Every region is mapped into the application's virtual address space
* at registration time with R/W permissions.
* dma_map_sg() ignores all protection bits and only does lookups and
@@ -79,18 +77,14 @@
#include "libvfio-user.h"
#include "common.h"
+#define iov_end(iov) ((iov)->iov_base + (iov)->iov_len)
+
struct vfu_ctx;
typedef struct {
- dma_addr_t dma_addr; // DMA address of this region
- uint32_t prot; // memory protection of the mapping
- // defined in sys/mman.h
-
- size_t size; // Size of this region
+ vfu_dma_info_t info;
int fd; // File descriptor to mmap
- int page_size; // Page size of this fd
off_t offset; // File offset
- void *virt_addr; // Virtual address of this region
int refcnt; // Number of users of this region
char *dirty_bitmap; // Dirty page bitmap
} dma_memory_region_t;
@@ -117,20 +111,20 @@ dma_controller_destroy(dma_controller_t *dma);
* (e.g. due to conflict with existing region).
*/
MOCK_DECLARE(int, dma_controller_add_region, dma_controller_t *dma,
- dma_addr_t dma_addr, size_t size, int fd, off_t offset,
+ vfu_dma_addr_t dma_addr, size_t size, int fd, off_t offset,
uint32_t prot);
-MOCK_DECLARE(void, _dma_controller_do_remove_region, dma_controller_t *dma,
- dma_memory_region_t *region);
-
MOCK_DECLARE(int, dma_controller_remove_region, dma_controller_t *dma,
- dma_addr_t dma_addr, size_t size, vfu_unmap_dma_cb_t *unmap_dma,
- void *data);
+ vfu_dma_addr_t dma_addr, size_t size,
+ vfu_dma_unregister_cb_t *dma_unregister, void *data);
+
+MOCK_DECLARE(void, dma_controller_unmap_region, dma_controller_t *dma,
+ dma_memory_region_t *region);
// Helper for dma_addr_to_sg() slow path.
int
_dma_addr_sg_split(const dma_controller_t *dma,
- dma_addr_t dma_addr, uint32_t len,
+ vfu_dma_addr_t dma_addr, uint32_t len,
dma_sg_t *sg, int max_sg, int prot);
static bool
@@ -142,9 +136,9 @@ _dma_should_mark_dirty(const dma_controller_t *dma, int prot)
}
static size_t
-_get_pgstart(size_t pgsize, uint64_t base_addr, uint64_t offset)
+_get_pgstart(size_t pgsize, void *base_addr, uint64_t offset)
{
- return (offset - base_addr) / pgsize;
+ return (offset - (uint64_t)base_addr) / pgsize;
}
static size_t
@@ -164,7 +158,7 @@ _dma_bitmap_get_pgrange(const dma_controller_t *dma,
assert(start != NULL);
assert(end != NULL);
- *start = _get_pgstart(dma->dirty_pgsize, region->dma_addr, sg->offset);
+ *start = _get_pgstart(dma->dirty_pgsize, region->info.iova.iov_base, sg->offset);
*end = _get_pgend(dma->dirty_pgsize, sg->length, *start);
}
@@ -186,24 +180,24 @@ _dma_mark_dirty(const dma_controller_t *dma, const dma_memory_region_t *region,
}
static inline int
-dma_init_sg(const dma_controller_t *dma, dma_sg_t *sg, dma_addr_t dma_addr,
+dma_init_sg(const dma_controller_t *dma, dma_sg_t *sg, vfu_dma_addr_t dma_addr,
uint32_t len, int prot, int region_index)
{
const dma_memory_region_t *const region = &dma->regions[region_index];
- if ((prot & PROT_WRITE) && !(region->prot & PROT_WRITE)) {
+ if ((prot & PROT_WRITE) && !(region->info.prot & PROT_WRITE)) {
errno = EACCES;
return -1;
}
- sg->dma_addr = region->dma_addr;
+ sg->dma_addr = region->info.iova.iov_base;
sg->region = region_index;
- sg->offset = dma_addr - region->dma_addr;
+ sg->offset = dma_addr - region->info.iova.iov_base;
sg->length = len;
if (_dma_should_mark_dirty(dma, prot)) {
_dma_mark_dirty(dma, region, sg);
}
- sg->mappable = region->virt_addr != NULL;
+ sg->mappable = (region->info.vaddr != NULL);
return 0;
}
@@ -223,18 +217,19 @@ dma_init_sg(const dma_controller_t *dma, dma_sg_t *sg, dma_addr_t dma_addr,
*/
static inline int
dma_addr_to_sg(const dma_controller_t *dma,
- dma_addr_t dma_addr, uint32_t len,
+ vfu_dma_addr_t dma_addr, size_t len,
dma_sg_t *sg, int max_sg, int prot)
{
static __thread int region_hint;
int cnt, ret;
const dma_memory_region_t *const region = &dma->regions[region_hint];
- const dma_addr_t region_end = region->dma_addr + region->size;
+ const void *region_end = iov_end(&region->info.iova);
// Fast path: single region.
if (likely(max_sg > 0 && len > 0 &&
- dma_addr >= region->dma_addr && dma_addr + len <= region_end &&
+ dma_addr >= region->info.iova.iov_base &&
+ dma_addr + len <= region_end &&
region_hint < dma->nregions)) {
ret = dma_init_sg(dma, sg, dma_addr, len, prot, region_hint);
if (ret < 0) {
@@ -251,12 +246,6 @@ dma_addr_to_sg(const dma_controller_t *dma,
return cnt;
}
-MOCK_DECLARE(void *, dma_map_region, dma_memory_region_t *region, int prot,
- size_t offset, size_t len);
-
-int
-dma_unmap_region(dma_memory_region_t *region, void *virt_addr, size_t len);
-
static inline int
dma_map_sg(dma_controller_t *dma, const dma_sg_t *sg, struct iovec *iov,
int cnt)
@@ -273,12 +262,15 @@ dma_map_sg(dma_controller_t *dma, const dma_sg_t *sg, struct iovec *iov,
return -EINVAL;
}
region = &dma->regions[sg[i].region];
- if (region->virt_addr == NULL) {
+
+ if (region->info.vaddr == NULL) {
return -EFAULT;
}
+
vfu_log(dma->vfu_ctx, LOG_DEBUG, "map %#lx-%#lx\n",
- sg->dma_addr + sg->offset, sg->dma_addr + sg->offset + sg->length);
- iov[i].iov_base = region->virt_addr + sg[i].offset;
+ sg->dma_addr + sg->offset,
+ sg->dma_addr + sg->offset + sg->length);
+ iov[i].iov_base = region->info.vaddr + sg[i].offset;
iov[i].iov_len = sg[i].length;
region->refcnt++;
}
@@ -299,7 +291,8 @@ dma_unmap_sg(dma_controller_t *dma, const dma_sg_t *sg,
* tfind(3)
*/
for (r = dma->regions;
- r < dma->regions + dma->nregions && r->dma_addr != sg[i].dma_addr;
+ r < dma->regions + dma->nregions &&
+ r->info.iova.iov_base != sg[i].dma_addr;
r++);
if (r > dma->regions + dma->nregions) {
/* bad region */
@@ -312,39 +305,6 @@ dma_unmap_sg(dma_controller_t *dma, const dma_sg_t *sg,
return;
}
-static inline void *
-dma_map_addr(dma_controller_t *dma, dma_addr_t dma_addr, uint32_t len, int prot)
-{
- dma_sg_t sg;
- struct iovec iov;
-
- if (dma_addr_to_sg(dma, dma_addr, len, &sg, 1, prot) == 1 &&
- dma_map_sg(dma, &sg, &iov, 1) == 0) {
- return iov.iov_base;
- }
-
- return NULL;
-}
-
-static inline void
-dma_unmap_addr(dma_controller_t *dma,
- dma_addr_t dma_addr, uint32_t len, void *addr)
-{
- dma_sg_t sg;
- struct iovec iov = {
- .iov_base = addr,
- .iov_len = len,
- };
- int r;
-
- r = dma_addr_to_sg(dma, dma_addr, len, &sg, 1, PROT_NONE);
- if (r != 1) {
- assert(false);
- }
-
- dma_unmap_sg(dma, &sg, &iov, 1);
-}
-
int
dma_controller_dirty_page_logging_start(dma_controller_t *dma, size_t pgsize);
@@ -352,12 +312,8 @@ int
dma_controller_dirty_page_logging_stop(dma_controller_t *dma);
int
-dma_controller_dirty_page_get(dma_controller_t *dma, dma_addr_t addr, int len,
- size_t pgsize, size_t size, char **data);
-
-bool
-dma_controller_region_valid(dma_controller_t *dma, dma_addr_t dma_addr,
- size_t size);
+dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
+ int len, size_t pgsize, size_t size, char **data);
#endif /* LIB_VFIO_USER_DMA_H */
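With the dma_map_addr()/dma_unmap_addr() helpers removed above, a caller that needs a single small mapping goes through the scatter-gather path directly, exactly as the deleted helpers did internally. A minimal sketch using only the declarations in this header (dma_addr_to_sg(), dma_map_sg(), dma_unmap_sg()); the wrapper names map_one()/unmap_one() are illustrative, not part of the library:

/*
 * Sketch: single-entry map/unmap via the sg path, mirroring the removed
 * dma_map_addr()/dma_unmap_addr() helpers.
 */
#include <stddef.h>
#include <sys/uio.h>

#include "dma.h"

static void *
map_one(dma_controller_t *dma, vfu_dma_addr_t dma_addr, size_t len,
        int prot, dma_sg_t *sg, struct iovec *iov)
{
    /* Translate the DMA range into at most one scatter-gather entry. */
    if (dma_addr_to_sg(dma, dma_addr, len, sg, 1, prot) != 1) {
        return NULL;
    }
    /* Map that entry into our virtual address space. */
    if (dma_map_sg(dma, sg, iov, 1) != 0) {
        return NULL;
    }
    return iov->iov_base;
}

static void
unmap_one(dma_controller_t *dma, dma_sg_t *sg, struct iovec *iov)
{
    /* Release the reference taken by dma_map_sg(). */
    dma_unmap_sg(dma, sg, iov, 1);
}

Note that dma_map_sg() takes a reference on the region (the refcnt++ in the diff), so every successful map_one() must be paired with an unmap_one().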