diff options
-rw-r--r-- | README.md | 43 | ||||
-rw-r--r-- | include/vfio-user.h | 26 | ||||
-rw-r--r-- | lib/dma.c | 6 | ||||
-rw-r--r-- | lib/libvfio-user.c | 63 | ||||
-rw-r--r-- | lib/migration.c | 170 | ||||
-rw-r--r-- | lib/migration.h | 7 | ||||
-rw-r--r-- | lib/migration_priv.h | 20 | ||||
-rw-r--r-- | lib/private.h | 7 | ||||
-rw-r--r-- | samples/client.c | 26 | ||||
-rw-r--r-- | test/mocks.c | 136 | ||||
-rw-r--r-- | test/mocks.h | 5 | ||||
-rw-r--r-- | test/unit-tests.c | 21 |
12 files changed, 416 insertions, 114 deletions
@@ -194,6 +194,49 @@ the emulated GPIO device's pins: cat /sys/class/gpio/gpiochip480/base > /sys/class/gpio/export for ((i=0;i<12;i++)); do cat /sys/class/gpio/OUT0/value; done +libvirt +------- + +1. Add `xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'` to the `domain` + element. + +2. Enable sharing of the guest's RAM: + + <memoryBacking> + <source type='file'/> + <access mode='shared'/> + </memoryBacking> + +3. Pass the vfio-user device: + + <qemu:commandline> + <qemu:arg value='-device'/> + <qemu:arg value='vfio-user-pci,socket=/var/run/vfio-user.sock,x-enable-migration=on'/> + </qemu:commandline> + + +nvmf/vfio-user +-------------- + +[SPDK v21.01](https://github.com/spdk/spdk/releases/tag/v21.01) added +experimental support for a virtual NVMe controller. The controller can be +used with the same command line as the one used for GPIO. + +To use the nvmf/vfio-user target with a libvirt guest, the guest RAM must be +backed by hugepages: + + <memoryBacking> + <hugepages> + <page size='2048' unit='KiB'/> + </hugepages> + <source type='memfd'/> + <access mode='shared'/> + </memoryBacking> + +Because SPDK must be run as root, either fix the vfio-user socket permissions +or configure libvirt to run QEMU as root. 
+ + Mailing List & Chat =================== diff --git a/include/vfio-user.h b/include/vfio-user.h index 9d820c9..d90db1d 100644 --- a/include/vfio-user.h +++ b/include/vfio-user.h @@ -133,6 +133,20 @@ struct vfio_user_irq_info { uint32_t subindex; } __attribute__((packed)); +/* based on struct vfio_bitmap */ +struct vfio_user_bitmap { + uint64_t pgsize; + uint64_t size; + char data[]; +} __attribute__((packed)); + +/* based on struct vfio_iommu_type1_dirty_bitmap_get */ +struct vfio_user_bitmap_range { + uint64_t iova; + uint64_t size; + struct vfio_user_bitmap bitmap; +} __attribute__((packed)); + #if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0) /* copied from <linux/vfio.h> */ @@ -171,12 +185,6 @@ struct vfio_device_migration_info { }; #endif /* not a RHEL kernel */ -struct vfio_bitmap { - __u64 pgsize; /* page size for bitmap in bytes */ - __u64 size; /* in bytes */ - __u64 *data; /* one bit per page */ -}; - struct vfio_iommu_type1_dirty_bitmap { __u32 argsz; __u32 flags; @@ -186,12 +194,6 @@ struct vfio_iommu_type1_dirty_bitmap { __u8 data[]; }; -struct vfio_iommu_type1_dirty_bitmap_get { - __u64 iova; /* IO virtual address */ - __u64 size; /* Size of iova range */ - struct vfio_bitmap bitmap; -}; - #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0) */ #ifdef __cplusplus @@ -423,7 +423,7 @@ get_bitmap_size(size_t region_size, size_t pgsize) return ERROR_INT(EINVAL); } size_t nr_pages = (region_size / pgsize) + (region_size % pgsize != 0); - return (nr_pages / CHAR_BIT) + (nr_pages % CHAR_BIT != 0); + return ROUND_UP(nr_pages, sizeof(uint64_t) * CHAR_BIT) / CHAR_BIT; } int dma_controller_dirty_page_logging_start(dma_controller_t *dma, size_t pgsize) @@ -511,11 +511,13 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr, } if (pgsize != dma->dirty_pgsize) { + vfu_log(dma->vfu_ctx, LOG_ERR, "bad page size %ld", pgsize); return ERROR_INT(EINVAL); } bitmap_size = get_bitmap_size(len, pgsize); if (bitmap_size < 0) { + vfu_log(dma->vfu_ctx, 
LOG_ERR, "failed to get bitmap size"); return bitmap_size; } @@ -524,6 +526,8 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr, * expects to receive. */ if (size != (size_t)bitmap_size) { + vfu_log(dma->vfu_ctx, LOG_ERR, "bad bitmap size %ld != %ld", size, + bitmap_size); return ERROR_INT(EINVAL); } diff --git a/lib/libvfio-user.c b/lib/libvfio-user.c index 7518a46..c4f6c42 100644 --- a/lib/libvfio-user.c +++ b/lib/libvfio-user.c @@ -543,6 +543,11 @@ handle_dma_map_or_unmap(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg) ret = 0; } else { + + if (region->flags != 0) { + vfu_log(vfu_ctx, LOG_ERR, "bad flags=%#x", region->flags); + return ERROR_INT(ENOTSUP); + } ret = dma_controller_remove_region(vfu_ctx->dma, (void *)region->addr, region->size, @@ -560,20 +565,34 @@ handle_dma_map_or_unmap(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg) } static int -handle_device_reset(vfu_ctx_t *vfu_ctx) +do_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t reason) { - vfu_log(vfu_ctx, LOG_DEBUG, "Device reset called by client"); + int ret; + if (vfu_ctx->reset != NULL) { - return vfu_ctx->reset(vfu_ctx, VFU_RESET_DEVICE); + ret = vfu_ctx->reset(vfu_ctx, reason); + if (ret < 0) { + return ret; + } + } + if (vfu_ctx->migration != NULL) { + return handle_device_state(vfu_ctx, vfu_ctx->migration, + VFIO_DEVICE_STATE_RUNNING, false); } return 0; } -static int -handle_dirty_pages_get(vfu_ctx_t *vfu_ctx, - struct iovec **iovecs, size_t *nr_iovecs, - struct vfio_iommu_type1_dirty_bitmap_get *ranges, - uint32_t size) +int +handle_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t reason) +{ + return do_device_reset(vfu_ctx, reason); +} + +int +MOCK_DEFINE(handle_dirty_pages_get)(vfu_ctx_t *vfu_ctx, + struct iovec **iovecs, size_t *nr_iovecs, + struct vfio_user_bitmap_range *ranges, + uint32_t size) { int ret = EINVAL; size_t i; @@ -583,23 +602,25 @@ handle_dirty_pages_get(vfu_ctx_t *vfu_ctx, assert(nr_iovecs != NULL); assert(ranges != NULL); - if (size % sizeof(struct 
vfio_iommu_type1_dirty_bitmap_get) != 0) { + if (size % sizeof(struct vfio_user_bitmap_range) != 0) { return ERROR_INT(EINVAL); } - *nr_iovecs = size / sizeof(struct vfio_iommu_type1_dirty_bitmap_get); + *nr_iovecs = size / sizeof(struct vfio_user_bitmap_range); *iovecs = malloc(*nr_iovecs * sizeof(struct iovec)); if (*iovecs == NULL) { return -1; } for (i = 0; i < *nr_iovecs; i++) { - struct vfio_iommu_type1_dirty_bitmap_get *r = &ranges[i]; + struct vfio_user_bitmap_range *r = &ranges[i]; ret = dma_controller_dirty_page_get(vfu_ctx->dma, (vfu_dma_addr_t)r->iova, r->size, r->bitmap.pgsize, r->bitmap.size, (char **)&((*iovecs)[i].iov_base)); if (ret != 0) { ret = errno; + vfu_log(vfu_ctx, LOG_WARNING, + "failed to get dirty bitmap from DMA controller: %m"); goto out; } (*iovecs)[i].iov_len = r->bitmap.size; @@ -644,7 +665,7 @@ MOCK_DEFINE(handle_dirty_pages)(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg) break; case VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP: { - struct vfio_iommu_type1_dirty_bitmap_get *get; + struct vfio_user_bitmap_range *get; get = (void *)(dirty_bitmap + 1); ret = handle_dirty_pages_get(vfu_ctx, &msg->out_iovecs, @@ -829,11 +850,12 @@ MOCK_DEFINE(should_exec_command)(vfu_ctx_t *vfu_ctx, uint16_t cmd) "bad command %d while device in stop-and-copy state", cmd); return false; } - } else if (device_is_stopped(vfu_ctx->migration) && - cmd != VFIO_USER_DIRTY_PAGES) { - vfu_log(vfu_ctx, LOG_ERR, - "bad command %d while device in stopped state", cmd); - return false; + } else if (device_is_stopped(vfu_ctx->migration)) { + if (!cmd_allowed_when_stopped_and_copying(cmd)) { + vfu_log(vfu_ctx, LOG_ERR, + "bad command %d while device in stopped state", cmd); + return false; + } } return true; } @@ -873,7 +895,8 @@ MOCK_DEFINE(exec_command)(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg) break; case VFIO_USER_DEVICE_RESET: - ret = handle_device_reset(vfu_ctx); + vfu_log(vfu_ctx, LOG_INFO, "device reset by client"); + ret = handle_device_reset(vfu_ctx, VFU_RESET_DEVICE); break; case 
VFIO_USER_DIRTY_PAGES: @@ -1091,9 +1114,7 @@ vfu_reset_ctx(vfu_ctx_t *vfu_ctx, const char *reason) vfu_ctx); } - if (vfu_ctx->reset != NULL) { - vfu_ctx->reset(vfu_ctx, VFU_RESET_LOST_CONN); - } + do_device_reset(vfu_ctx, VFU_RESET_LOST_CONN); if (vfu_ctx->irqs != NULL) { irqs_reset(vfu_ctx); diff --git a/lib/migration.c b/lib/migration.c index 7294403..8f0706a 100644 --- a/lib/migration.c +++ b/lib/migration.c @@ -40,7 +40,7 @@ #include "migration_priv.h" bool -vfio_migr_state_transition_is_valid(uint32_t from, uint32_t to) +MOCK_DEFINE(vfio_migr_state_transition_is_valid)(uint32_t from, uint32_t to) { return migr_states[from].state & (1 << to); } @@ -98,48 +98,23 @@ init_migration(const vfu_migration_callbacks_t * callbacks, return migr; } -static void -migr_state_transition(struct migration *migr, enum migr_iter_state state) +void +MOCK_DEFINE(migr_state_transition)(struct migration *migr, + enum migr_iter_state state) { assert(migr != NULL); /* FIXME validate that state transition */ migr->iter.state = state; } -static ssize_t -handle_device_state(vfu_ctx_t *vfu_ctx, struct migration *migr, - uint32_t *device_state, bool is_write) { - - int ret; - - assert(migr != NULL); - assert(device_state != NULL); - - if (!is_write) { - *device_state = migr->info.device_state; - return 0; - } - - if (*device_state & ~VFIO_DEVICE_STATE_MASK) { - vfu_log(vfu_ctx, LOG_ERR, "bad device state %#x", *device_state); - return ERROR_INT(EINVAL); - } - - if (!vfio_migr_state_transition_is_valid(migr->info.device_state, - *device_state)) { - vfu_log(vfu_ctx, LOG_ERR, "bad transition from state %s to state %s", - migr_states[migr->info.device_state].name, - migr_states[*device_state].name); - return ERROR_INT(EINVAL); - } - - switch (*device_state) { +vfu_migr_state_t +MOCK_DEFINE(migr_state_vfio_to_vfu)(uint32_t device_state) +{ + switch (device_state) { case VFIO_DEVICE_STATE_STOP: - ret = migr->callbacks.transition(vfu_ctx, VFU_MIGR_STATE_STOP); - break; + return 
VFU_MIGR_STATE_STOP; case VFIO_DEVICE_STATE_RUNNING: - ret = migr->callbacks.transition(vfu_ctx, VFU_MIGR_STATE_RUNNING); - break; + return VFU_MIGR_STATE_RUNNING; case VFIO_DEVICE_STATE_SAVING: /* * FIXME How should the device operate during the stop-and-copy @@ -147,30 +122,69 @@ handle_device_state(vfu_ctx_t *vfu_ctx, struct migration *migr, * the migration region? E.g. Access to any other region should be * failed? This might be a good question to send to LKML. */ - ret = migr->callbacks.transition(vfu_ctx, - VFU_MIGR_STATE_STOP_AND_COPY); - break; + return VFU_MIGR_STATE_STOP_AND_COPY; case VFIO_DEVICE_STATE_RUNNING | VFIO_DEVICE_STATE_SAVING: - ret = migr->callbacks.transition(vfu_ctx, VFU_MIGR_STATE_PRE_COPY); - break; + return VFU_MIGR_STATE_PRE_COPY; case VFIO_DEVICE_STATE_RESUMING: - ret = migr->callbacks.transition(vfu_ctx, VFU_MIGR_STATE_RESUME); - break; - default: - assert(false); + return VFU_MIGR_STATE_RESUME; } + return -1; +} - if (ret == 0) { - migr->info.device_state = *device_state; - migr_state_transition(migr, VFIO_USER_MIGR_ITER_STATE_INITIAL); - } else if (ret < 0) { - vfu_log(vfu_ctx, LOG_ERR, "failed to transition to state %d: %m", - *device_state); +/** + * Returns 0 on success, -1 on error setting errno. + */ +int +MOCK_DEFINE(state_trans_notify)(vfu_ctx_t *vfu_ctx, + int (*fn)(vfu_ctx_t *, vfu_migr_state_t), + uint32_t vfio_device_state) +{ + /* + * We've already checked that device_state is valid by calling + * vfio_migr_state_transition_is_valid. + */ + return fn(vfu_ctx, migr_state_vfio_to_vfu(vfio_device_state)); +} + +/** + * Returns 0 on success, -1 on failure setting errno. 
+ */ +ssize_t +MOCK_DEFINE(migr_trans_to_valid_state)(vfu_ctx_t *vfu_ctx, struct migration *migr, + uint32_t device_state, bool notify) +{ + if (notify) { + int ret = state_trans_notify(vfu_ctx, migr->callbacks.transition, + device_state); + if (ret != 0) { + return ret; + } } + migr->info.device_state = device_state; + migr_state_transition(migr, VFIO_USER_MIGR_ITER_STATE_INITIAL); + return 0; +} - return ret; +/** + * Returns 0 on success, -1 on failure setting errno. + */ +ssize_t +MOCK_DEFINE(handle_device_state)(vfu_ctx_t *vfu_ctx, struct migration *migr, + uint32_t device_state, bool notify) +{ + + assert(migr != NULL); + + if (!vfio_migr_state_transition_is_valid(migr->info.device_state, + device_state)) { + return ERROR_INT(EINVAL); + } + return migr_trans_to_valid_state(vfu_ctx, migr, device_state, notify); } +/** + * Returns 0 on success, -1 on error setting errno. + */ static ssize_t handle_pending_bytes(vfu_ctx_t *vfu_ctx, struct migration *migr, uint64_t *pending_bytes, bool is_write) @@ -223,6 +237,9 @@ handle_pending_bytes(vfu_ctx_t *vfu_ctx, struct migration *migr, * Make this behavior conditional. */ +/** + * Returns 0 on success, -1 on error setting errno. + */ static ssize_t handle_data_offset_when_saving(vfu_ctx_t *vfu_ctx, struct migration *migr, bool is_write) @@ -240,7 +257,7 @@ handle_data_offset_when_saving(vfu_ctx_t *vfu_ctx, struct migration *migr, case VFIO_USER_MIGR_ITER_STATE_STARTED: ret = migr->callbacks.prepare_data(vfu_ctx, &migr->iter.offset, &migr->iter.size); - if (ret < 0) { + if (ret != 0) { return ret; } /* @@ -266,6 +283,9 @@ handle_data_offset_when_saving(vfu_ctx_t *vfu_ctx, struct migration *migr, return 0; } +/** + * Returns 0 on success, -1 on error setting errno. 
+ */ static ssize_t handle_data_offset(vfu_ctx_t *vfu_ctx, struct migration *migr, uint64_t *offset, bool is_write) @@ -290,7 +310,7 @@ handle_data_offset(vfu_ctx_t *vfu_ctx, struct migration *migr, return ERROR_INT(EINVAL); } ret = migr->callbacks.prepare_data(vfu_ctx, offset, NULL); - if (ret < 0) { + if (ret != 0) { return ret; } *offset += migr->data_offset; @@ -303,6 +323,9 @@ handle_data_offset(vfu_ctx_t *vfu_ctx, struct migration *migr, return ERROR_INT(EINVAL); } +/** + * Returns 0 on success, -1 on failure setting errno. + */ static ssize_t handle_data_size_when_saving(vfu_ctx_t *vfu_ctx, struct migration *migr, bool is_write) @@ -324,6 +347,9 @@ handle_data_size_when_saving(vfu_ctx_t *vfu_ctx, struct migration *migr, return 0; } +/** + * Returns 0 on success, -1 on error setting errno. + */ static ssize_t handle_data_size_when_resuming(vfu_ctx_t *vfu_ctx, struct migration *migr, uint64_t size, bool is_write) @@ -331,11 +357,14 @@ handle_data_size_when_resuming(vfu_ctx_t *vfu_ctx, struct migration *migr, assert(migr != NULL); if (is_write) { - return migr->callbacks.data_written(vfu_ctx, size); + return migr->callbacks.data_written(vfu_ctx, size); } return 0; } +/** + * Returns 0 on success, -1 on failure setting errno. + */ static ssize_t handle_data_size(vfu_ctx_t *vfu_ctx, struct migration *migr, uint64_t *size, bool is_write) @@ -361,12 +390,17 @@ handle_data_size(vfu_ctx_t *vfu_ctx, struct migration *migr, return ERROR_INT(EINVAL); } -static ssize_t -migration_region_access_registers(vfu_ctx_t *vfu_ctx, char *buf, size_t count, - loff_t pos, bool is_write) +/** + * Returns 0 on success, -1 on failure setting errno. 
+ */ +ssize_t +MOCK_DEFINE(migration_region_access_registers)(vfu_ctx_t *vfu_ctx, char *buf, + size_t count, loff_t pos, + bool is_write) { struct migration *migr = vfu_ctx->migration; int ret; + uint32_t *device_state, old_device_state; assert(migr != NULL); @@ -377,7 +411,24 @@ migration_region_access_registers(vfu_ctx_t *vfu_ctx, char *buf, size_t count, "bad device_state access size %ld", count); return ERROR_INT(EINVAL); } - ret = handle_device_state(vfu_ctx, migr, (uint32_t *)buf, is_write); + device_state = (uint32_t *)buf; + if (!is_write) { + *device_state = migr->info.device_state; + return 0; + } + old_device_state = migr->info.device_state; + ret = handle_device_state(vfu_ctx, migr, *device_state , true); + if (ret == 0) { + vfu_log(vfu_ctx, LOG_DEBUG, + "migration: transition from state %s to state %s", + migr_states[old_device_state].name, + migr_states[*device_state].name); + } else { + vfu_log(vfu_ctx, LOG_ERR, + "migration: failed to transition from state %s to state %s", + migr_states[old_device_state].name, + migr_states[*device_state].name); + } break; case offsetof(struct vfio_device_migration_info, pending_bytes): if (count != sizeof(migr->info.pending_bytes)) { @@ -430,6 +481,9 @@ migration_region_access(vfu_ctx_t *vfu_ctx, char *buf, size_t count, if (pos + count <= sizeof(struct vfio_device_migration_info)) { ret = migration_region_access_registers(vfu_ctx, buf, count, pos, is_write); + if (ret != 0) { + return ret; + } } else { if (pos < (loff_t)migr->data_offset) { diff --git a/lib/migration.h b/lib/migration.h index aeaca66..ccb98aa 100644 --- a/lib/migration.h +++ b/lib/migration.h @@ -65,8 +65,11 @@ migration_get_pgsize(struct migration *migr); int migration_set_pgsize(struct migration *migr, size_t pgsize); -bool -vfio_migr_state_transition_is_valid(uint32_t from, uint32_t to); +MOCK_DECLARE(bool, vfio_migr_state_transition_is_valid, uint32_t from, + uint32_t to); + +MOCK_DECLARE(ssize_t, handle_device_state, vfu_ctx_t *vfu_ctx, + 
struct migration *migr, uint32_t device_state, bool notify); #endif /* LIB_VFIO_USER_MIGRATION_H */ diff --git a/lib/migration_priv.h b/lib/migration_priv.h index bf47621..1a0496f 100644 --- a/lib/migration_priv.h +++ b/lib/migration_priv.h @@ -75,7 +75,9 @@ struct migr_state_data { /* valid migration state transitions */ static const struct migr_state_data migr_states[(VFIO_DEVICE_STATE_MASK + 1)] = { [VFIO_DEVICE_STATE_STOP] = { - .state = 1 << VFIO_DEVICE_STATE_STOP, + .state = + (1 << VFIO_DEVICE_STATE_STOP) | + (1 << VFIO_DEVICE_STATE_RUNNING), .name = "stopped" }, [VFIO_DEVICE_STATE_RUNNING] = { @@ -91,6 +93,7 @@ static const struct migr_state_data migr_states[(VFIO_DEVICE_STATE_MASK + 1)] = [VFIO_DEVICE_STATE_SAVING] = { .state = (1 << VFIO_DEVICE_STATE_STOP) | + (1 << VFIO_DEVICE_STATE_RUNNING) | (1 << VFIO_DEVICE_STATE_SAVING) | (1 << VFIO_DEVICE_STATE_ERROR), .name = "stop-and-copy" @@ -112,6 +115,21 @@ static const struct migr_state_data migr_states[(VFIO_DEVICE_STATE_MASK + 1)] = } }; +MOCK_DECLARE(ssize_t, migration_region_access_registers, vfu_ctx_t *vfu_ctx, + char *buf, size_t count, loff_t pos, bool is_write); + +MOCK_DECLARE(void, migr_state_transition, struct migration *migr, + enum migr_iter_state state); + +MOCK_DECLARE(vfu_migr_state_t, migr_state_vfio_to_vfu, uint32_t device_state); + +MOCK_DECLARE(int, state_trans_notify, vfu_ctx_t *vfu_ctx, + int (*fn)(vfu_ctx_t *, vfu_migr_state_t), + uint32_t vfio_device_state); + +MOCK_DECLARE(ssize_t, migr_trans_to_valid_state, vfu_ctx_t *vfu_ctx, + struct migration *migr, uint32_t device_state, bool notify); + #endif /* ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */ diff --git a/lib/private.h b/lib/private.h index 97b2469..2d84fb8 100644 --- a/lib/private.h +++ b/lib/private.h @@ -192,6 +192,13 @@ MOCK_DECLARE(int, exec_command, vfu_ctx_t *vfu_ctx, vfu_msg_t *msg); MOCK_DECLARE(int, process_request, vfu_ctx_t *vfu_ctx); +MOCK_DECLARE(int, handle_dirty_pages_get, vfu_ctx_t *vfu_ctx, + struct 
iovec **iovecs, size_t *nr_iovecs, + struct vfio_user_bitmap_range *ranges, uint32_t size); + +int +handle_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t reason); + #endif /* LIB_VFIO_USER_PRIVATE_H */ /* ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */ diff --git a/samples/client.c b/samples/client.c index 6c2edd4..420269c 100644 --- a/samples/client.c +++ b/samples/client.c @@ -642,7 +642,7 @@ get_dirty_bitmaps(int sock, struct vfio_user_dma_region *dma_regions, UNUSED int nr_dma_regions) { struct vfio_iommu_type1_dirty_bitmap dirty_bitmap = { 0 }; - struct vfio_iommu_type1_dirty_bitmap_get bitmaps[2] = { { 0 }, }; + struct vfio_user_bitmap_range bitmaps[2] = { { 0 }, }; int ret; size_t i; struct iovec iovecs[4] = { @@ -652,7 +652,7 @@ get_dirty_bitmaps(int sock, struct vfio_user_dma_region *dma_regions, } }; struct vfio_user_header hdr = {0}; - char data[ARRAY_SIZE(bitmaps)]; + uint64_t data[ARRAY_SIZE(bitmaps)]; assert(dma_regions != NULL); //FIXME: Is below assert correct? @@ -661,28 +661,28 @@ get_dirty_bitmaps(int sock, struct vfio_user_dma_region *dma_regions, for (i = 0; i < ARRAY_SIZE(bitmaps); i++) { bitmaps[i].iova = dma_regions[i].addr; bitmaps[i].size = dma_regions[i].size; - bitmaps[i].bitmap.size = 1; /* FIXME calculate based on page and IOVA size, don't hardcode */ + bitmaps[i].bitmap.size = sizeof(uint64_t); /* FIXME calculate based on page and IOVA size, don't hardcode */ bitmaps[i].bitmap.pgsize = sysconf(_SC_PAGESIZE); iovecs[(i + 2)].iov_base = &bitmaps[i]; /* FIXME the +2 is because iovecs[0] is the vfio_user_header and iovecs[1] is vfio_iommu_type1_dirty_bitmap */ - iovecs[(i + 2)].iov_len = sizeof(struct vfio_iommu_type1_dirty_bitmap_get); + iovecs[(i + 2)].iov_len = sizeof(struct vfio_user_bitmap_range); } /* * FIXME there should be at least two IOVAs. Send single message for two * IOVAs and ensure only one bit is set in first IOVA. 
*/ - dirty_bitmap.argsz = sizeof(dirty_bitmap) + ARRAY_SIZE(bitmaps) * sizeof(struct vfio_iommu_type1_dirty_bitmap_get); + dirty_bitmap.argsz = sizeof(dirty_bitmap) + ARRAY_SIZE(bitmaps) * sizeof(struct vfio_user_bitmap_range); dirty_bitmap.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; ret = tran_sock_msg_iovec(sock, 0, VFIO_USER_DIRTY_PAGES, iovecs, ARRAY_SIZE(iovecs), NULL, 0, - &hdr, data, ARRAY_SIZE(data), NULL, 0); + &hdr, data, ARRAY_SIZE(data) * sizeof(uint64_t), NULL, 0); if (ret != 0) { err(EXIT_FAILURE, "failed to start dirty page logging"); } for (i = 0; i < ARRAY_SIZE(bitmaps); i++) { - printf("client: %s: %#llx-%#llx\t%hhu\n", __func__, bitmaps[i].iova, + printf("client: %s: %#lx-%#lx\t%#lx\n", __func__, bitmaps[i].iova, bitmaps[i].iova + bitmaps[i].size - 1, data[i]); } } @@ -1212,9 +1212,15 @@ int main(int argc, char *argv[]) * * unmap the first group of the DMA regions */ - ret = tran_sock_msg(sock, 7, VFIO_USER_DMA_UNMAP, - dma_regions, sizeof(*dma_regions) * server_max_fds, - NULL, NULL, 0); + { + struct vfio_user_dma_region r[server_max_fds]; + memcpy(r, dma_regions, sizeof(r)); + for (i = 0; i < (int)ARRAY_SIZE(r); i++) { + r[i].flags = 0; + } + ret = tran_sock_msg(sock, 7, VFIO_USER_DMA_UNMAP, r, sizeof(r), + NULL, NULL, 0); + } if (ret < 0) { err(EXIT_FAILURE, "failed to unmap DMA regions"); } diff --git a/test/mocks.c b/test/mocks.c index be9767f..5a2b55f 100644 --- a/test/mocks.c +++ b/test/mocks.c @@ -44,6 +44,7 @@ #include "mocks.h" #include "private.h" #include "tran_sock.h" +#include "migration_priv.h" struct function { @@ -67,6 +68,14 @@ static struct function funcs[] = { { .name = "process_request" }, { .name = "should_exec_command" }, { .name = "tran_sock_send_iovec" }, + { .name = "migration_region_access_registers" }, + { .name = "handle_dirty_pages_get" }, + { .name = "handle_device_state" }, + {. 
name = "vfio_migr_state_transition_is_valid" }, + { .name = "state_trans_notify" }, + { .name = "migr_trans_to_valid_state" }, + { .name = "migr_state_vfio_to_vfu" }, + { .name = "migr_state_transition" }, /* system libs */ { .name = "bind" }, { .name = "close" }, @@ -258,6 +267,116 @@ handle_dirty_pages(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg) return mock(); } +ssize_t +migration_region_access_registers(vfu_ctx_t *vfu_ctx, char *buf, size_t count, + loff_t pos, bool is_write) +{ + if (!is_patched("migration_region_access_registers")) { + return __real_migration_region_access_registers(vfu_ctx, buf, count, + pos, is_write); + } + check_expected(vfu_ctx); + check_expected(buf); + check_expected(count); + check_expected(pos); + check_expected(is_write); + errno = mock(); + return mock(); +} + +int +handle_dirty_pages_get(vfu_ctx_t *vfu_ctx, + struct iovec **iovecs, size_t *nr_iovecs, + struct vfio_user_bitmap_range *ranges, uint32_t size) +{ + if (!is_patched("handle_dirty_pages_get")) { + return __real_handle_dirty_pages_get(vfu_ctx, iovecs, nr_iovecs, + ranges, size); + } + check_expected(vfu_ctx); + check_expected(iovecs); + check_expected(nr_iovecs); + check_expected(ranges); + check_expected(size); + return mock(); +} + +ssize_t +handle_device_state(vfu_ctx_t *vfu_ctx, struct migration *migr, + uint32_t device_state, bool notify) { + + if (!is_patched("handle_device_state")) { + return __real_handle_device_state(vfu_ctx, migr, device_state, + notify); + } + check_expected(vfu_ctx); + check_expected(migr); + check_expected(device_state); + check_expected(notify); + return mock(); +} + +void +migr_state_transition(struct migration *migr, enum migr_iter_state state) +{ + if (!is_patched("migr_state_transition")) { + __real_migr_state_transition(migr, state); + return; + } + check_expected(migr); + check_expected(state); +} + +bool +vfio_migr_state_transition_is_valid(uint32_t from, uint32_t to) +{ + if (!is_patched("vfio_migr_state_transition_is_valid")) { + return 
__real_vfio_migr_state_transition_is_valid(from, to); + } + check_expected(from); + check_expected(to); + return mock(); +} + +int +state_trans_notify(vfu_ctx_t *vfu_ctx, int (*fn)(vfu_ctx_t*, vfu_migr_state_t), + uint32_t vfio_device_state) +{ + if (!is_patched("state_trans_notify")) { + return __real_state_trans_notify(vfu_ctx, fn, vfio_device_state); + } + check_expected(vfu_ctx); + check_expected(fn); + check_expected(vfio_device_state); + errno = mock(); + return mock(); +} + +ssize_t +migr_trans_to_valid_state(vfu_ctx_t *vfu_ctx, struct migration *migr, + uint32_t device_state, bool notify) +{ + if (!is_patched("migr_trans_to_valid_state")) { + return __real_migr_trans_to_valid_state(vfu_ctx, migr, device_state, + notify); + } + check_expected(vfu_ctx); + check_expected(migr); + check_expected(device_state); + check_expected(notify); + return mock(); +} + +vfu_migr_state_t +migr_state_vfio_to_vfu(uint32_t vfio_device_state) +{ + if (!is_patched("migr_state_vfio_to_vfu")) { + return __real_migr_state_vfio_to_vfu(vfio_device_state); + } + check_expected(vfio_device_state); + return mock(); +} + /* Always mocked. */ void mock_dma_register(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info) @@ -274,6 +393,23 @@ mock_dma_unregister(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info) return mock(); } +int +mock_reset_cb(vfu_ctx_t *vfu_ctx, vfu_reset_type_t type) +{ + check_expected(vfu_ctx); + check_expected(type); + return mock(); +} + + +int +mock_notify_migr_state_trans_cb(vfu_ctx_t *vfu_ctx, vfu_migr_state_t vfu_state) +{ + check_expected(vfu_ctx); + check_expected(vfu_state); + return mock(); +} + /* System-provided funcs. 
*/ int diff --git a/test/mocks.h b/test/mocks.h index 496cc54..7547956 100644 --- a/test/mocks.h +++ b/test/mocks.h @@ -38,4 +38,9 @@ void mock_dma_register(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info); int mock_dma_unregister(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info); +int mock_reset_cb(vfu_ctx_t *vfu_ctx, vfu_reset_type_t type); + +int mock_notify_migr_state_trans_cb(vfu_ctx_t *vfu_ctx, + vfu_migr_state_t vfu_state); + /* ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */ diff --git a/test/unit-tests.c b/test/unit-tests.c index 029e8d2..cac4445 100644 --- a/test/unit-tests.c +++ b/test/unit-tests.c @@ -845,9 +845,10 @@ test_migration_state_transitions(void **state UNUSED) bool (*f)(uint32_t, uint32_t) = vfio_migr_state_transition_is_valid; uint32_t i, j; - /* from stopped (000b): all transitions are invalid */ + /* from stopped (000b): all transitions are invalid except to running */ assert_true(f(0, 0)); - for (i = 1; i < 8; i++) { + assert_true(f(0, 1)); + for (i = 2; i < 8; i++) { assert_false(f(0, i)); } @@ -863,7 +864,7 @@ test_migration_state_transitions(void **state UNUSED) /* from stop-and-copy (010b) */ assert_true(f(2, 0)); - assert_false(f(2, 1)); + assert_true(f(2, 1)); assert_true(f(2, 2)); assert_false(f(2, 3)); assert_false(f(2, 4)); @@ -1107,28 +1108,30 @@ test_should_exec_command(UNUSED void **state) patch("cmd_allowed_when_stopped_and_copying"); patch("device_is_stopped"); - /* XXX stopped and copying, command allowed */ + /* TEST stopped and copying, command allowed */ will_return(device_is_stopped_and_copying, true); expect_value(device_is_stopped_and_copying, migration, &migration); will_return(cmd_allowed_when_stopped_and_copying, true); expect_value(cmd_allowed_when_stopped_and_copying, cmd, 0xbeef); assert_true(should_exec_command(&vfu_ctx, 0xbeef)); - /* XXX stopped and copying, command not allowed */ + /* TEST stopped and copying, command not allowed */ will_return(device_is_stopped_and_copying, true); 
expect_any(device_is_stopped_and_copying, migration); will_return(cmd_allowed_when_stopped_and_copying, false); expect_any(cmd_allowed_when_stopped_and_copying, cmd); assert_false(should_exec_command(&vfu_ctx, 0xbeef)); - /* XXX stopped */ + /* TEST stopped */ will_return(device_is_stopped_and_copying, false); expect_any(device_is_stopped_and_copying, migration); will_return(device_is_stopped, true); expect_value(device_is_stopped, migration, &migration); + will_return(cmd_allowed_when_stopped_and_copying, false); + expect_value(cmd_allowed_when_stopped_and_copying, cmd, 0xbeef); assert_false(should_exec_command(&vfu_ctx, 0xbeef)); - /* XXX none of the above */ + /* TEST none of the above */ will_return(device_is_stopped_and_copying, false); expect_any(device_is_stopped_and_copying, migration); will_return(device_is_stopped, false); @@ -1226,7 +1229,7 @@ test_dma_controller_dirty_page_get(void **state UNUSED) { dma_memory_region_t *r; uint64_t len = UINT32_MAX + (uint64_t)10; - char bp[131073]; + char bp[0x20008]; /* must be QWORD aligned */ vfu_ctx.dma->nregions = 1; r = &vfu_ctx.dma->regions[0]; @@ -1236,7 +1239,7 @@ test_dma_controller_dirty_page_get(void **state UNUSED) vfu_ctx.dma->dirty_pgsize = 4096; assert_int_equal(0, dma_controller_dirty_page_get(vfu_ctx.dma, (void *)0, - len, 4096, 131073, (char **)&bp)); + len, 4096, sizeof(bp), (char **)&bp)); } int |