author      William Henderson <william.henderson@nutanix.com>    2023-08-17 10:37:06 +0000
committer   John Levon <john.levon@nutanix.com>                  2023-09-15 12:59:39 +0100
commit      8a88c5e2b257a6100a6e7c673ed1b27394d26725 (patch)
tree        a89748c4924f77bd19574ace9a89b745397edf33
parent      72ddc0817293cccbcd1b3406c0b089d5016b7766 (diff)
refactor: device feature handling and minor changes
Signed-off-by: William Henderson <william.henderson@nutanix.com>
-rw-r--r--  lib/dma.c            11
-rw-r--r--  lib/libvfio-user.c  246
-rw-r--r--  lib/migration.c      14
3 files changed, 155 insertions, 116 deletions
diff --git a/lib/dma.c b/lib/dma.c
index c83ec44..79bfd7d 100644
--- a/lib/dma.c
+++ b/lib/dma.c
@@ -631,6 +631,7 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
for (i = 0; i < (size_t)bitmap_size; i++) {
uint8_t val = region->dirty_bitmap[i];
+ uint8_t *outp = (uint8_t *)&bitmap[i];
uint8_t out = 0;
/*
@@ -647,8 +648,14 @@ dma_controller_dirty_page_get(dma_controller_t *dma, vfu_dma_addr_t addr,
out = 0;
} else {
uint8_t zero = 0;
- __atomic_exchange(&region->dirty_bitmap[i], &zero,
- &out, __ATOMIC_SEQ_CST);
+ if (dma->dirty_pgsize == pgsize) {
+ __atomic_exchange(&region->dirty_bitmap[i], &zero,
+ outp, __ATOMIC_SEQ_CST);
+ continue;
+ } else {
+ __atomic_exchange(&region->dirty_bitmap[i], &zero,
+ &out, __ATOMIC_SEQ_CST);
+ }
}
for (j = 0; j < 8; j++) {
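
The hunk above adds a fast path to dma_controller_dirty_page_get(): when the client asks for the same page size the controller is already tracking dirty pages at (dma->dirty_pgsize), the byte layouts of the two bitmaps match, so each tracked byte can be atomically swapped straight into the caller's buffer and the per-bit repacking loop below is skipped. A minimal standalone sketch of that idea follows; the copy_dirty_byte() helper and its arguments are illustrative, not part of the library API.

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch only (not part of this commit): copy one byte of the tracked
 * dirty bitmap into the client-supplied bitmap, clearing it atomically so
 * that writes landing concurrently are not lost.
 */
static void
copy_dirty_byte(uint8_t *tracked, uint8_t *out,
                size_t tracked_pgsize, size_t client_pgsize)
{
    uint8_t zero = 0;

    if (tracked_pgsize == client_pgsize) {
        /* Fast path: layouts match, fetch-and-clear straight into out. */
        __atomic_exchange(tracked, &zero, out, __ATOMIC_SEQ_CST);
        return;
    }

    /* Slow path: fetch-and-clear locally, then repack bit by bit at the
     * client's page size (repacking elided here). */
    uint8_t val;
    __atomic_exchange(tracked, &zero, &val, __ATOMIC_SEQ_CST);
    (void)val;
    (void)out;
}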
diff --git a/lib/libvfio-user.c b/lib/libvfio-user.c
index 1cb97dd..cd1b558 100644
--- a/lib/libvfio-user.c
+++ b/lib/libvfio-user.c
@@ -949,131 +949,179 @@ is_dma_feature(uint32_t feature) {
}
static int
-handle_device_feature(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
+handle_migration_device_feature_get(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg,
+ uint32_t feature)
{
- assert(vfu_ctx != NULL);
- assert(msg != NULL);
+ // all supported outgoing data is currently the same size as
+ // vfio_user_device_feature_migration
+ msg->out.iov.iov_len = sizeof(struct vfio_user_device_feature)
+ + sizeof(struct vfio_user_device_feature_migration);
+ msg->out.iov.iov_base = calloc(1, msg->out.iov.iov_len);
- if (vfu_ctx->migration == NULL ||
- msg->in.iov.iov_len < sizeof(struct vfio_user_device_feature)) {
- return ERROR_INT(EINVAL);
+ if (msg->out.iov.iov_base == NULL) {
+ return ERROR_INT(ENOMEM);
}
- struct vfio_user_device_feature *req = msg->in.iov.iov_base;
+ memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
+ sizeof(struct vfio_user_device_feature));
- uint32_t supported_flags =
- device_feature_flags_supported(req->flags & VFIO_DEVICE_FEATURE_MASK);
+ struct vfio_user_device_feature *res = msg->out.iov.iov_base;
- if ((req->flags & supported_flags) !=
- (req->flags & ~VFIO_DEVICE_FEATURE_MASK) || supported_flags == 0) {
- return ERROR_INT(EINVAL);
+ res->argsz = sizeof(struct vfio_user_device_feature)
+ + sizeof(struct vfio_user_device_feature_migration);
+
+ switch (feature) {
+ case VFIO_DEVICE_FEATURE_MIGRATION: {
+ struct vfio_user_device_feature_migration *mig =
+ (void *)res->data;
+ // FIXME are these always supported? Can we consider a feature to be
+ // "supported" if said support is just an empty callback?
+ //
+ // We don't need to return RUNNING or ERROR since they are
+ // always supported.
+ mig->flags = VFIO_MIGRATION_STOP_COPY
+ | VFIO_MIGRATION_PRE_COPY;
+ return 0;
+ }
+
+ case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE: {
+ struct vfio_user_device_feature_mig_state *state =
+ (void *)res->data;
+ state->device_state = migration_get_state(vfu_ctx);
+ return 0;
+ }
+
+ default: return ERROR_INT(EINVAL);
}
+}
- uint32_t feature = req->flags & VFIO_DEVICE_FEATURE_MASK;
+static int
+handle_migration_device_feature_set(vfu_ctx_t *vfu_ctx, uint32_t feature,
+ struct vfio_user_device_feature *res)
+{
+ assert(feature == VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE);
- ssize_t ret;
+ struct vfio_user_device_feature_mig_state *state = (void *)res->data;
- if (req->flags & VFIO_DEVICE_FEATURE_PROBE) {
- msg->out.iov.iov_base = malloc(msg->in.iov.iov_len);
- msg->out.iov.iov_len = msg->in.iov.iov_len;
+ return migration_set_state(vfu_ctx, state->device_state);
+}
- if (msg->out.iov.iov_base == NULL) {
- return ERROR_INT(ENOMEM);
- }
+static int
+handle_dma_device_feature_get(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg,
+ struct vfio_user_device_feature *req)
+{
+ struct vfio_user_device_feature_dma_logging_report *rep =
+ (void *)req->data;
- memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
- msg->out.iov.iov_len);
+ ssize_t bitmap_size = get_bitmap_size(rep->length, rep->page_size);
- ret = 0;
- } else if (req->flags & VFIO_DEVICE_FEATURE_GET) {
- if (is_migration_feature(feature)) {
- // all supported outgoing data is currently the same size as
- // vfio_user_device_feature_migration
- msg->out.iov.iov_len = sizeof(struct vfio_user_device_feature)
- + sizeof(struct vfio_user_device_feature_migration);
- msg->out.iov.iov_base = calloc(1, msg->out.iov.iov_len);
-
- if (msg->out.iov.iov_base == NULL) {
- return ERROR_INT(ENOMEM);
- }
+ msg->out.iov.iov_len = sizeof(struct vfio_user_device_feature)
+ + sizeof(struct vfio_user_device_feature_dma_logging_report)
+ + bitmap_size;
+ msg->out.iov.iov_base = calloc(1, msg->out.iov.iov_len);
- memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
- sizeof(struct vfio_user_device_feature));
+ if (msg->out.iov.iov_base == NULL) {
+ return ERROR_INT(ENOMEM);
+ }
- struct vfio_user_device_feature *res = msg->out.iov.iov_base;
+ memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
+ sizeof(struct vfio_user_device_feature) +
+ sizeof(struct vfio_user_device_feature_dma_logging_report));
- res->argsz = sizeof(struct vfio_user_device_feature)
- + sizeof(struct vfio_user_device_feature_migration);
+ struct vfio_user_device_feature *res = msg->out.iov.iov_base;
- if (feature == VFIO_DEVICE_FEATURE_MIGRATION) {
- struct vfio_user_device_feature_migration *mig =
- (void*)res->data;
+ res->argsz = msg->out.iov.iov_len;
- // FIXME are these always supported? Can we consider to be
- // "supported" if said support is just an empty callback?
- //
- // We don't need to return RUNNING or ERROR since they are always
- // supported.
- mig->flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY;
+ dma_controller_t *dma = vfu_ctx->dma;
- ret = 0;
- } else {
- assert(feature == VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE);
+ assert(dma != NULL);
- struct vfio_user_device_feature_mig_state *state =
- (void*)res->data;
-
- state->device_state = migration_get_state(vfu_ctx);
+ char * bitmap = (char *) msg->out.iov.iov_base
+ + sizeof(struct vfio_user_device_feature)
+ + sizeof(struct vfio_user_device_feature_dma_logging_report);
- ret = 0;
- }
- } else if (is_dma_feature(feature)) {
- struct vfio_user_device_feature_dma_logging_report *rep =
- (void*)req->data;
+ int ret = dma_controller_dirty_page_get(dma,
+ (vfu_dma_addr_t) rep->iova,
+ rep->length,
+ rep->page_size,
+ bitmap_size,
+ bitmap);
+
+ if (ret < 0) {
+ msg->out.iov.iov_len = 0;
+ }
- ssize_t bitmap_size = get_bitmap_size(rep->length, rep->page_size);
+ return ret;
+}
- msg->out.iov.iov_len = sizeof(struct vfio_user_device_feature)
- + sizeof(struct vfio_user_device_feature_dma_logging_report)
- + bitmap_size;
- msg->out.iov.iov_base = calloc(1, msg->out.iov.iov_len);
+static int
+handle_dma_device_feature_set(vfu_ctx_t *vfu_ctx, uint32_t feature,
+ struct vfio_user_device_feature *res)
+{
+ dma_controller_t *dma = vfu_ctx->dma;
- if (msg->out.iov.iov_base == NULL) {
- return ERROR_INT(ENOMEM);
- }
+ assert(dma != NULL);
- memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
- sizeof(struct vfio_user_device_feature) +
- sizeof(struct vfio_user_device_feature_dma_logging_report));
+ if (feature == VFIO_DEVICE_FEATURE_DMA_LOGGING_START) {
+ struct vfio_user_device_feature_dma_logging_control *ctl =
+ (void *)res->data;
+ return dma_controller_dirty_page_logging_start(dma,
+ ctl->page_size);
+ } else {
+ assert(feature == VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP);
- struct vfio_user_device_feature *res = msg->out.iov.iov_base;
+ dma_controller_dirty_page_logging_stop(dma);
+ return 0;
+ }
+}
- res->argsz = msg->out.iov.iov_len;
+static int
+handle_device_feature(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
+{
+ assert(vfu_ctx != NULL);
+ assert(msg != NULL);
- dma_controller_t *dma = vfu_ctx->dma;
+ if (vfu_ctx->migration == NULL ||
+ msg->in.iov.iov_len < sizeof(struct vfio_user_device_feature)) {
+ return ERROR_INT(EINVAL);
+ }
- assert(dma != NULL);
+ struct vfio_user_device_feature *req = msg->in.iov.iov_base;
- char* bitmap = (char*) msg->out.iov.iov_base
- + sizeof(struct vfio_user_device_feature)
- + sizeof(struct vfio_user_device_feature_dma_logging_report);
+ uint32_t operations = req->flags & ~VFIO_DEVICE_FEATURE_MASK;
+ uint32_t feature = req->flags & VFIO_DEVICE_FEATURE_MASK;
- ret = dma_controller_dirty_page_get(dma,
- (vfu_dma_addr_t) rep->iova,
- rep->length,
- rep->page_size,
- bitmap_size,
- bitmap);
+ uint32_t supported_ops = device_feature_flags_supported(feature);
- if (ret < 0) {
- msg->out.iov.iov_len = 0;
- }
+ if ((req->flags & supported_ops) != operations || supported_ops == 0) {
+ return ERROR_INT(EINVAL);
+ }
+
+ ssize_t ret;
+
+ if (req->flags & VFIO_DEVICE_FEATURE_PROBE) {
+ msg->out.iov.iov_len = msg->in.iov.iov_len;
+ msg->out.iov.iov_base = malloc(msg->out.iov.iov_len);
+
+ if (msg->out.iov.iov_base == NULL) {
+ return ERROR_INT(ENOMEM);
+ }
+
+ memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
+ msg->out.iov.iov_len);
+
+ ret = 0;
+ } else if (req->flags & VFIO_DEVICE_FEATURE_GET) {
+ if (is_migration_feature(feature)) {
+ ret = handle_migration_device_feature_get(vfu_ctx, msg, feature);
+ } else if (is_dma_feature(feature)) {
+ ret = handle_dma_device_feature_get(vfu_ctx, msg, req);
} else {
return ERROR_INT(EINVAL);
}
} else if (req->flags & VFIO_DEVICE_FEATURE_SET) {
- msg->out.iov.iov_base = malloc(msg->in.iov.iov_len);
msg->out.iov.iov_len = msg->in.iov.iov_len;
+ msg->out.iov.iov_base = malloc(msg->out.iov.iov_len);
if (msg->out.iov.iov_base == NULL) {
return ERROR_INT(ENOMEM);
@@ -1085,27 +1133,9 @@ handle_device_feature(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
struct vfio_user_device_feature *res = msg->out.iov.iov_base;
if (is_migration_feature(feature)) {
- assert(feature == VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE);
-
- struct vfio_user_device_feature_mig_state *state = (void*)res->data;
-
- ret = migration_set_state(vfu_ctx, state->device_state);
+ ret = handle_migration_device_feature_set(vfu_ctx, feature, res);
} else if (is_dma_feature(feature)) {
- dma_controller_t *dma = vfu_ctx->dma;
-
- assert(dma != NULL);
-
- if (feature == VFIO_DEVICE_FEATURE_DMA_LOGGING_START) {
- struct vfio_user_device_feature_dma_logging_control *ctl =
- (void*)res->data;
- ret = dma_controller_dirty_page_logging_start(dma,
- ctl->page_size);
- } else {
- assert(feature == VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP);
-
- dma_controller_dirty_page_logging_stop(dma);
- ret = 0;
- }
+ ret = handle_dma_device_feature_set(vfu_ctx, feature, res);
} else {
return ERROR_INT(EINVAL);
}
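
The refactored handle_device_feature() above serves VFIO_USER_DEVICE_FEATURE requests whose payload mirrors the kernel's struct vfio_device_feature: a 32-bit argsz, a flags word carrying the feature number in VFIO_DEVICE_FEATURE_MASK plus the PROBE/GET/SET operation bits, and feature-specific data after the header. As a hedged illustration of the wire format the GET path answers, a client-side request for the current migration state could be assembled roughly as below; build_mig_state_get() and its buffer handling are illustrative only, while the struct and flag names are the ones used in this diff (they come from the vfio-user protocol headers).

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Sketch only: build a DEVICE_FEATURE request asking for the current
 * migration state.  The server replies with the same header followed by a
 * struct vfio_user_device_feature_mig_state, as filled in by
 * handle_migration_device_feature_get() above.
 */
static size_t
build_mig_state_get(void *buf, size_t buflen)
{
    struct vfio_user_device_feature hdr = {
        .argsz = sizeof(struct vfio_user_device_feature)
               + sizeof(struct vfio_user_device_feature_mig_state),
        .flags = VFIO_DEVICE_FEATURE_GET
               | VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE,
    };

    if (buflen < sizeof(hdr)) {
        return 0;
    }
    memcpy(buf, &hdr, sizeof(hdr));
    return sizeof(hdr);
}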
diff --git a/lib/migration.c b/lib/migration.c
index 60e83e7..dcc56f5 100644
--- a/lib/migration.c
+++ b/lib/migration.c
@@ -46,7 +46,7 @@
*
* The indices of each state are those in the vfio_user_device_mig_state enum.
*/
-static const char transitions[8] = {
+static const char transitions[VFIO_USER_DEVICE_NUM_STATES] = {
0b00000000, // ERROR -> {}
0b00011100, // STOP -> {RUNNING, STOP_COPY, RESUMING}
0b01000010, // RUNNING -> {STOP, PRE_COPY}
@@ -66,8 +66,9 @@ static const char transitions[8] = {
* This can be indexed as `next_state[current][target] == next`. If next is
* ERROR, then the transition is not allowed.
*/
-static const uint32_t next_state[VFIO_USER_DEVICE_NUM_STATES][VFIO_USER_DEVICE_NUM_STATES] = {
- [VFIO_USER_DEVICE_STATE_ERROR] = {0, 0, 0, 0, 0, 0, 0, 0},
+static const uint32_t
+next_state[VFIO_USER_DEVICE_NUM_STATES][VFIO_USER_DEVICE_NUM_STATES] = {
+ [VFIO_USER_DEVICE_STATE_ERROR] = { 0, 0, 0, 0, 0, 0, 0, 0 },
[VFIO_USER_DEVICE_STATE_STOP] = {
[VFIO_USER_DEVICE_STATE_ERROR] = VFIO_USER_DEVICE_STATE_ERROR,
[VFIO_USER_DEVICE_STATE_STOP] = VFIO_USER_DEVICE_STATE_STOP,
@@ -108,7 +109,7 @@ static const uint32_t next_state[VFIO_USER_DEVICE_NUM_STATES][VFIO_USER_DEVICE_N
[VFIO_USER_DEVICE_STATE_PRE_COPY] = VFIO_USER_DEVICE_STATE_STOP,
[VFIO_USER_DEVICE_STATE_PRE_COPY_P2P] = VFIO_USER_DEVICE_STATE_ERROR,
},
- [VFIO_USER_DEVICE_STATE_RUNNING_P2P] = {0, 0, 0, 0, 0, 0, 0, 0},
+ [VFIO_USER_DEVICE_STATE_RUNNING_P2P] = { 0, 0, 0, 0, 0, 0, 0, 0 },
[VFIO_USER_DEVICE_STATE_PRE_COPY] = {
[VFIO_USER_DEVICE_STATE_ERROR] = VFIO_USER_DEVICE_STATE_ERROR,
[VFIO_USER_DEVICE_STATE_STOP] = VFIO_USER_DEVICE_STATE_RUNNING,
@@ -119,7 +120,7 @@ static const uint32_t next_state[VFIO_USER_DEVICE_NUM_STATES][VFIO_USER_DEVICE_N
[VFIO_USER_DEVICE_STATE_PRE_COPY] = VFIO_USER_DEVICE_STATE_PRE_COPY,
[VFIO_USER_DEVICE_STATE_PRE_COPY_P2P] = VFIO_USER_DEVICE_STATE_ERROR,
},
- [VFIO_USER_DEVICE_STATE_PRE_COPY_P2P] = {0, 0, 0, 0, 0, 0, 0, 0},
+ [VFIO_USER_DEVICE_STATE_PRE_COPY_P2P] = { 0, 0, 0, 0, 0, 0, 0, 0 },
};
bool
@@ -158,7 +159,8 @@ init_migration(const vfu_migration_callbacks_t *callbacks, int *err)
migr->callbacks = *callbacks;
if (migr->callbacks.transition == NULL ||
migr->callbacks.read_data == NULL ||
- migr->callbacks.write_data == NULL) {
+ migr->callbacks.write_data == NULL ||
+ migr->callbacks.version != VFU_MIGR_CALLBACKS_VERS) {
free(migr);
*err = EINVAL;
return NULL;
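
The two tables touched in migration.c encode the VFIO v2 migration state machine: transitions[current] is a bitmask of states directly reachable from current, with bit positions following the vfio_user_device_mig_state enum, and next_state[current][target] names the next hop to take when moving towards target, where ERROR means the transition is not allowed. A small sketch of how such tables are consulted is below; the helper names are illustrative, not the library's, and the tables and enum are assumed to be in scope.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: querying the tables defined in lib/migration.c. */
static bool
can_step_directly(uint32_t current, uint32_t target)
{
    if (current >= VFIO_USER_DEVICE_NUM_STATES ||
        target >= VFIO_USER_DEVICE_NUM_STATES) {
        return false;
    }
    /* transitions[A] has bit B set iff A -> B is a valid single step,
     * e.g. 0b00011100 for STOP -> {RUNNING, STOP_COPY, RESUMING}. */
    return (transitions[current] & (1u << target)) != 0;
}

static uint32_t
next_hop(uint32_t current, uint32_t target)
{
    /* Multi-step transitions are taken one hop at a time; a result of
     * VFIO_USER_DEVICE_STATE_ERROR means the transition is rejected. */
    return next_state[current][target];
}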