/* * Copyright (c) 2020 Nutanix Inc. All rights reserved. * * Authors: Thanos Makatos * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Nutanix nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include "dma.h" #include "irq.h" #include "libvfio-user.h" #include "migration.h" #include "migration_priv.h" #include "mocks.h" #include "pci.h" #include "private.h" #include "tran_sock.h" #define DMACSIZE (sizeof(dma_controller_t) + sizeof(dma_memory_region_t) * 5) /* * These globals are used in the unit tests; they're re-initialized each time by * setup(), but having them as globals makes for significantly less * boiler-plate. */ static char dmacbuf[DMACSIZE]; static vfu_ctx_t vfu_ctx; static vfu_msg_t msg; static size_t nr_fds; static int fds[2]; static int ret; static vfu_msg_t * mkmsg(enum vfio_user_command cmd, void *data, size_t size) { msg.hdr.cmd = cmd; msg.hdr.msg_size = size; msg.in.iov.iov_base = data; msg.in.iov.iov_len = size; if (nr_fds != 0) { msg.in.fds = fds; msg.in.nr_fds = nr_fds; } else { msg.in.fds = NULL; msg.in.nr_fds = 0; } return &msg; } /* * FIXME we shouldn't have to specify a setup function explicitly for each unit * test, cmocka should provide that. E.g. cmocka_run_group_tests enables us to * run a function before/after ALL unit tests have finished, we can extend it * and provide a function to execute before and after each unit test. 
 */

/*
 * Per-test fixture: re-initializes the global vfu_ctx, the DMA controller
 * backing buffer, the message and fd state, and undoes any mock patching,
 * so every unit test starts from a clean slate.
 */
static int
setup(void **state UNUSED)
{
    memset(&vfu_ctx, 0, sizeof(vfu_ctx));

    vfu_ctx.client_max_fds = 10;

    /* point the context's DMA controller at the static buffer */
    memset(dmacbuf, 0, DMACSIZE);
    vfu_ctx.dma = (void *)dmacbuf;
    vfu_ctx.dma->max_regions = 10;
    vfu_ctx.dma->vfu_ctx = &vfu_ctx;

    memset(&msg, 0, sizeof(msg));

    msg.hdr.flags.type = VFIO_USER_F_TYPE_COMMAND;
    msg.hdr.msg_size = sizeof(msg.hdr);

    fds[0] = fds[1] = -1;

    nr_fds = 0;
    ret = 0;

    unpatch_all();
    return 0;
}

/* FIXME must replace test_dma_map_without_dma */

/* DMA map request with zero addr/size and no fd must succeed. */
static void
test_dma_map_mappable_without_fd(void **state UNUSED)
{
    struct vfio_user_dma_map dma_map = {
        .argsz = sizeof(dma_map),
    };

    ret = handle_dma_map(&vfu_ctx,
                         mkmsg(VFIO_USER_DMA_MAP, &dma_map, sizeof(dma_map)),
                         &dma_map);
    assert_int_equal(0, ret);
}

/*
 * DMA map with no fd attached: the region must be forwarded to
 * dma_controller_add_region with fd == -1 and prot == PROT_NONE.
 */
static void
test_dma_map_without_fd(void **state UNUSED)
{
    struct vfio_user_dma_map dma_map = {
        .argsz = sizeof(dma_map),
        .addr = 0xdeadbeef,
        .size = 0xcafebabe,
        .offset = 0x8badf00d,
        .flags = 0
    };

    patch("dma_controller_add_region");
    /* two queued mock return values for dma_controller_add_region —
     * presumably (errno, return value); confirm against mocks.c */
    will_return(dma_controller_add_region, 0);
    will_return(dma_controller_add_region, 0);
    expect_value(dma_controller_add_region, dma, vfu_ctx.dma);
    expect_value(dma_controller_add_region, dma_addr, dma_map.addr);
    expect_value(dma_controller_add_region, size, dma_map.size);
    expect_value(dma_controller_add_region, fd, -1);
    expect_value(dma_controller_add_region, offset, dma_map.offset);
    expect_value(dma_controller_add_region, prot, PROT_NONE);
    ret = handle_dma_map(&vfu_ctx,
                         mkmsg(VFIO_USER_DMA_MAP, &dma_map, sizeof(dma_map)),
                         &dma_map);
    assert_int_equal(0, ret);
}

/*
 * cmocka expect_check() comparator: field-by-field equality of two
 * vfu_dma_info_t values passed as LargestIntegralType.
 */
static int
check_dma_info(const LargestIntegralType value,
               const LargestIntegralType cvalue)
{
    vfu_dma_info_t *info = (vfu_dma_info_t *)(long)value;
    vfu_dma_info_t *cinfo = (vfu_dma_info_t *)(long)cvalue;

    return info->iova.iov_base == cinfo->iova.iov_base
        && info->iova.iov_len == cinfo->iova.iov_len
        && info->vaddr == cinfo->vaddr
        && info->mapping.iov_base == cinfo->mapping.iov_base
        && info->mapping.iov_len == cinfo->mapping.iov_len
        && info->page_size == cinfo->page_size
        && info->prot == cinfo->prot;
}

/*
 * Checks that handle_dma_map returns 0 when dma_controller_add_region
 * succeeds.
 */
static void
test_dma_map_return_value(void **state UNUSED)
{
    /* local context shadows the global one on purpose */
    dma_controller_t dma = { 0 };
    vfu_ctx_t vfu_ctx = { .dma = &dma };
    dma.vfu_ctx = &vfu_ctx;
    struct vfio_user_dma_map dma_map = {
        .argsz = sizeof(dma_map)
    };

    patch("dma_controller_add_region");
    expect_value(dma_controller_add_region, dma, (uintptr_t)vfu_ctx.dma);
    expect_value(dma_controller_add_region, dma_addr, dma_map.addr);
    expect_value(dma_controller_add_region, size, dma_map.size);
    expect_value(dma_controller_add_region, fd, -1);
    expect_value(dma_controller_add_region, offset, dma_map.offset);
    expect_value(dma_controller_add_region, prot, PROT_NONE);
    will_return(dma_controller_add_region, 0);
    will_return(dma_controller_add_region, 2);

    assert_int_equal(0,
        handle_dma_map(&vfu_ctx,
                       mkmsg(VFIO_USER_DMA_MAP, &dma_map, sizeof(dma_map)),
                       &dma_map));
}

/*
 * Tests that handle_dma_unmap correctly removes a region.
 */
static void
test_handle_dma_unmap(void **state UNUSED)
{
    struct vfio_user_dma_unmap dma_unmap = {
        .argsz = sizeof(dma_unmap),
        .addr = 0x1000,
        .size = 0x1000
    };

    /* three regions; the unmap request matches only regions[0] */
    vfu_ctx.dma->nregions = 3;
    vfu_ctx.dma->regions[0].info.iova.iov_base = (void *)0x1000;
    vfu_ctx.dma->regions[0].info.iova.iov_len = 0x1000;
    vfu_ctx.dma->regions[0].fd = -1;
    vfu_ctx.dma->regions[1].info.iova.iov_base = (void *)0x4000;
    vfu_ctx.dma->regions[1].info.iova.iov_len = 0x2000;
    vfu_ctx.dma->regions[1].fd = -1;
    vfu_ctx.dma->regions[2].info.iova.iov_base = (void *)0x8000;
    vfu_ctx.dma->regions[2].info.iova.iov_len = 0x3000;
    vfu_ctx.dma->regions[2].fd = -1;

    vfu_ctx.dma_unregister = mock_dma_unregister;

    /* the unregister callback must see regions[0]'s info */
    expect_value(mock_dma_unregister, vfu_ctx, &vfu_ctx);
    expect_check(mock_dma_unregister, info, check_dma_info,
                 &vfu_ctx.dma->regions[0].info);

    ret = handle_dma_unmap(&vfu_ctx,
                           mkmsg(VFIO_USER_DMA_UNMAP, &dma_unmap,
                                 sizeof(dma_unmap)),
                           &dma_unmap);

    assert_int_equal(0, ret);
    assert_int_equal(2, vfu_ctx.dma->nregions);
    /* surviving regions shifted down to slots 0 and 1 */
    assert_int_equal(0x4000, vfu_ctx.dma->regions[0].info.iova.iov_base);
    assert_int_equal(0x2000, vfu_ctx.dma->regions[0].info.iova.iov_len);
    assert_int_equal(0x8000, vfu_ctx.dma->regions[1].info.iova.iov_base);
    assert_int_equal(0x3000, vfu_ctx.dma->regions[1].info.iova.iov_len);
    free(msg.out.iov.iov_base);
}

/*
 * Adding a region with fd == -1 must record the iova/offset/fd/prot as given
 * and leave the region unmapped (NULL vaddr, empty mapping).
 */
static void
test_dma_controller_add_region_no_fd(void **state UNUSED)
{
    vfu_dma_addr_t dma_addr = (void *)0xdeadbeef;
    dma_memory_region_t *r;
    off_t offset = 0;
    size_t size = 0;
    int fd = -1;

    assert_int_equal(0, dma_controller_add_region(vfu_ctx.dma, dma_addr,
                                                  size, fd, offset,
                                                  PROT_NONE));
    assert_int_equal(1, vfu_ctx.dma->nregions);

    r = &vfu_ctx.dma->regions[0];
    assert_ptr_equal(NULL, r->info.vaddr);
    assert_ptr_equal(NULL, r->info.mapping.iov_base);
    assert_int_equal(0, r->info.mapping.iov_len);
    assert_ptr_equal(dma_addr, r->info.iova.iov_base);
    assert_int_equal(size, r->info.iova.iov_len);
    assert_int_equal(sysconf(_SC_PAGE_SIZE), r->info.page_size);
    assert_int_equal(offset, r->offset);
    assert_int_equal(fd, r->fd);
    assert_int_equal(PROT_NONE, r->info.prot);
}

/*
 * Removing a region that is currently mapped must invoke both the
 * unregister callback and dma_controller_unmap_region.
 */
static void
test_dma_controller_remove_region_mapped(void **state UNUSED)
{
    vfu_ctx.dma->nregions = 1;
    vfu_ctx.dma->regions[0].info.iova.iov_base = (void *)0xdeadbeef;
    vfu_ctx.dma->regions[0].info.iova.iov_len = 0x100;
    /* non-NULL mapping/vaddr marks the region as mapped */
    vfu_ctx.dma->regions[0].info.mapping.iov_base = (void *)0xcafebabe;
    vfu_ctx.dma->regions[0].info.mapping.iov_len = 0x1000;
    vfu_ctx.dma->regions[0].info.vaddr = (void *)0xcafebabe;

    expect_value(mock_dma_unregister, vfu_ctx, &vfu_ctx);
    expect_check(mock_dma_unregister, info, check_dma_info,
                 &vfu_ctx.dma->regions[0].info);
    /* FIXME add unit test when dma_unregister fails */
    patch("dma_controller_unmap_region");
    expect_value(dma_controller_unmap_region, dma, vfu_ctx.dma);
    expect_value(dma_controller_unmap_region, region,
                 &vfu_ctx.dma->regions[0]);
    assert_int_equal(0,
        dma_controller_remove_region(vfu_ctx.dma, (void *)0xdeadbeef, 0x100,
                                     mock_dma_unregister, &vfu_ctx));
}

static void
/* Removing an unmapped region (fd == -1, no vaddr) must only invoke the
 * unregister callback; dma_controller_unmap_region stays patched out. */
test_dma_controller_remove_region_unmapped(void **state UNUSED)
{
    vfu_ctx.dma->nregions = 1;
    vfu_ctx.dma->regions[0].info.iova.iov_base = (void *)0xdeadbeef;
    vfu_ctx.dma->regions[0].info.iova.iov_len = 0x100;
    vfu_ctx.dma->regions[0].fd = -1;

    expect_value(mock_dma_unregister, vfu_ctx, &vfu_ctx);
    expect_check(mock_dma_unregister, info, check_dma_info,
                 &vfu_ctx.dma->regions[0].info);
    patch("dma_controller_unmap_region");
    assert_int_equal(0,
        dma_controller_remove_region(vfu_ctx.dma, (void *)0xdeadbeef, 0x100,
                                     mock_dma_unregister, &vfu_ctx));
}

/*
 * Exercises dma_addr_to_sgl: single-region hits, out-of-range lookups
 * (ENOENT), protection mismatches (EACCES), and a request spanning two
 * regions.
 */
static void
test_dma_addr_to_sgl(void **state UNUSED)
{
    dma_memory_region_t *r, *r1;
    struct iovec iov[2] = { };
    dma_sg_t sg[2];
    int ret;

    vfu_ctx.dma->nregions = 1;
    r = &vfu_ctx.dma->regions[0];
    r->info.iova.iov_base = (void *)0x1000;
    r->info.iova.iov_len = 0x4000;
    r->info.vaddr = (void *)0xdeadbeef;

    /* fast path, region hint hit */
    r->info.prot = PROT_WRITE;
    ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x2000, 0x400,
                          sg, 1, PROT_READ);
    assert_int_equal(1, ret);
    assert_int_equal(r->info.iova.iov_base, sg[0].dma_addr);
    assert_int_equal(0, sg[0].region);
    assert_int_equal(0x2000 - (long)r->info.iova.iov_base, sg[0].offset);
    assert_int_equal(0x400, sg[0].length);
    assert_true(vfu_sg_is_mappable(&vfu_ctx, &sg[0]));

    /* address outside any region -> -1/ENOENT */
    errno = 0;
    r->info.prot = PROT_WRITE;
    ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x6000, 0x400,
                          sg, 1, PROT_READ);
    assert_int_equal(-1, ret);
    assert_int_equal(ENOENT, errno);

    /* requested prot not granted by the region -> -1/EACCES */
    r->info.prot = PROT_READ;
    ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x2000, 0x400,
                          sg, 1, PROT_WRITE);
    assert_int_equal(-1, ret);
    assert_int_equal(EACCES, errno);

    r->info.prot = PROT_READ|PROT_WRITE;
    ret = dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x2000, 0x400,
                          sg, 1, PROT_READ);
    assert_int_equal(1, ret);

    /* add a second region at [0x5000, 0x7000) for the spanning case */
    vfu_ctx.dma->nregions = 2;
    r1 = &vfu_ctx.dma->regions[1];
    r1->info.iova.iov_base = (void *)0x5000;
    r1->info.iova.iov_len = 0x2000;
    r1->info.vaddr = (void *)0xcafebabe;
    r1->info.prot = PROT_WRITE;

    ret =
        dma_addr_to_sgl(vfu_ctx.dma, (vfu_dma_addr_t)0x1000, 0x5000,
                        sg, 2, PROT_READ);
    /* request straddles both regions -> two SGL entries */
    assert_int_equal(2, ret);
    assert_int_equal(0x4000, sg[0].length);
    assert_int_equal(r->info.iova.iov_base, sg[0].dma_addr);
    assert_int_equal(0, sg[0].region);
    assert_int_equal(0, sg[0].offset);
    assert_true(vfu_sg_is_mappable(&vfu_ctx, &sg[0]));
    assert_int_equal(0x1000, sg[1].length);
    assert_int_equal(r1->info.iova.iov_base, sg[1].dma_addr);
    assert_int_equal(1, sg[1].region);
    assert_int_equal(0, sg[1].offset);
    assert_true(vfu_sg_is_mappable(&vfu_ctx, &sg[1]));

    /* SGL -> iovec conversion uses each region's vaddr plus offset */
    assert_int_equal(0, dma_sgl_get(vfu_ctx.dma, sg, iov, 2));
    assert_int_equal(r->info.vaddr + sg[0].offset, iov[0].iov_base);
    assert_int_equal(sg[0].length, iov[0].iov_len);
    assert_int_equal(r1->info.vaddr + sg[1].offset, iov[1].iov_base);
    assert_int_equal(sg[1].length, iov[1].iov_len);

    /* TODO test more scenarios */
}

/* vfu_setup_device_dma with NULL callbacks must allocate a controller. */
static void
test_vfu_setup_device_dma(void **state UNUSED)
{
    vfu_ctx_t vfu_ctx = { 0 };

    assert_int_equal(0, vfu_setup_device_dma(&vfu_ctx, NULL, NULL));
    assert_non_null(vfu_ctx.dma);
    free(vfu_ctx.dma);
}

/* local mirror of the transport's private struct layout */
typedef struct {
    int fd;
    int conn_fd;
} tran_sock_t;

/*
 * Exhaustively checks vfio_migr_state_transition_is_valid over the 8 device
 * states (the loops below index states 0..7 numerically).
 */
static void
test_migration_state_transitions(void **state UNUSED)
{
    bool (*f)(uint32_t, uint32_t) = vfio_migr_state_transition_is_valid;
    uint32_t i;

    /* from ERROR: all transitions are invalid */
    for (i = 0; i < 8; i++) {
        assert_false(f(VFIO_USER_DEVICE_STATE_ERROR, i));
    }

    /* from STOP */
    assert_false(f(VFIO_USER_DEVICE_STATE_STOP,
                   VFIO_USER_DEVICE_STATE_ERROR));
    assert_false(f(VFIO_USER_DEVICE_STATE_STOP,
                   VFIO_USER_DEVICE_STATE_STOP));
    assert_true(f(VFIO_USER_DEVICE_STATE_STOP,
                  VFIO_USER_DEVICE_STATE_RUNNING));
    assert_true(f(VFIO_USER_DEVICE_STATE_STOP,
                  VFIO_USER_DEVICE_STATE_STOP_COPY));
    assert_true(f(VFIO_USER_DEVICE_STATE_STOP,
                  VFIO_USER_DEVICE_STATE_RESUMING));
    assert_false(f(VFIO_USER_DEVICE_STATE_STOP,
                   VFIO_USER_DEVICE_STATE_RUNNING_P2P));
    assert_false(f(VFIO_USER_DEVICE_STATE_STOP,
                   VFIO_USER_DEVICE_STATE_PRE_COPY));
    assert_false(f(VFIO_USER_DEVICE_STATE_STOP,
                   VFIO_USER_DEVICE_STATE_PRE_COPY_P2P));

    /* from RUNNING */
    assert_false(f(VFIO_USER_DEVICE_STATE_RUNNING,
                   VFIO_USER_DEVICE_STATE_ERROR));
    assert_true(f(VFIO_USER_DEVICE_STATE_RUNNING,
                  VFIO_USER_DEVICE_STATE_STOP));
    assert_false(f(VFIO_USER_DEVICE_STATE_RUNNING,
                   VFIO_USER_DEVICE_STATE_RUNNING));
    assert_false(f(VFIO_USER_DEVICE_STATE_RUNNING,
                   VFIO_USER_DEVICE_STATE_STOP_COPY));
    assert_false(f(VFIO_USER_DEVICE_STATE_RUNNING,
                   VFIO_USER_DEVICE_STATE_RESUMING));
    assert_false(f(VFIO_USER_DEVICE_STATE_RUNNING,
                   VFIO_USER_DEVICE_STATE_RUNNING_P2P));
    assert_true(f(VFIO_USER_DEVICE_STATE_RUNNING,
                  VFIO_USER_DEVICE_STATE_PRE_COPY));
    assert_false(f(VFIO_USER_DEVICE_STATE_RUNNING,
                   VFIO_USER_DEVICE_STATE_PRE_COPY_P2P));

    /* from STOP_COPY and RESUMING */
    /* i iterates the numeric values of STOP_COPY (3) and RESUMING (4);
     * the only valid transition out of either is back to STOP */
    for (i = 3; i < 5; i++) {
        assert_false(f(i, VFIO_USER_DEVICE_STATE_ERROR));
        assert_true(f(i, VFIO_USER_DEVICE_STATE_STOP));
        assert_false(f(i, VFIO_USER_DEVICE_STATE_RUNNING));
        assert_false(f(i, VFIO_USER_DEVICE_STATE_STOP_COPY));
        assert_false(f(i, VFIO_USER_DEVICE_STATE_RESUMING));
        assert_false(f(i, VFIO_USER_DEVICE_STATE_RUNNING_P2P));
        assert_false(f(i, VFIO_USER_DEVICE_STATE_PRE_COPY));
        assert_false(f(i, VFIO_USER_DEVICE_STATE_PRE_COPY_P2P));
    }

    /* from RUNNING_P2P: all transitions are invalid */
    for (i = 0; i < 8; i++) {
        assert_false(f(VFIO_USER_DEVICE_STATE_RUNNING_P2P, i));
    }

    /* from PRE_COPY */
    assert_false(f(VFIO_USER_DEVICE_STATE_PRE_COPY,
                   VFIO_USER_DEVICE_STATE_ERROR));
    assert_false(f(VFIO_USER_DEVICE_STATE_PRE_COPY,
                   VFIO_USER_DEVICE_STATE_STOP));
    assert_true(f(VFIO_USER_DEVICE_STATE_PRE_COPY,
                  VFIO_USER_DEVICE_STATE_RUNNING));
    assert_true(f(VFIO_USER_DEVICE_STATE_PRE_COPY,
                  VFIO_USER_DEVICE_STATE_STOP_COPY));
    assert_false(f(VFIO_USER_DEVICE_STATE_PRE_COPY,
                   VFIO_USER_DEVICE_STATE_RESUMING));
    assert_false(f(VFIO_USER_DEVICE_STATE_PRE_COPY,
                   VFIO_USER_DEVICE_STATE_RUNNING_P2P));
    assert_false(f(VFIO_USER_DEVICE_STATE_PRE_COPY,
                   VFIO_USER_DEVICE_STATE_PRE_COPY));
    assert_false(f(VFIO_USER_DEVICE_STATE_PRE_COPY,
                   VFIO_USER_DEVICE_STATE_PRE_COPY_P2P));

    /* from PRE_COPY_P2P: all transitions are invalid */
    for (i = 0; i < 8; i++) {
        assert_false(f(VFIO_USER_DEVICE_STATE_PRE_COPY_P2P, i));
    }
}

/* last state delivered to transition_callback() */
static vfu_migr_state_t LAST_STATE = -1;
/* heap copy of the last buffer delivered to write_callback() */
static void *LAST_WRITE = NULL;

/* Migration transition callback: records the requested state. */
static int
transition_callback(vfu_ctx_t *ctx UNUSED, vfu_migr_state_t state)
{
    LAST_STATE = state;
    return 0;
}

/* Migration read callback: fills buf with 0, 1, 2, ... (count < 256). */
static ssize_t
read_callback(vfu_ctx_t *ctx UNUSED, void *buf, uint64_t count)
{
    assert(count < 256);

    uint8_t *data = buf;

    for (uint8_t i = 0; i < count; i++) {
        *(data + i) = i;
    }

    return count;
}

/* Migration write callback: stashes a copy of buf in LAST_WRITE. */
static ssize_t
write_callback(vfu_ctx_t *ctx UNUSED, void *buf, uint64_t count)
{
    if (LAST_WRITE != NULL) {
        free(LAST_WRITE);
    }
    LAST_WRITE = malloc(count);
    memcpy(LAST_WRITE, buf, count);

    return count;
}

/* Shared fixture data for the migration-callback tests. */
static struct test_setup_migr_reg_dat {
    vfu_ctx_t *v;                       /* context created per test */
    const vfu_migration_callbacks_t c;  /* callbacks under test */
} migr_reg_data = {
    .c = {
        .version = VFU_MIGR_CALLBACKS_VERS,
        .transition = transition_callback,
        .read_data = read_callback,
        .write_data = write_callback
    }
};

/* Fixture setup: create a fresh PCI vfu context, then run the common setup. */
static int
setup_test_setup_migration(void **state)
{
    struct test_setup_migr_reg_dat *p = &migr_reg_data;

    p->v = vfu_create_ctx(VFU_TRANS_SOCK, "test", 0, NULL, VFU_DEV_TYPE_PCI);
    if (p->v == NULL) {
        return -1;
    }
    *state = p;
    return setup(state);
}

/* Fixture teardown: destroy the context and drop any stashed write buffer. */
static int
teardown_test_setup_migration(void **state)
{
    struct test_setup_migr_reg_dat *p = *state;

    vfu_destroy_ctx(p->v);
    if (LAST_WRITE != NULL) {
        free(LAST_WRITE);
        LAST_WRITE = NULL;
    }
    return 0;
}

/* Registering callbacks with no flags leaves the device in RUNNING. */
static void
test_setup_migration_callbacks(void **state)
{
    struct test_setup_migr_reg_dat *p = *state;

    int r = vfu_setup_device_migration_callbacks(p->v, 0, &p->c);

    assert_int_equal(0, r);
    assert_non_null(p->v->migration);
    assert_int_equal(p->v->migration->state, VFIO_USER_DEVICE_STATE_RUNNING);
}

/* Registering with START_RESUMING starts the device in RESUMING instead. */
static void
test_setup_migration_callbacks_resuming(void **state)
{
    struct test_setup_migr_reg_dat *p = *state;

    int r = vfu_setup_device_migration_callbacks(p->v,
            LIBVFIO_USER_MIG_FLAG_START_RESUMING, &p->c);

    assert_int_equal(0, r);
    assert_non_null(p->v->migration);
    assert_int_equal(p->v->migration->state, VFIO_USER_DEVICE_STATE_RESUMING);
}

/*
 * Walks the device through PRE_COPY -> STOP_COPY -> STOP -> RUNNING and
 * checks that each handle_device_state call forwards the mapped
 * VFU_MIGR_STATE_* value to the transition callback.
 */
static void
test_handle_device_state(void **state)
{
    test_setup_migration_callbacks(state);

    struct test_setup_migr_reg_dat *p = *state;
    struct migration *migr = p->v->migration;

    assert(migr->state == VFIO_USER_DEVICE_STATE_RUNNING);

    int r;

    r = handle_device_state(p->v, migr, VFIO_USER_DEVICE_STATE_PRE_COPY,
                            true);
    assert_int_equal(0, r);
    assert_int_equal(LAST_STATE, VFU_MIGR_STATE_PRE_COPY);

    r = handle_device_state(p->v, migr, VFIO_USER_DEVICE_STATE_STOP_COPY,
                            true);
    assert_int_equal(0, r);
    assert_int_equal(LAST_STATE, VFU_MIGR_STATE_STOP_AND_COPY);

    r = handle_device_state(p->v, migr, VFIO_USER_DEVICE_STATE_STOP, true);
    assert_int_equal(0, r);
    assert_int_equal(LAST_STATE, VFU_MIGR_STATE_STOP);

    r = handle_device_state(p->v, migr, VFIO_USER_DEVICE_STATE_RUNNING,
                            true);
    assert_int_equal(0, r);
    assert_int_equal(LAST_STATE, VFU_MIGR_STATE_RUNNING);
}

/*
 * Reading migration data in PRE_COPY and STOP_COPY must return the bytes
 * produced by read_callback (0, 1, 2, 3) after the reply header.
 */
static void
test_handle_mig_data_read(void **state)
{
    test_setup_migration_callbacks(state);

    struct test_setup_migr_reg_dat *p = *state;
    struct migration *migr = p->v->migration;
    struct vfio_user_mig_data data = {
        .argsz = sizeof(data),
        .size = 4
    };
    vfu_msg_t *m = mkmsg(VFIO_USER_MIG_DATA_READ, &data, sizeof(data));
    uint8_t expect[4] = {0, 1, 2, 3};

    p->v->client_max_data_xfer_size = 4;

    ssize_t r;

    migr->state = VFIO_USER_DEVICE_STATE_PRE_COPY;
    r = handle_mig_data_read(p->v, m);
    assert_int_equal(4, r);
    assert_int_equal(0, memcmp(msg.out.iov.iov_base + sizeof(data),
                               &expect, 4));
    free(msg.out.iov.iov_base);

    migr->state = VFIO_USER_DEVICE_STATE_STOP_COPY;
    r = handle_mig_data_read(p->v, m);
    assert_int_equal(4, r);
    assert_int_equal(0, memcmp(msg.out.iov.iov_base + sizeof(data),
                               &expect, 4));
    free(msg.out.iov.iov_base);
}

/*
 * A read larger than client_max_data_xfer_size must be rejected with
 * -EINVAL.
 */
static void
test_handle_mig_data_read_too_long(void **state)
{
    test_setup_migration_callbacks(state);

    struct test_setup_migr_reg_dat *p = *state;
    struct migration *migr = p->v->migration;
    struct
           vfio_user_mig_data data = {
        .argsz = sizeof(data),
        .size = 4
    };
    vfu_msg_t *m = mkmsg(VFIO_USER_MIG_DATA_READ, &data, sizeof(data));

    /* request size (4) exceeds the client's max transfer size (2) */
    p->v->client_max_data_xfer_size = 2;

    ssize_t r;

    migr->state = VFIO_USER_DEVICE_STATE_PRE_COPY;
    r = handle_mig_data_read(p->v, m);
    assert_int_equal(-EINVAL, r);
}

/* Reading migration data in RUNNING or STOP must fail with -EINVAL. */
static void
test_handle_mig_data_read_invalid_state(void **state)
{
    test_setup_migration_callbacks(state);

    struct test_setup_migr_reg_dat *p = *state;
    struct migration *migr = p->v->migration;
    struct vfio_user_mig_data data = {
        .argsz = sizeof(data),
        .size = 4
    };
    vfu_msg_t *m = mkmsg(VFIO_USER_MIG_DATA_READ, &data, sizeof(data));

    p->v->client_max_data_xfer_size = 4;

    ssize_t r;

    migr->state = VFIO_USER_DEVICE_STATE_RUNNING;
    r = handle_mig_data_read(p->v, m);
    assert_int_equal(-EINVAL, r);

    migr->state = VFIO_USER_DEVICE_STATE_STOP;
    r = handle_mig_data_read(p->v, m);
    assert_int_equal(-EINVAL, r);
}

/*
 * Writing migration data in RESUMING must hand the 4 payload bytes
 * (following the header inside req[]) to write_callback.
 */
static void
test_handle_mig_data_write(void **state)
{
    test_setup_migration_callbacks(state);

    struct test_setup_migr_reg_dat *p = *state;
    struct migration *migr = p->v->migration;

    /* req[] = header followed by a 4-byte payload of 0,1,2,3 */
    uint8_t req[12] = {0};
    struct vfio_user_mig_data *data = (void*)&req;
    data->argsz = sizeof(req);
    data->size = 4;
    uint8_t *buf = (void*)data + sizeof(*data);
    for (int i = 0; i < 4; i++) {
        *(buf + i) = i;
    }
    vfu_msg_t *m = mkmsg(VFIO_USER_MIG_DATA_READ, &req, sizeof(req));

    p->v->client_max_data_xfer_size = 4;

    ssize_t r;

    migr->state = VFIO_USER_DEVICE_STATE_RESUMING;
    r = handle_mig_data_write(p->v, m);
    assert_int_equal(4, r);
    assert_int_equal(0, memcmp(LAST_WRITE, buf, 4));
}

/* Writing migration data outside RESUMING must fail with -EINVAL. */
static void
test_handle_mig_data_write_invalid_state(void **state)
{
    test_setup_migration_callbacks(state);

    struct test_setup_migr_reg_dat *p = *state;
    struct migration *migr = p->v->migration;

    uint8_t req[12] = {0};
    struct vfio_user_mig_data *data = (void*)&req;
    data->argsz = sizeof(req);
    data->size = 4;
    uint8_t *buf = (void*)data + sizeof(*data);
    for (int i = 0; i < 4; i++) {
        *(buf + i) = i;
    }
    vfu_msg_t *m = mkmsg(VFIO_USER_MIG_DATA_READ,
                         &req, sizeof(req));

    p->v->client_max_data_xfer_size = 4;

    ssize_t r;

    migr->state = VFIO_USER_DEVICE_STATE_RUNNING;
    r = handle_mig_data_write(p->v, m);
    assert_int_equal(-EINVAL, r);
}

/*
 * device_is_stopped_and_copying is true only for STOP_COPY and
 * device_is_stopped only for STOP; both are false for a NULL migration.
 */
static void
test_device_is_stopped_and_copying(UNUSED void **state)
{
    /* NULL migration (cleared by setup()) */
    assert_false(device_is_stopped_and_copying(vfu_ctx.migration));
    assert_false(device_is_stopped(vfu_ctx.migration));

    size_t i;
    struct migration migration;
    vfu_ctx.migration = &migration;

    for (i = 0; i < 8; i++) {
        migration.state = i;
        bool r = device_is_stopped_and_copying(vfu_ctx.migration);
        if (i == VFIO_USER_DEVICE_STATE_STOP_COPY) {
            assert_true(r);
        } else {
            assert_false(r);
        }
        r = device_is_stopped(vfu_ctx.migration);
        if (i == VFIO_USER_DEVICE_STATE_STOP) {
            assert_true(r);
        } else {
            assert_false(r);
        }
    }
    /* don't leave a dangling pointer to the local in the global ctx */
    vfu_ctx.migration = NULL;
}

/*
 * Only region read/write, dirty-page, device-feature and migration-data-read
 * commands are allowed while stopped-and-copying.
 */
static void
test_cmd_allowed_when_stopped_and_copying(UNUSED void **state)
{
    size_t i;

    for (i = 0; i < VFIO_USER_MAX; i++) {
        bool r = cmd_allowed_when_stopped_and_copying(i);
        if (i == VFIO_USER_REGION_READ || i == VFIO_USER_REGION_WRITE ||
            i == VFIO_USER_DIRTY_PAGES || i == VFIO_USER_DEVICE_FEATURE ||
            i == VFIO_USER_MIG_DATA_READ) {
            assert_true(r);
        } else {
            assert_false(r);
        }
    }
}

/*
 * Drives should_exec_command through its four branches by patching its
 * helper predicates.
 */
static void
test_should_exec_command(UNUSED void **state)
{
    struct migration migration = { 0 };

    vfu_ctx.migration = &migration;

    patch("device_is_stopped_and_copying");
    patch("cmd_allowed_when_stopped_and_copying");
    patch("device_is_stopped");

    /* TEST stopped and copying, command allowed */
    will_return(device_is_stopped_and_copying, true);
    expect_value(device_is_stopped_and_copying, migration, &migration);
    will_return(cmd_allowed_when_stopped_and_copying, true);
    expect_value(cmd_allowed_when_stopped_and_copying, cmd, 0xbeef);
    assert_true(should_exec_command(&vfu_ctx, 0xbeef));

    /* TEST stopped and copying, command not allowed */
    will_return(device_is_stopped_and_copying, true);
    expect_any(device_is_stopped_and_copying, migration);
    will_return(cmd_allowed_when_stopped_and_copying, false);
    expect_any(cmd_allowed_when_stopped_and_copying, cmd);
    assert_false(should_exec_command(&vfu_ctx, 0xbeef));

    /* TEST stopped */
    will_return(device_is_stopped_and_copying, false);
    expect_any(device_is_stopped_and_copying, migration);
    will_return(device_is_stopped, true);
    expect_value(device_is_stopped, migration, &migration);
    will_return(cmd_allowed_when_stopped_and_copying, false);
    expect_value(cmd_allowed_when_stopped_and_copying, cmd, 0xbeef);
    assert_false(should_exec_command(&vfu_ctx, 0xbeef));

    /* TEST none of the above */
    will_return(device_is_stopped_and_copying, false);
    expect_any(device_is_stopped_and_copying, migration);
    will_return(device_is_stopped, false);
    expect_any(device_is_stopped, migration);
    assert_true(should_exec_command(&vfu_ctx, 0xbeef));
}

/* Registers every test with its setup/teardown and runs the group. */
int
main(void)
{
    const struct CMUnitTest tests[] = {
        cmocka_unit_test_setup(test_dma_map_mappable_without_fd, setup),
        cmocka_unit_test_setup(test_dma_map_without_fd, setup),
        cmocka_unit_test_setup(test_dma_map_return_value, setup),
        cmocka_unit_test_setup(test_handle_dma_unmap, setup),
        cmocka_unit_test_setup(test_dma_controller_add_region_no_fd, setup),
        cmocka_unit_test_setup(test_dma_controller_remove_region_mapped,
                               setup),
        cmocka_unit_test_setup(test_dma_controller_remove_region_unmapped,
                               setup),
        cmocka_unit_test_setup(test_dma_addr_to_sgl, setup),
        cmocka_unit_test_setup(test_vfu_setup_device_dma, setup),
        cmocka_unit_test_setup(test_migration_state_transitions, setup),
        cmocka_unit_test_setup_teardown(test_setup_migration_callbacks,
                                        setup_test_setup_migration,
                                        teardown_test_setup_migration),
        cmocka_unit_test_setup_teardown(test_setup_migration_callbacks_resuming,
                                        setup_test_setup_migration,
                                        teardown_test_setup_migration),
        cmocka_unit_test_setup_teardown(test_handle_device_state,
                                        setup_test_setup_migration,
                                        teardown_test_setup_migration),
        cmocka_unit_test_setup_teardown(test_handle_mig_data_read,
                                        setup_test_setup_migration,
                                        teardown_test_setup_migration),
        cmocka_unit_test_setup_teardown(test_handle_mig_data_read_too_long,
                                        setup_test_setup_migration,
                                        teardown_test_setup_migration),
        cmocka_unit_test_setup_teardown(test_handle_mig_data_read_invalid_state,
                                        setup_test_setup_migration,
                                        teardown_test_setup_migration),
        cmocka_unit_test_setup_teardown(test_handle_mig_data_write,
                                        setup_test_setup_migration,
                                        teardown_test_setup_migration),
        cmocka_unit_test_setup_teardown(test_handle_mig_data_write_invalid_state,
                                        setup_test_setup_migration,
                                        teardown_test_setup_migration),
        cmocka_unit_test_setup(test_device_is_stopped_and_copying, setup),
        cmocka_unit_test_setup(test_cmd_allowed_when_stopped_and_copying,
                               setup),
        cmocka_unit_test_setup(test_should_exec_command, setup),
    };

    return cmocka_run_group_tests(tests, NULL, NULL);
}

/* ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */