 include/muser.h            | 330
 lib/cap.c                  |  31
 lib/cap.h                  |   7
 lib/dma.c                  |   4
 lib/dma.h                  |   3
 lib/irq.c                  |  40
 lib/migration.c            |  12
 lib/migration.h            |   7
 lib/muser_ctx.c            | 558
 lib/muser_priv.h           |  38
 samples/client.c           |  10
 samples/gpio-pci-idio-16.c |  42
 samples/null.c             |   5
 samples/server.c           | 105
 14 files changed, 662 insertions(+), 530 deletions(-)
diff --git a/include/muser.h b/include/muser.h
index 7c18490..be8b0d0 100644
--- a/include/muser.h
+++ b/include/muser.h
@@ -62,13 +62,6 @@ typedef struct {
typedef struct lm_ctx lm_ctx_t;
-// Region flags.
-#define LM_REG_FLAG_READ (1 << 0)
-#define LM_REG_FLAG_WRITE (1 << 1)
-#define LM_REG_FLAG_MMAP (1 << 2) // TODO: how this relates to IO bar?
-#define LM_REG_FLAG_RW (LM_REG_FLAG_READ | LM_REG_FLAG_WRITE)
-#define LM_REG_FLAG_MEM (1 << 3) // if unset, bar is IO
-
struct lm_mmap_area {
uint64_t start;
uint64_t size;
@@ -80,39 +73,24 @@ struct lm_sparse_mmap_areas {
};
/**
- * Prototype for region access callback. When a region is accessed, libmuser
- * calls the previously registered callback with the following arguments:
- *
- * @pvt: private data originally set in dev_info
- * @buf: buffer containing the data to be written or data to be read into
- * @count: number of bytes being read or written
- * @offset: byte offset within the region
- * @is_write: whether or not this is a write
- *
- * @returns the number of bytes read or written, or a negative integer on error
- */
-typedef ssize_t (lm_region_access_t) (void *pvt, char *buf, size_t count,
- loff_t offset, bool is_write);
-
-/**
* Prototype for memory access callback. The program MUST first map device
* memory in its own virtual address space using lm_mmap, do any additional
* work required, and finally return that memory. When a region is memory
* mapped, libmuser calls the previously registered callback with the following
* arguments:
*
- * @pvt: private data originally set in dev_info
+ * @pvt: private pointer
* @off: offset of memory area being memory mapped
* @len: length of memory area being memory mapped
*
* @returns the memory address returned by lm_mmap, or MAP_FAILED on failure
*/
-typedef unsigned long (lm_map_region_t) (void *pvt, unsigned long off,
+typedef unsigned long (lm_map_region_cb_t) (void *pvt, unsigned long off,
unsigned long len);
/**
* Creates a mapping of a device region into the caller's virtual memory. It
- * must be called by lm_map_region_t.
+ * must be called by lm_map_region_cb_t.
*
* @lm_ctx: the libmuser context to create mapping from
* @offset: offset of the region being mapped
@@ -122,81 +100,6 @@ typedef unsigned long (lm_map_region_t) (void *pvt, unsigned long off,
*/
void *lm_mmap(lm_ctx_t * lm_ctx, off_t offset, size_t length);
-typedef struct {
-
- /*
- * Region flags, see LM_REG_FLAG_XXX above.
- */
- uint32_t flags;
-
- /*
- * Size of the region.
- */
- uint32_t size;
-
- /*
- * Callback function that is called when the region is read or written.
- * Note that the memory of the region is owned by the user, except for the
- * standard header (first 64 bytes) of the PCI configuration space.
- */
- lm_region_access_t *fn;
-
- /*
- * Callback function that is called when the region is memory mapped.
- * Required if LM_REG_FLAG_MEM is set, otherwise ignored.
- */
- lm_map_region_t *map;
- struct lm_sparse_mmap_areas *mmap_areas; /* sparse mmap areas */
-} lm_reg_info_t;
-
-enum {
- LM_DEV_INTX_IRQ_IDX,
- LM_DEV_MSI_IRQ_IDX,
- LM_DEV_MSIX_IRQ_IDX,
- LM_DEV_ERR_IRQ_INDEX,
- LM_DEV_REQ_IRQ_INDEX,
- LM_DEV_NUM_IRQS
-};
-
-/* FIXME these are PCI regions */
-enum {
- LM_DEV_BAR0_REG_IDX,
- LM_DEV_BAR1_REG_IDX,
- LM_DEV_BAR2_REG_IDX,
- LM_DEV_BAR3_REG_IDX,
- LM_DEV_BAR4_REG_IDX,
- LM_DEV_BAR5_REG_IDX,
- LM_DEV_ROM_REG_IDX,
- LM_DEV_CFG_REG_IDX,
- LM_DEV_VGA_REG_IDX,
- LM_DEV_NUM_REGS, /* TODO rename to LM_DEV_NUM_PCI_REGS */
-};
-
-typedef struct {
- uint32_t irq_count[LM_DEV_NUM_IRQS];
-
- /*
- * Per-region information. Only supported regions need to be defined,
- * unsupported regions should be left to 0.
- */
- lm_reg_info_t reg_info[LM_DEV_NUM_REGS];
-
- /*
- * Device and vendor ID.
- */
- lm_pci_hdr_id_t id;
-
- /*
- * Subsystem vendor and device ID.
- */
- lm_pci_hdr_ss_t ss;
-
- /*
- * Class code.
- */
- lm_pci_hdr_cc_t cc;
-} lm_pci_info_t;
-
/*
* Returns a pointer to the standard part of the PCI configuration space.
*/
@@ -212,8 +115,9 @@ typedef enum {
/**
* Callback function signature for log function
- *
+ * @pvt: private pointer
* @lm_log_fn_t: typedef for log function.
+ * @msg: message
*/
typedef void (lm_log_fn_t) (void *pvt, lm_log_lvl_t lvl, const char *msg);
@@ -254,6 +158,10 @@ typedef enum {
* FIXME the names of migration callback functions are probably far too long,
* but for now it helps with the implementation.
*/
+/**
+ * Migration callback function.
+ * @pvt: private pointer
+ */
typedef int (lm_migration_callback_t)(void *pvt);
typedef enum {
@@ -318,85 +226,168 @@ typedef struct {
struct lm_sparse_mmap_areas *mmap_areas;
} lm_migration_t;
+/*
+ * Attaching to the transport is non-blocking. The library will not attempt
+ * to attach during context creation time. The caller must then manually
+ * call lm_ctx_try_attach(), which is non-blocking, as many times as
+ * necessary.
+ */
+#define LM_FLAG_ATTACH_NB (1 << 0)
+
/**
- * Device information structure, used to create the lm_ctx.
- * To be filled and passed to lm_ctx_create()
+ * Creates libmuser context.
+ * @trans: transport type
+ * @path: path to socket file.
+ * @flags: context flags (see LM_FLAG_ATTACH_NB)
+ * @log: log function
+ * @log_lvl: logging level
+ * @pvt: private data
+ * @returns the lm_ctx to be used or NULL on error. Sets errno.
*/
-typedef struct {
- char *uuid;
+lm_ctx_t *lm_create_ctx(lm_trans_t trans, const char *path,
+ int flags, lm_log_fn_t *log, lm_log_lvl_t log_lvl,
+ void *pvt);
- /*
- * Private data passed to various lm_XXX functions.
- */
- void *pvt;
+//TODO: Check which other PCI header registers are suitable to be filled by the
+//      device, or whether the whole lm_pci_hdr_t should be passed to be filled
+//      by the user.
+/**
+ * Setup PCI header data.
+ * @lm_ctx: the libmuser context
+ * @id: Device and vendor ID
+ * @ss: Subsystem vendor and device ID
+ * @cc: Class code
+ * @extended: support extended PCI config space
+ */
+int lm_pci_setup_config_hdr(lm_ctx_t *lm_ctx, lm_pci_hdr_id_t id,
+ lm_pci_hdr_ss_t ss, lm_pci_hdr_cc_t cc,
+ bool extended);
- /*
- * Whether an extended PCI configuration space should be created.
- */
- bool extended;
+//TODO: Support variable size capabilities.
+/**
+ * Setup PCI capabilities.
+ * @lm_ctx: the libmuser context
+ * @caps: array of (lm_cap_t *)
+ * @nr_caps: number of elements in @caps
+ */
+int lm_pci_setup_caps(lm_ctx_t *lm_ctx, lm_cap_t **caps, int nr_caps);
- /*
- * Function to call for logging. Optional.
- */
- lm_log_fn_t *log;
+// Region flags.
+#define LM_REG_FLAG_READ (1 << 0)
+#define LM_REG_FLAG_WRITE (1 << 1)
+#define LM_REG_FLAG_MMAP (1 << 2) // TODO: how does this relate to an IO BAR?
+#define LM_REG_FLAG_RW (LM_REG_FLAG_READ | LM_REG_FLAG_WRITE)
+#define LM_REG_FLAG_MEM (1 << 3) // if unset, the BAR is IO
- /*
- * Log level. Messages above this level are not logged. Optional
- */
- lm_log_lvl_t log_lvl;
+/**
+ * Prototype for region access callback. When a region is accessed, libmuser
+ * calls the previously registered callback with the following arguments:
+ *
+ * @pvt: private data originally passed by lm_create_ctx()
+ * @buf: buffer containing the data to be written or data to be read into
+ * @count: number of bytes being read or written
+ * @offset: byte offset within the region
+ * @is_write: whether or not this is a write
+ *
+ * @returns the number of bytes read or written, or a negative integer on error
+ */
+typedef ssize_t (lm_region_access_cb_t) (void *pvt, char *buf, size_t count,
+ loff_t offset, bool is_write);
- /*
- * PCI configuration.
- */
- lm_pci_info_t pci_info;
+/* FIXME these are PCI regions */
+enum {
+ LM_DEV_BAR0_REG_IDX,
+ LM_DEV_BAR1_REG_IDX,
+ LM_DEV_BAR2_REG_IDX,
+ LM_DEV_BAR3_REG_IDX,
+ LM_DEV_BAR4_REG_IDX,
+ LM_DEV_BAR5_REG_IDX,
+ LM_DEV_ROM_REG_IDX,
+ LM_DEV_CFG_REG_IDX,
+ LM_DEV_VGA_REG_IDX,
+ LM_DEV_NUM_REGS, /* TODO rename to LM_DEV_NUM_PCI_REGS */
+};
- /*
- * Function that is called when the guest resets the device. Optional.
- */
- int (*reset) (void *pvt);
+/**
+ * Set up a region.
+ * @lm_ctx: the libmuser context
+ * @region_idx: region index
+ * @size: size of the region
+ * @region_access: callback function to access region
+ * @flags: region flags
+ * @mmap_areas: mmap areas info
+ * @map: callback function to memory map the region
+ */
+int lm_setup_region(lm_ctx_t *lm_ctx, int region_idx, size_t size,
+ lm_region_access_cb_t *region_access, int flags,
+ struct lm_sparse_mmap_areas *mmap_areas,
+ lm_map_region_cb_t *map);
- /*
- * Function that is called when the guest maps a DMA region. Optional.
- */
- void (*map_dma) (void *pvt, uint64_t iova, uint64_t len);
+/*
+ * Callback function that is called when the guest resets the device.
+ * @pvt: private pointer
+ */
+typedef int (lm_reset_cb_t) (void *pvt);
- /*
- * Function that is called when the guest unmaps a DMA region. The device
- * must release all references to that region before the callback returns.
- * This is required if you want to be able to access guest memory.
- */
- int (*unmap_dma) (void *pvt, uint64_t iova);
+/*
+ * Function that is called when the guest maps a DMA region. Optional.
+ * @pvt: private pointer
+ * @iova: iova address
+ * @len: length
+ */
+typedef void (lm_map_dma_cb_t) (void *pvt, uint64_t iova, uint64_t len);
- lm_trans_t trans;
+/*
+ * Function that is called when the guest unmaps a DMA region. The device
+ * must release all references to that region before the callback returns.
+ * This is required if you want to be able to access guest memory.
+ * @pvt: private pointer
+ * @iova: iova address
+ * @len: length
+ */
+typedef int (lm_unmap_dma_cb_t) (void *pvt, uint64_t iova, uint64_t len);
- /*
- * Attaching to the transport is non-blocking. The library will not attempt
- * to attach during context creation time. The caller must then manually
- * call lm_ctx_try_attach(), which is non-blocking, as many times as
- * necessary.
- */
-#define LM_FLAG_ATTACH_NB (1 << 0)
- uint64_t flags;
+/**
+ * Setup device reset callback.
+ * @lm_ctx: the libmuser context
+ * @reset: device reset callback (optional)
+ */
+int lm_setup_device_reset_cb(lm_ctx_t *lm_ctx, lm_reset_cb_t *reset);
- /*
- * PCI capabilities.
- */
- int nr_caps;
- lm_cap_t **caps;
+/**
+ * Setup device DMA map/unmap callbacks.
+ * @lm_ctx: the libmuser context
+ * @map_dma: DMA region map callback (optional)
+ * @unmap_dma: DMA region unmap callback (optional)
+ */
- lm_migration_t migration;
+int lm_setup_device_dma_cb(lm_ctx_t *lm_ctx, lm_map_dma_cb_t *map_dma,
+ lm_unmap_dma_cb_t *unmap_dma);
-} lm_dev_info_t;
+enum lm_dev_irq_type {
+ LM_DEV_INTX_IRQ,
+ LM_DEV_MSI_IRQ,
+ LM_DEV_MSIX_IRQ,
+ LM_DEV_ERR_IRQ,
+ LM_DEV_REQ_IRQ,
+ LM_DEV_NUM_IRQS
+};
/**
- * Creates libmuser context.
- *
- * @dev_info: device information used to create the context.
- *
- * @returns the lm_ctx to be used or NULL on error. Sets errno.
+ * Setup device IRQ counts.
+ * @lm_ctx: the libmuser context
+ * @type: IRQ type (LM_DEV_INTX_IRQ ... LM_DEV_REQ_IRQ)
+ * @count: number of irqs
*/
-lm_ctx_t *
-lm_ctx_create(const lm_dev_info_t *dev_info);
+int lm_setup_device_nr_irqs(lm_ctx_t *lm_ctx, enum lm_dev_irq_type type,
+ uint32_t count);
+
+//TODO: Re-visit once migration support is done.
+/**
+ * Enable support for device migration.
+ * @lm_ctx: the libmuser context
+ * @migration: information required to migrate device
+ */
+int lm_setup_device_migration(lm_ctx_t *lm_ctx, lm_migration_t *migration);
/**
* Destroys libmuser context.
@@ -418,16 +409,6 @@ int
lm_ctx_drive(lm_ctx_t *lm_ctx);
/**
- * Creates and runs an lm_ctx.
- *
- * @dev_info: device information used to create the context
- *
- * @returns 0 on success, -1 on failure. Sets errno.
- */
-int
-lm_ctx_run(lm_dev_info_t *dev_info);
-
-/**
* Polls, without blocking, an lm_ctx. This is an alternative to using
* a thread and making a blocking call to lm_ctx_drive(). Instead, the
* application can periodically poll the context directly from one of
@@ -540,6 +521,7 @@ void
lm_unmap_sg(lm_ctx_t *lm_ctx, const dma_sg_t *sg,
struct iovec *iov, int cnt);
+//FIXME: Remove if we don't need this.
/**
* Returns the PCI region given the position and size of an address span in the
* PCI configuration space.
@@ -579,6 +561,7 @@ lm_dma_write(lm_ctx_t *lm_ctx, dma_sg_t *sg, void *data);
/**
* Returns the non-standard part of the PCI configuration space.
+ * @lm_ctx: the libmuser context
*/
uint8_t *
lm_get_pci_non_std_config_space(lm_ctx_t *lm_ctx);
@@ -588,6 +571,7 @@ lm_get_pci_non_std_config_space(lm_ctx_t *lm_ctx);
* creating the context. Returns 0 on success and -1 on error. If errno is set
* to EAGAIN or EWOULDBLOCK then the transport is not ready to attach to and the
* operation must be retried.
+ * @lm_ctx: the libmuser context
*/
int
lm_ctx_try_attach(lm_ctx_t *lm_ctx);
@@ -595,6 +579,8 @@ lm_ctx_try_attach(lm_ctx_t *lm_ctx);
/*
* FIXME need to make sure that there can be at most one capability with a given
* ID, otherwise this function will return the first one with this ID.
+ * @lm_ctx: the libmuser context
+ * @id: capability id
*/
uint8_t *
lm_ctx_get_cap(lm_ctx_t *lm_ctx, uint8_t id);
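
With lm_dev_info_t removed, a device is now described through the individual
lm_create_ctx()/lm_*_setup() calls declared above rather than one struct passed
to lm_ctx_create(). A minimal sketch of the new flow, modelled on the updated
gpio-pci-idio-16 sample further down (the socket path and device IDs are
placeholders, bar2_access is a user-supplied lm_region_access_cb_t, and error
handling is abbreviated):

    lm_pci_hdr_id_t id = { .vid = 0x494F, .did = 0x0DC8 };
    lm_pci_hdr_ss_t ss = { 0 };
    lm_pci_hdr_cc_t cc = { 0 };

    /* Create the context first; all setup calls operate on it. */
    lm_ctx_t *lm_ctx = lm_create_ctx(LM_TRANS_SOCK, "/tmp/muser.sock", 0,
                                     NULL, LM_DBG, NULL);
    if (lm_ctx == NULL)
        err(EXIT_FAILURE, "lm_create_ctx");

    /* Describe the device: PCI header, one BAR and a single INTx line. */
    if (lm_pci_setup_config_hdr(lm_ctx, id, ss, cc, false) < 0 ||
        lm_setup_region(lm_ctx, LM_DEV_BAR2_REG_IDX, 0x100, &bar2_access,
                        LM_REG_FLAG_RW, NULL, NULL) < 0 ||
        lm_setup_device_nr_irqs(lm_ctx, LM_DEV_INTX_IRQ, 1) < 0)
        err(EXIT_FAILURE, "device setup");

    /* Serve requests until the client goes away, then clean up. */
    lm_ctx_drive(lm_ctx);
    lm_ctx_destroy(lm_ctx);
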
diff --git a/lib/cap.c b/lib/cap.c
index bdc07f8..9ace154 100644
--- a/lib/cap.c
+++ b/lib/cap.c
@@ -404,17 +404,18 @@ cap_maybe_access(lm_ctx_t *lm_ctx, struct caps *caps, char *buf, size_t count,
offset - (loff_t)(cap - config_space->raw));
}
-struct caps *
-caps_create(lm_ctx_t *lm_ctx, lm_cap_t **lm_caps, int nr_caps)
+struct caps *caps_create(lm_ctx_t *lm_ctx, lm_cap_t **lm_caps, int nr_caps,
+ int *err)
{
- int i, err = 0;
+ int i;
uint8_t *prev;
uint8_t next;
lm_pci_config_space_t *config_space;
struct caps *caps = NULL;
+ *err = 0;
if (nr_caps <= 0 || nr_caps >= LM_MAX_CAPS) {
- errno = EINVAL;
+ *err = EINVAL;
return NULL;
}
@@ -422,8 +423,8 @@ caps_create(lm_ctx_t *lm_ctx, lm_cap_t **lm_caps, int nr_caps)
caps = calloc(1, sizeof *caps);
if (caps == NULL) {
- err = ENOMEM;
- goto out;
+ *err = ENOMEM;
+ goto err_out;
}
config_space = lm_get_pci_config_space(lm_ctx);
@@ -439,14 +440,14 @@ caps_create(lm_ctx_t *lm_ctx, lm_cap_t **lm_caps, int nr_caps)
size_t size;
if (!cap_is_valid(id)) {
- err = EINVAL;
- goto out;
+ *err = EINVAL;
+ goto err_out;
}
size = cap_handlers[id].size;
if (size == 0) {
- err = EINVAL;
- goto out;
+ *err = EINVAL;
+ goto err_out;
}
caps->caps[i].start = next;
@@ -464,13 +465,11 @@ caps_create(lm_ctx_t *lm_ctx, lm_cap_t **lm_caps, int nr_caps)
}
caps->nr_caps = nr_caps;
-out:
- if (err) {
- free(caps);
- caps = NULL;
- errno = err;
- }
return caps;
+
+err_out:
+ free(caps);
+ return NULL;
}
/* ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */
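
caps_create() now reports failures through an explicit *err out-parameter
instead of setting errno itself; its only caller, lm_pci_setup_caps() in
lib/muser_ctx.c (further down in this diff), converts that value back with
ERROR(). A sketch of the new calling convention, assuming lm_caps and nr_caps
come from the user:

    int err;
    struct caps *caps = caps_create(lm_ctx, lm_caps, nr_caps, &err);
    if (caps == NULL) {
        /* err holds a positive errno value such as EINVAL or ENOMEM */
        lm_log(lm_ctx, LM_ERR, "failed to create PCI capabilities: %s",
               strerror(err));
        return ERROR(err);    /* sets errno = err and returns -1 */
    }
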
diff --git a/lib/cap.h b/lib/cap.h
index 527b6c0..038562f 100644
--- a/lib/cap.h
+++ b/lib/cap.h
@@ -39,12 +39,9 @@ struct caps;
/**
* Initializes PCI capabilities.
- *
- * Returns <0 on error, 0 if no capabilities are to be added, and >0 if all
- * capabilities have been added.
*/
-struct caps *
-caps_create(lm_ctx_t *lm_ctx, lm_cap_t **caps, int nr_caps);
+struct caps *caps_create(lm_ctx_t *lm_ctx, lm_cap_t **caps, int nr_caps,
+ int *err);
/*
* Conditionally accesses the PCI capabilities. Returns:
diff --git a/lib/dma.c b/lib/dma.c
index e862531..af5c624 100644
--- a/lib/dma.c
+++ b/lib/dma.c
@@ -138,7 +138,7 @@ dma_controller_region_valid(dma_controller_t *dma, dma_addr_t dma_addr,
int
dma_controller_remove_region(dma_controller_t *dma,
dma_addr_t dma_addr, size_t size,
- int (*unmap_dma) (void*, uint64_t), void *data)
+ lm_unmap_dma_cb_t *unmap_dma, void *data)
{
int idx;
dma_memory_region_t *region;
@@ -150,7 +150,7 @@ dma_controller_remove_region(dma_controller_t *dma,
region = &dma->regions[idx];
if (region->dma_addr == dma_addr && region->size == size) {
if (region->refcnt > 0) {
- err = unmap_dma(data, region->dma_addr);
+ err = unmap_dma(data, region->dma_addr, region->size);
if (err != 0) {
lm_log(dma->lm_ctx, LM_ERR,
"failed to notify of removal of DMA region %#lx-%#lx: %s\n",
diff --git a/lib/dma.h b/lib/dma.h
index 3e2aaf2..32140d8 100644
--- a/lib/dma.h
+++ b/lib/dma.h
@@ -73,6 +73,7 @@
#include <sys/mman.h>
#include <stdint.h>
#include <stdlib.h>
+#include <limits.h>
#include "muser.h"
#include "common.h"
@@ -119,7 +120,7 @@ dma_controller_add_region(dma_controller_t *dma,
int
dma_controller_remove_region(dma_controller_t *dma,
dma_addr_t dma_addr, size_t size,
- int (*unmap_dma) (void*, uint64_t), void *data);
+ lm_unmap_dma_cb_t *unmap_dma, void *data);
// Helper for dma_addr_to_sg() slow path.
int
diff --git a/lib/irq.c b/lib/irq.c
index 4fa7513..b334745 100644
--- a/lib/irq.c
+++ b/lib/irq.c
@@ -66,22 +66,22 @@ irqs_disable(lm_ctx_t *lm_ctx, uint32_t index)
case VFIO_PCI_MSI_IRQ_INDEX:
case VFIO_PCI_MSIX_IRQ_INDEX:
lm_log(lm_ctx, LM_DBG, "disabling IRQ %s", vfio_irq_idx_to_str(index));
- lm_ctx->irqs.type = IRQ_NONE;
- for (i = 0; i < lm_ctx->irqs.max_ivs; i++) {
- if (lm_ctx->irqs.efds[i] >= 0) {
- if (close(lm_ctx->irqs.efds[i]) == -1) {
+ lm_ctx->irqs->type = IRQ_NONE;
+ for (i = 0; i < lm_ctx->irqs->max_ivs; i++) {
+ if (lm_ctx->irqs->efds[i] >= 0) {
+ if (close(lm_ctx->irqs->efds[i]) == -1) {
lm_log(lm_ctx, LM_DBG, "failed to close IRQ fd %d: %m",
- lm_ctx->irqs.efds[i]);
+ lm_ctx->irqs->efds[i]);
}
- lm_ctx->irqs.efds[i] = -1;
+ lm_ctx->irqs->efds[i] = -1;
}
}
return 0;
case VFIO_PCI_ERR_IRQ_INDEX:
- irq_efd = &lm_ctx->irqs.err_efd;
+ irq_efd = &lm_ctx->irqs->err_efd;
break;
case VFIO_PCI_REQ_IRQ_INDEX:
- irq_efd = &lm_ctx->irqs.req_efd;
+ irq_efd = &lm_ctx->irqs->req_efd;
break;
}
@@ -109,7 +109,7 @@ irqs_set_data_none(lm_ctx_t *lm_ctx, struct vfio_irq_set *irq_set)
eventfd_t val;
for (i = irq_set->start; i < (irq_set->start + irq_set->count); i++) {
- efd = lm_ctx->irqs.efds[i];
+ efd = lm_ctx->irqs->efds[i];
if (efd >= 0) {
val = 1;
ret = eventfd_write(efd, val);
@@ -135,7 +135,7 @@ irqs_set_data_bool(lm_ctx_t *lm_ctx, struct vfio_irq_set *irq_set, void *data)
assert(data != NULL);
for (i = irq_set->start, d8 = data; i < (irq_set->start + irq_set->count);
i++, d8++) {
- efd = lm_ctx->irqs.efds[i];
+ efd = lm_ctx->irqs->efds[i];
if (efd >= 0 && *d8 == 1) {
val = 1;
ret = eventfd_write(efd, val);
@@ -159,18 +159,18 @@ irqs_set_data_eventfd(lm_ctx_t *lm_ctx, struct vfio_irq_set *irq_set, void *data
assert(data != NULL);
for (i = irq_set->start, d32 = data; i < (irq_set->start + irq_set->count);
i++, d32++) {
- efd = lm_ctx->irqs.efds[i];
+ efd = lm_ctx->irqs->efds[i];
if (efd >= 0) {
if (close(efd) == -1) {
lm_log(lm_ctx, LM_DBG, "failed to close IRQ fd %d: %m", efd);
}
- lm_ctx->irqs.efds[i] = -1;
+ lm_ctx->irqs->efds[i] = -1;
}
if (*d32 >= 0) {
- lm_ctx->irqs.efds[i] = *d32;
+ lm_ctx->irqs->efds[i] = *d32;
}
- lm_log(lm_ctx, LM_DBG, "event fd[%d]=%d", i, lm_ctx->irqs.efds[i]);
+ lm_log(lm_ctx, LM_DBG, "event fd[%d]=%d", i, lm_ctx->irqs->efds[i]);
}
return 0;
@@ -260,8 +260,8 @@ dev_set_irqs_validate(lm_ctx_t *lm_ctx, struct vfio_irq_set *irq_set)
return -EINVAL;
}
// If IRQs are set, ensure index matches what's enabled for the device.
- if ((irq_set->count != 0) && (lm_ctx->irqs.type != IRQ_NONE) &&
- (irq_set->index != LM2VFIO_IRQT(lm_ctx->irqs.type))) {
+ if ((irq_set->count != 0) && (lm_ctx->irqs->type != IRQ_NONE) &&
+ (irq_set->index != LM2VFIO_IRQT(lm_ctx->irqs->type))) {
lm_log(lm_ctx, LM_DBG, "bad IRQ index\n");
return -EINVAL;
}
@@ -369,9 +369,9 @@ static int validate_irq_subindex(lm_ctx_t *lm_ctx, uint32_t subindex)
return -1;
}
- if ((subindex >= lm_ctx->irqs.max_ivs)) {
+ if ((subindex >= lm_ctx->irqs->max_ivs)) {
lm_log(lm_ctx, LM_ERR, "bad IRQ %d, max=%d\n", subindex,
- lm_ctx->irqs.max_ivs);
+ lm_ctx->irqs->max_ivs);
/* FIXME should return -errno */
errno = EINVAL;
return -1;
@@ -391,14 +391,14 @@ lm_irq_trigger(lm_ctx_t *lm_ctx, uint32_t subindex)
return ret;
}
- if (lm_ctx->irqs.efds[subindex] == -1) {
+ if (lm_ctx->irqs->efds[subindex] == -1) {
lm_log(lm_ctx, LM_ERR, "no fd for interrupt %d\n", subindex);
/* FIXME should return -errno */
errno = ENOENT;
return -1;
}
- return eventfd_write(lm_ctx->irqs.efds[subindex], val);
+ return eventfd_write(lm_ctx->irqs->efds[subindex], val);
}
int
diff --git a/lib/migration.c b/lib/migration.c
index 4072614..3493617 100644
--- a/lib/migration.c
+++ b/lib/migration.c
@@ -75,18 +75,19 @@ static const __u32 migr_states[VFIO_DEVICE_STATE_MASK] = {
(1 << VFIO_DEVICE_STATE_RESUMING)
};
-struct migration*
-init_migration(const lm_migration_t * const lm_migr)
+struct migration *init_migration(const lm_migration_t * const lm_migr, int *err)
{
struct migration *migr;
+ *err = 0;
if (lm_migr->size < sizeof(struct vfio_device_migration_info)) {
- errno = EINVAL;
+ *err = EINVAL;
return NULL;
}
migr = calloc(1, sizeof *migr);
if (migr == NULL) {
+ *err = ENOMEM;
return NULL;
}
@@ -98,7 +99,7 @@ init_migration(const lm_migration_t * const lm_migr)
/* FIXME this should be done in lm_ctx_run or poll */
- migr->info.device_state = VFIO_DEVICE_STATE_RUNNING;
+ migr->info.device_state = VFIO_DEVICE_STATE_RUNNING;
migr->callbacks = lm_migr->callbacks;
if (migr->callbacks.transition == NULL ||
@@ -107,9 +108,10 @@ init_migration(const lm_migration_t * const lm_migr)
migr->callbacks.read_data == NULL ||
migr->callbacks.write_data == NULL) {
free(migr);
- errno = EINVAL;
+ *err = EINVAL;
return NULL;
}
+
return migr;
}
diff --git a/lib/migration.h b/lib/migration.h
index a16d5fa..079e6eb 100644
--- a/lib/migration.h
+++ b/lib/migration.h
@@ -43,13 +43,12 @@
#include "muser.h"
-struct migration *
-init_migration(const lm_migration_t * const lm_migr);
+struct migration *init_migration(const lm_migration_t * const lm_migr,
+ int *err);
ssize_t
handle_migration_region_access(lm_ctx_t *lm_ctx, void *pvt,
- struct migration *migr,
- char *buf, size_t count,
+ struct migration *migr, char *buf, size_t count,
loff_t pos, bool is_write);
bool
diff --git a/lib/muser_ctx.c b/lib/muser_ctx.c
index 617a218..a186466 100644
--- a/lib/muser_ctx.c
+++ b/lib/muser_ctx.c
@@ -78,6 +78,12 @@ lm_log(lm_ctx_t *lm_ctx, lm_log_lvl_t lvl, const char *fmt, ...)
errno = _errno;
}
+static inline int ERROR(int err)
+{
+ errno = err;
+ return -1;
+}
+
static size_t
get_vfio_caps_size(bool is_migr_reg, struct lm_sparse_mmap_areas *m)
{
@@ -339,7 +345,7 @@ do_access(lm_ctx_t *lm_ctx, char *buf, uint8_t count, uint64_t pos, bool is_writ
* Checking whether a callback exists might sound expensive however this
* code is not performance critical. This works well when we don't expect a
* region to be used, so the user of the library can simply leave the
- * callback NULL in lm_ctx_create.
+ * callback NULL in lm_create_ctx.
*/
if (lm_ctx->reg_info[idx].fn != NULL) {
return lm_ctx->reg_info[idx].fn(lm_ctx->pvt, buf, count, offset,
@@ -565,7 +571,7 @@ handle_dma_map_or_unmap(lm_ctx_t *lm_ctx, uint32_t size, bool map,
dma_regions[i].addr,
dma_regions[i].addr + dma_regions[i].size - 1);
}
- }
+ }
if (ret < 0) {
return ret;
}
@@ -1000,14 +1006,113 @@ reply:
return ret;
}
+static int prepare_ctx(lm_ctx_t *lm_ctx)
+{
+ lm_reg_info_t *cfg_reg;
+ const lm_reg_info_t zero_reg = { 0 };
+ int err;
+ uint32_t max_ivs = 0, i;
+ size_t size;
+
+ if (lm_ctx->ready != 0) {
+ return 0;
+ }
+
+    // Attach to the muser control device. With LM_FLAG_ATTACH_NB the caller
+    // is always expected to call lm_ctx_try_attach() instead.
+ if ((lm_ctx->flags & LM_FLAG_ATTACH_NB) == 0) {
+ lm_ctx->conn_fd = lm_ctx->trans->attach(lm_ctx);
+ if (lm_ctx->conn_fd < 0) {
+ err = lm_ctx->conn_fd;
+ if (err != EINTR) {
+ lm_log(lm_ctx, LM_ERR, "failed to attach: %s",
+ strerror(-err));
+ }
+ return err;
+ }
+ }
+
+ cfg_reg = &lm_ctx->reg_info[LM_DEV_CFG_REG_IDX];
+
+ // Set a default config region if none provided.
+ /* TODO should it be enough to check that the size of region is 0? */
+ if (memcmp(cfg_reg, &zero_reg, sizeof(*cfg_reg)) == 0) {
+ cfg_reg->flags = LM_REG_FLAG_RW;
+ cfg_reg->size = PCI_CFG_SPACE_SIZE;
+ }
+
+    // This may already have been allocated by lm_pci_setup_config_hdr().
+ if (lm_ctx->pci_config_space == NULL) {
+ lm_ctx->pci_config_space = calloc(1, cfg_reg->size);
+ if (lm_ctx->pci_config_space == NULL) {
+ return -ENOMEM;
+ }
+ }
+
+ // Set type for region registers.
+ for (i = 0; i < PCI_BARS_NR; i++) {
+ if (!(lm_ctx->reg_info[i].flags & LM_REG_FLAG_MEM)) {
+ lm_ctx->pci_config_space->hdr.bars[i].io.region_type |= 0x1;
+ }
+ }
+
+ if (lm_ctx->irqs == NULL) {
+ /*
+ * FIXME need to check that the number of MSI and MSI-X IRQs are valid
+ * (1, 2, 4, 8, 16 or 32 for MSI and up to 2048 for MSI-X).
+ */
+
+ // Work out highest count of irq vectors.
+ for (i = 0; i < LM_DEV_NUM_IRQS; i++) {
+ if (max_ivs < lm_ctx->irq_count[i]) {
+ max_ivs = lm_ctx->irq_count[i];
+ }
+ }
+
+ //FIXME: assert(max_ivs > 0)?
+ size = sizeof(int) * max_ivs;
+ lm_ctx->irqs = calloc(1, sizeof(lm_irqs_t) + size);
+ if (lm_ctx->irqs == NULL) {
+            // lm_ctx->pci_config_space will be freed by lm_ctx_destroy().
+ return -ENOMEM;
+ }
+
+ // Set context irq information.
+ for (i = 0; i < max_ivs; i++) {
+ lm_ctx->irqs->efds[i] = -1;
+ }
+ lm_ctx->irqs->err_efd = -1;
+ lm_ctx->irqs->req_efd = -1;
+ lm_ctx->irqs->type = IRQ_NONE;
+ lm_ctx->irqs->max_ivs = max_ivs;
+
+        // Reflect in the config space whether INTx is available.
+ if (lm_ctx->irq_count[LM_DEV_INTX_IRQ] != 0) {
+ lm_ctx->pci_config_space->hdr.intr.ipin = 1; // INTA#
+ }
+ }
+
+ if (lm_ctx->caps != NULL) {
+ lm_ctx->pci_config_space->hdr.sts.cl = 0x1;
+ lm_ctx->pci_config_space->hdr.cap = PCI_STD_HEADER_SIZEOF;
+ }
+ lm_ctx->ready = 1;
+
+ return 0;
+}
+
int
lm_ctx_drive(lm_ctx_t *lm_ctx)
{
int err;
if (lm_ctx == NULL) {
- errno = EINVAL;
- return -1;
+ return ERROR(EINVAL);
+ }
+
+ err = prepare_ctx(lm_ctx);
+ if (err < 0) {
+ return ERROR(-err);
}
do {
@@ -1026,6 +1131,7 @@ lm_ctx_poll(lm_ctx_t *lm_ctx)
return -ENOTSUP;
}
+ assert(lm_ctx->ready == 1);
err = process_request(lm_ctx);
return err >= 0 ? 0 : err;
@@ -1069,13 +1175,17 @@ lm_ctx_destroy(lm_ctx_t *lm_ctx)
free(lm_ctx->uuid);
free(lm_ctx->pci_config_space);
- lm_ctx->trans->detach(lm_ctx);
+ if (lm_ctx->trans->detach != NULL) {
+ lm_ctx->trans->detach(lm_ctx);
+ }
if (lm_ctx->dma != NULL) {
dma_controller_destroy(lm_ctx->dma);
}
free_sparse_mmap_areas(lm_ctx);
+ free(lm_ctx->reg_info);
free(lm_ctx->caps);
free(lm_ctx->migration);
+ free(lm_ctx->irqs);
free(lm_ctx);
// FIXME: Maybe close any open irq efds? Unmap stuff?
}
@@ -1096,286 +1206,294 @@ copy_sparse_mmap_area(struct lm_sparse_mmap_areas *src)
return dest;
}
-static int
-copy_sparse_mmap_areas(lm_reg_info_t *dst, const lm_reg_info_t *src)
+int
+lm_ctx_try_attach(lm_ctx_t *lm_ctx)
{
- int i;
+ int err;
- assert(dst != NULL);
- assert(src != NULL);
+ assert(lm_ctx != NULL);
- for (i = 0; i < LM_DEV_NUM_REGS; i++) {
- if (!src[i].mmap_areas)
- continue;
- dst[i].mmap_areas = copy_sparse_mmap_area(src[i].mmap_areas);
- if (dst[i].mmap_areas == NULL) {
- return -ENOMEM;
- }
+ if ((lm_ctx->flags & LM_FLAG_ATTACH_NB) == 0) {
+ return ERROR(EINVAL);
}
- return 0;
+ err = prepare_ctx(lm_ctx);
+ if (err < 0) {
+ return ERROR(-err);
+ }
+
+ return lm_ctx->trans->attach(lm_ctx);
}
-static int
-pci_config_setup(lm_ctx_t *lm_ctx, const lm_dev_info_t *dev_info)
+lm_ctx_t *lm_create_ctx(lm_trans_t trans, const char *path, int flags,
+ lm_log_fn_t *log, lm_log_lvl_t log_lvl, void *pvt)
{
- lm_reg_info_t *cfg_reg;
- const lm_reg_info_t zero_reg = { 0 };
- int i;
-
- assert(lm_ctx != NULL);
- assert(dev_info != NULL);
-
- // Convenience pointer.
- cfg_reg = &lm_ctx->reg_info[LM_DEV_CFG_REG_IDX];
+ lm_ctx_t *lm_ctx = NULL;
+ int err = 0;
- // Set a default config region if none provided.
- /* TODO should it be enough to check that the size of region is 0? */
- if (memcmp(cfg_reg, &zero_reg, sizeof(*cfg_reg)) == 0) {
- cfg_reg->flags = LM_REG_FLAG_RW;
- cfg_reg->size = PCI_CFG_SPACE_SIZE;
- } else {
- // Validate the config region provided.
- if ((cfg_reg->flags != LM_REG_FLAG_RW) ||
- ((cfg_reg->size != PCI_CFG_SPACE_SIZE) &&
- (cfg_reg->size != PCI_CFG_SPACE_EXP_SIZE))) {
- return EINVAL;
- }
+ if (trans != LM_TRANS_SOCK) {
+ errno = ENOTSUP;
+ return NULL;
}
- // Allocate a buffer for the config space.
- lm_ctx->pci_config_space = calloc(1, cfg_reg->size);
- if (lm_ctx->pci_config_space == NULL) {
- return -1;
+ lm_ctx = calloc(1, sizeof(lm_ctx_t));
+ if (lm_ctx == NULL) {
+ return NULL;
}
+ lm_ctx->trans = &sock_transport_ops;
- // Bounce misc PCI basic header data.
- lm_ctx->pci_config_space->hdr.id = dev_info->pci_info.id;
- lm_ctx->pci_config_space->hdr.cc = dev_info->pci_info.cc;
- lm_ctx->pci_config_space->hdr.ss = dev_info->pci_info.ss;
+ //FIXME: Validate arguments.
+ // Set other context data.
+ lm_ctx->pvt = pvt;
+ lm_ctx->log = log;
+ lm_ctx->log_lvl = log_lvl;
+ lm_ctx->flags = flags;
- // Reflect on the config space whether INTX is available.
- if (dev_info->pci_info.irq_count[LM_DEV_INTX_IRQ_IDX] != 0) {
- lm_ctx->pci_config_space->hdr.intr.ipin = 1; // INTA#
+ lm_ctx->uuid = strdup(path);
+ if (lm_ctx->uuid == NULL) {
+ err = errno;
+ goto out;
}
- // Set type for region registers.
- for (i = 0; i < PCI_BARS_NR; i++) {
- if ((dev_info->pci_info.reg_info[i].flags & LM_REG_FLAG_MEM) == 0) {
- lm_ctx->pci_config_space->hdr.bars[i].io.region_type |= 0x1;
- }
+ /*
+     * FIXME: We now always allocate room for the migration region. Check if
+     * it's better to separate the migration region from the standard regions
+     * in lm_ctx.reg_info and move it into lm_ctx.migration.
+ */
+ lm_ctx->nr_regions = LM_DEV_NUM_REGS + 1;
+ lm_ctx->reg_info = calloc(lm_ctx->nr_regions, sizeof *lm_ctx->reg_info);
+ if (lm_ctx->reg_info == NULL) {
+ err = -ENOMEM;
+ goto out;
}
- if (dev_info->migration.size != 0) {
- const lm_migration_t *migr = &dev_info->migration;
-
- /* FIXME hacky, find a more robust way to allocate a region index */
- lm_ctx->migr_reg = &lm_ctx->reg_info[(lm_ctx->nr_regions - 1)];
- lm_ctx->migr_reg->flags = LM_REG_FLAG_RW;
- lm_ctx->migr_reg->size = sizeof(struct vfio_device_migration_info) + dev_info->migration.size;
- /* FIXME is there are sparse areas need to setup flags accordingly */
- lm_ctx->migr_reg->mmap_areas = copy_sparse_mmap_area(migr->mmap_areas);
- lm_ctx->migration = init_migration(&dev_info->migration);
- if (lm_ctx->migration == NULL) {
- goto err;
+ if (lm_ctx->trans->init != NULL) {
+ err = lm_ctx->trans->init(lm_ctx);
+ if (err < 0) {
+ goto out;
}
+ lm_ctx->fd = err;
}
+ err = 0;
- // Initialise PCI capabilities.
- if (dev_info->nr_caps > 0) {
- lm_ctx->caps = caps_create(lm_ctx, dev_info->caps, dev_info->nr_caps);
- if (lm_ctx->caps == NULL) {
- /* FIXME is this safe? lm_ctx might not have been fully initialized */
- lm_log(lm_ctx, LM_ERR, "failed to create PCI capabilities: %m\n");
- goto err;
+out:
+ if (err != 0) {
+ if (lm_ctx != NULL) {
+ lm_ctx_destroy(lm_ctx);
+ lm_ctx = NULL;
}
-
- lm_ctx->pci_config_space->hdr.sts.cl = 0x1;
- lm_ctx->pci_config_space->hdr.cap = PCI_STD_HEADER_SIZEOF;
+ errno = -err;
}
- return 0;
-
-err:
- free(lm_ctx->pci_config_space);
- lm_ctx->pci_config_space = NULL;
-
- return -1;
+ return lm_ctx;
}
-static void
-pci_info_bounce(lm_ctx_t *lm_ctx, const lm_pci_info_t *pci_info)
+int lm_pci_setup_config_hdr(lm_ctx_t *lm_ctx, lm_pci_hdr_id_t id,
+ lm_pci_hdr_ss_t ss, lm_pci_hdr_cc_t cc,
+ UNUSED bool extended)
{
- int i;
+ lm_pci_config_space_t *config_space;
assert(lm_ctx != NULL);
- assert(pci_info != NULL);
- for (i = 0; i < LM_DEV_NUM_IRQS; i++) {
- lm_ctx->irq_count[i] = pci_info->irq_count[i];
+ if (lm_ctx->pci_config_space != NULL) {
+ lm_log(lm_ctx, LM_ERR, "pci header already setup");
+ return ERROR(EEXIST);
}
- for (i = 0; i < LM_DEV_NUM_REGS; i++) {
- lm_ctx->reg_info[i].flags = pci_info->reg_info[i].flags;
- lm_ctx->reg_info[i].size = pci_info->reg_info[i].size;
- lm_ctx->reg_info[i].fn = pci_info->reg_info[i].fn;
- lm_ctx->reg_info[i].map = pci_info->reg_info[i].map;
- // Sparse map data copied by copy_sparse_mmap_areas().
+    /* TODO: support extended PCI config space. */
+
+ // Allocate a buffer for the config space.
+ config_space = calloc(1, PCI_CFG_SPACE_SIZE);
+ if (config_space == NULL) {
+ return ERROR(ENOMEM);
}
+
+ config_space->hdr.id = id;
+ config_space->hdr.ss = ss;
+ config_space->hdr.cc = cc;
+ lm_ctx->pci_config_space = config_space;
+
+ return 0;
}
-int
-lm_ctx_try_attach(lm_ctx_t *lm_ctx)
+int lm_pci_setup_caps(lm_ctx_t *lm_ctx, lm_cap_t **caps, int nr_caps)
{
+ int ret;
+
assert(lm_ctx != NULL);
- if ((lm_ctx->flags & LM_FLAG_ATTACH_NB) == 0) {
- errno = EINVAL;
- return -1;
+ if (lm_ctx->caps != NULL) {
+ lm_log(lm_ctx, LM_ERR, "capabilities are already setup");
+ return ERROR(EEXIST);
}
- return lm_ctx->trans->attach(lm_ctx);
-}
-
-lm_ctx_t *
-lm_ctx_create(const lm_dev_info_t *dev_info)
-{
- lm_ctx_t *lm_ctx = NULL;
- uint32_t max_ivs = 0;
- uint32_t i;
- int err = 0;
- size_t size = 0;
- if (dev_info == NULL) {
- errno = EINVAL;
- return NULL;
+ if (caps == NULL || nr_caps == 0) {
+ lm_log(lm_ctx, LM_ERR, "Invalid args passed");
+ return ERROR(EINVAL);
}
- if (dev_info->trans != LM_TRANS_SOCK) {
- errno = ENOTSUP;
- return NULL;
+ lm_ctx->caps = caps_create(lm_ctx, caps, nr_caps, &ret);
+ if (lm_ctx->caps == NULL) {
+ lm_log(lm_ctx, LM_ERR, "failed to create PCI capabilities: %s",
+ strerror(ret));
+ return ERROR(ret);
}
- /*
- * FIXME need to check that the number of MSI and MSI-X IRQs are valid
- * (1, 2, 4, 8, 16 or 32 for MSI and up to 2048 for MSI-X).
- */
+ return 0;
+}
- // Work out highest count of irq vectors.
- for (i = 0; i < LM_DEV_NUM_IRQS; i++) {
- if (max_ivs < dev_info->pci_info.irq_count[i]) {
- max_ivs = dev_info->pci_info.irq_count[i];
- }
- }
+static int
+copy_sparse_mmap_areas(lm_reg_info_t *reg_info,
+ struct lm_sparse_mmap_areas *mmap_areas)
+{
+ int nr_mmap_areas;
+ size_t size;
- // Allocate an lm_ctx with room for the irq vectors.
- size += sizeof(int) * max_ivs;
- lm_ctx = calloc(1, sizeof(lm_ctx_t) + size);
- if (lm_ctx == NULL) {
- return NULL;
+ if (mmap_areas == NULL) {
+ return 0;
}
- lm_ctx->trans = &sock_transport_ops;
- // Set context irq information.
- for (i = 0; i < max_ivs; i++) {
- lm_ctx->irqs.efds[i] = -1;
+ nr_mmap_areas = mmap_areas->nr_mmap_areas;
+ size = sizeof(*mmap_areas) + (nr_mmap_areas * sizeof(struct lm_mmap_area));
+ reg_info->mmap_areas = calloc(1, size);
+ if (reg_info->mmap_areas == NULL) {
+ return -ENOMEM;
}
- lm_ctx->irqs.err_efd = -1;
- lm_ctx->irqs.req_efd = -1;
- lm_ctx->irqs.type = IRQ_NONE;
- lm_ctx->irqs.max_ivs = max_ivs;
- // Set other context data.
- lm_ctx->pvt = dev_info->pvt;
- lm_ctx->log = dev_info->log;
- lm_ctx->log_lvl = dev_info->log_lvl;
- lm_ctx->reset = dev_info->reset;
- lm_ctx->flags = dev_info->flags;
+ memcpy(reg_info->mmap_areas, mmap_areas, size);
- lm_ctx->uuid = strdup(dev_info->uuid);
- if (lm_ctx->uuid == NULL) {
- err = errno;
- goto out;
- }
+ return 0;
+}
- lm_ctx->nr_regions = LM_DEV_NUM_REGS;
- if (dev_info->migration.size > 0) {
- lm_ctx->nr_regions += 1;
- }
- lm_ctx->reg_info = calloc(lm_ctx->nr_regions, sizeof *lm_ctx->reg_info);
- if (lm_ctx->reg_info == NULL) {
- err = -ENOMEM;
- goto out;
- }
+static inline bool is_valid_pci_config_space_region(int flags, size_t size)
+{
+ return flags == LM_REG_FLAG_RW && (size == PCI_CFG_SPACE_SIZE
+ || size == PCI_CFG_SPACE_EXP_SIZE);
+}
- // Bounce the provided pci_info into the context.
- pci_info_bounce(lm_ctx, &dev_info->pci_info);
+int lm_setup_region(lm_ctx_t *lm_ctx, int region_idx, size_t size,
+ lm_region_access_cb_t *region_access, int flags,
+ struct lm_sparse_mmap_areas *mmap_areas,
+ lm_map_region_cb_t *map)
+{
+ int ret;
- /*
- * FIXME above memcpy also copies reg_info->mmap_areas. If pci_config_setup
- * fails then we try to free reg_info->mmap_areas, which is wrong because
- * this is a user pointer.
- */
- for (i = 0; i < lm_ctx->nr_regions; i++) {
- lm_ctx->reg_info[i].mmap_areas = NULL;
- }
+ assert(lm_ctx != NULL);
- // Setup the PCI config space for this context.
- err = pci_config_setup(lm_ctx, dev_info);
- if (err != 0) {
- goto out;
- }
+ switch(region_idx) {
+ case LM_DEV_BAR0_REG_IDX ... LM_DEV_VGA_REG_IDX:
+ // Validate the config region provided.
+ if (region_idx == LM_DEV_CFG_REG_IDX &&
+ !is_valid_pci_config_space_region(flags, size)) {
+ return ERROR(EINVAL);
+ }
- // Bounce info for the sparse mmap areas.
- err = copy_sparse_mmap_areas(lm_ctx->reg_info, dev_info->pci_info.reg_info);
- if (err) {
- goto out;
- }
+ lm_ctx->reg_info[region_idx].flags = flags;
+ lm_ctx->reg_info[region_idx].size = size;
+ lm_ctx->reg_info[region_idx].fn = region_access;
- if (lm_ctx->trans->init != NULL) {
- err = lm_ctx->trans->init(lm_ctx);
- if (err < 0) {
- goto out;
+ if (map != NULL) {
+ lm_ctx->reg_info[region_idx].map = map;
}
- lm_ctx->fd = err;
+ if (mmap_areas) {
+ ret = copy_sparse_mmap_areas(&lm_ctx->reg_info[region_idx],
+ mmap_areas);
+ if (ret < 0) {
+ return ERROR(-ret);
+ }
+ }
+ break;
+ default:
+ lm_log(lm_ctx, LM_ERR, "Invalid region index %d", region_idx);
+ return ERROR(EINVAL);
}
- err = 0;
- lm_ctx->map_dma = dev_info->map_dma;
- lm_ctx->unmap_dma = dev_info->unmap_dma;
+ return 0;
+}
+
+int lm_setup_device_reset_cb(lm_ctx_t *lm_ctx, lm_reset_cb_t *reset)
+{
+
+ assert(lm_ctx != NULL);
+ lm_ctx->reset = reset;
+
+ return 0;
+}
+
+int lm_setup_device_dma_cb(lm_ctx_t *lm_ctx, lm_map_dma_cb_t *map_dma,
+ lm_unmap_dma_cb_t *unmap_dma)
+{
+
+ assert(lm_ctx != NULL);
+
+ lm_ctx->map_dma = map_dma;
+ lm_ctx->unmap_dma = unmap_dma;
// Create the internal DMA controller.
if (lm_ctx->unmap_dma != NULL) {
lm_ctx->dma = dma_controller_create(lm_ctx, LM_DMA_REGIONS);
if (lm_ctx->dma == NULL) {
- err = errno;
- goto out;
+ return ERROR(ENOMEM);
}
}
- // Attach to the muser control device. With LM_FLAG_ATTACH_NB caller is
- // always expected to call lm_ctx_try_attach().
- if ((dev_info->flags & LM_FLAG_ATTACH_NB) == 0) {
- lm_ctx->conn_fd = lm_ctx->trans->attach(lm_ctx);
- if (lm_ctx->conn_fd < 0) {
- err = lm_ctx->conn_fd;
- if (err != EINTR) {
- lm_log(lm_ctx, LM_ERR, "failed to attach: %s",
- strerror(-err));
- }
- goto out;
- }
+ return 0;
+}
+
+int lm_setup_device_nr_irqs(lm_ctx_t *lm_ctx, enum lm_dev_irq_type type,
+ uint32_t count)
+{
+
+ assert(lm_ctx != NULL);
+
+ if (type < LM_DEV_INTX_IRQ || type > LM_DEV_REQ_IRQ) {
+ lm_log(lm_ctx, LM_ERR, "Invalid IRQ index %d, should be between "
+ "(%d to %d)", type, LM_DEV_INTX_IRQ,
+ LM_DEV_REQ_IRQ);
+ return ERROR(EINVAL);
}
-out:
- if (err != 0) {
- if (lm_ctx != NULL) {
- lm_ctx_destroy(lm_ctx);
- lm_ctx = NULL;
- }
- errno = -err;
+ lm_ctx->irq_count[type] = count;
+
+ return 0;
+}
+
+int lm_setup_device_migration(lm_ctx_t *lm_ctx, lm_migration_t *migration)
+{
+ lm_reg_info_t *migr_reg;
+ int ret = 0;
+
+ assert(lm_ctx != NULL);
+
+ //FIXME: Validate args.
+
+ if (lm_ctx->migr_reg != NULL) {
+ lm_log(lm_ctx, LM_ERR, "device migration is already setup");
+ return ERROR(EEXIST);
}
- return lm_ctx;
+ /* FIXME hacky, find a more robust way to allocate a region index */
+ migr_reg = &lm_ctx->reg_info[(lm_ctx->nr_regions - 1)];
+
+    /* FIXME: if there are sparse areas, the flags need to be set up accordingly */
+ ret = copy_sparse_mmap_areas(migr_reg, migration->mmap_areas);
+ if (ret < 0) {
+ return ERROR(-ret);
+ }
+
+ migr_reg->flags = LM_REG_FLAG_RW;
+ migr_reg->size = sizeof(struct vfio_device_migration_info) + migration->size;
+
+ lm_ctx->migration = init_migration(migration, &ret);
+ if (lm_ctx->migration == NULL) {
+ lm_log(lm_ctx, LM_ERR, "failed to initialize device migration");
+ free(migr_reg->mmap_areas);
+ return ERROR(ret);
+ }
+ lm_ctx->migr_reg = migr_reg;
+
+ return 0;
}
/*
@@ -1438,20 +1556,6 @@ lm_unmap_sg(lm_ctx_t *lm_ctx, const dma_sg_t *sg, struct iovec *iov, int cnt)
return dma_unmap_sg(lm_ctx->dma, sg, iov, cnt);
}
-int
-lm_ctx_run(lm_dev_info_t *dev_info)
-{
- int ret;
-
- lm_ctx_t *lm_ctx = lm_ctx_create(dev_info);
- if (lm_ctx == NULL) {
- return -1;
- }
- ret = lm_ctx_drive(lm_ctx);
- lm_ctx_destroy(lm_ctx);
- return ret;
-}
-
uint8_t *
lm_ctx_get_cap(lm_ctx_t *lm_ctx, uint8_t id)
{
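
prepare_ctx() only attaches eagerly when LM_FLAG_ATTACH_NB is not set;
otherwise the caller drives the attach itself. A minimal sketch of the
non-blocking loop, assuming the context was created with LM_FLAG_ATTACH_NB
(per the header, EAGAIN/EWOULDBLOCK simply mean "not ready yet, retry";
<errno.h>, <err.h> and <unistd.h> are assumed):

    while (lm_ctx_try_attach(lm_ctx) < 0) {
        if (errno != EAGAIN && errno != EWOULDBLOCK) {
            err(EXIT_FAILURE, "failed to attach");
        }
        /* do other work, or just back off, before retrying */
        usleep(100 * 1000);
    }
    /* attached: lm_ctx_drive() or lm_ctx_poll() can now service requests */
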
diff --git a/lib/muser_priv.h b/lib/muser_priv.h
index 5ca63a9..7fa2c2e 100644
--- a/lib/muser_priv.h
+++ b/lib/muser_priv.h
@@ -66,15 +66,41 @@ typedef struct {
struct migration;
+typedef struct {
+
+ /*
+     * Region flags, see LM_REG_FLAG_XXX in muser.h.
+ */
+ uint32_t flags;
+
+ /*
+ * Size of the region.
+ */
+ uint32_t size;
+
+ /*
+ * Callback function that is called when the region is read or written.
+ * Note that the memory of the region is owned by the user, except for the
+ * standard header (first 64 bytes) of the PCI configuration space.
+ */
+ lm_region_access_cb_t *fn;
+
+ /*
+ * Callback function that is called when the region is memory mapped.
+ * Required if LM_REG_FLAG_MEM is set, otherwise ignored.
+ */
+ lm_map_region_cb_t *map;
+ struct lm_sparse_mmap_areas *mmap_areas; /* sparse mmap areas */
+} lm_reg_info_t;
+
struct lm_ctx {
void *pvt;
dma_controller_t *dma;
int fd;
int conn_fd;
- int (*reset) (void *pvt);
+ lm_reset_cb_t *reset;
lm_log_lvl_t log_lvl;
lm_log_fn_t *log;
- uint32_t irq_count[LM_DEV_NUM_IRQS];
size_t nr_regions;
lm_reg_info_t *reg_info;
lm_pci_config_space_t *pci_config_space;
@@ -82,8 +108,8 @@ struct lm_ctx {
struct caps *caps;
uint64_t flags;
char *uuid;
- void (*map_dma) (void *pvt, uint64_t iova, uint64_t len);
- int (*unmap_dma) (void *pvt, uint64_t iova);
+ lm_map_dma_cb_t *map_dma;
+ lm_unmap_dma_cb_t *unmap_dma;
/* TODO there should be a void * variable to store transport-specific stuff */
/* LM_TRANS_SOCK */
@@ -94,7 +120,9 @@ struct lm_ctx {
lm_reg_info_t *migr_reg;
struct migration *migration;
- lm_irqs_t irqs; /* XXX must be last */
+ uint32_t irq_count[LM_DEV_NUM_IRQS];
+ lm_irqs_t *irqs;
+ int ready;
};
int
diff --git a/samples/client.c b/samples/client.c
index 91ce8a0..fa8e040 100644
--- a/samples/client.c
+++ b/samples/client.c
@@ -50,11 +50,11 @@
#define CLIENT_MAX_FDS (32)
static char *irq_to_str[] = {
- [LM_DEV_INTX_IRQ_IDX] = "INTx",
- [LM_DEV_MSI_IRQ_IDX] = "MSI",
- [LM_DEV_MSIX_IRQ_IDX] = "MSI-X",
- [LM_DEV_ERR_IRQ_INDEX] = "ERR",
- [LM_DEV_REQ_IRQ_INDEX] = "REQ"
+ [LM_DEV_INTX_IRQ] = "INTx",
+ [LM_DEV_MSI_IRQ] = "MSI",
+ [LM_DEV_MSIX_IRQ] = "MSI-X",
+ [LM_DEV_ERR_IRQ] = "ERR",
+ [LM_DEV_REQ_IRQ] = "REQ"
};
void
diff --git a/samples/gpio-pci-idio-16.c b/samples/gpio-pci-idio-16.c
index d4e485f..7dd8ec6 100644
--- a/samples/gpio-pci-idio-16.c
+++ b/samples/gpio-pci-idio-16.c
@@ -75,6 +75,9 @@ main(int argc, char *argv[])
char opt;
struct sigaction act = {.sa_handler = _sa_handler};
lm_ctx_t *lm_ctx;
+ lm_pci_hdr_id_t id = {.vid = 0x494F, .did = 0x0DC8};
+ lm_pci_hdr_ss_t ss = {0};
+ lm_pci_hdr_cc_t cc = {0};
while ((opt = getopt(argc, argv, "v")) != -1) {
switch (opt) {
@@ -91,27 +94,13 @@ main(int argc, char *argv[])
errx(EXIT_FAILURE, "missing MUSER socket path");
}
- lm_dev_info_t dev_info = {
- .trans = LM_TRANS_SOCK,
- .log = verbose ? _log : NULL,
- .log_lvl = LM_DBG,
- .pci_info = {
- .id = {.vid = 0x494F, .did = 0x0DC8 },
- .reg_info[LM_DEV_BAR2_REG_IDX] = {
- .flags = LM_REG_FLAG_RW,
- .size = 0x100,
- .fn = &bar2_access
- },
- },
- .uuid = argv[optind],
- };
-
sigemptyset(&act.sa_mask);
if (sigaction(SIGINT, &act, NULL) == -1) {
err(EXIT_FAILURE, "failed to register signal handler");
}
- lm_ctx = lm_ctx_create(&dev_info);
+ lm_ctx = lm_create_ctx(LM_TRANS_SOCK, argv[optind], 0,
+ verbose ? _log : NULL, LM_DBG, NULL);
if (lm_ctx == NULL) {
if (errno == EINTR) {
printf("interrupted\n");
@@ -120,16 +109,31 @@ main(int argc, char *argv[])
err(EXIT_FAILURE, "failed to initialize device emulation");
}
- ret = lm_ctx_drive(lm_ctx);
+ ret = lm_pci_setup_config_hdr(lm_ctx, id, ss, cc, false);
+ if (ret < 0) {
+ fprintf(stderr, "failed to setup pci header\n");
+ goto out;
+ }
+ ret = lm_setup_region(lm_ctx, LM_DEV_BAR2_REG_IDX, 0x100, &bar2_access,
+ LM_REG_FLAG_RW, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "failed to setup region\n");
+ goto out;
+ }
+
+ ret = lm_ctx_drive(lm_ctx);
if (ret != 0) {
if (ret != -ENOTCONN && ret != -EINTR) {
- err(EXIT_FAILURE, "failed to realize device emulation");
+ fprintf(stderr, "failed to realize device emulation\n");
+ goto out;
}
+ ret = 0;
}
+out:
lm_ctx_destroy(lm_ctx);
- return EXIT_SUCCESS;
+ return ret;
}
/* ex: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */
diff --git a/samples/null.c b/samples/null.c
index 97c3fcf..5d52893 100644
--- a/samples/null.c
+++ b/samples/null.c
@@ -78,9 +78,8 @@ int main(int argc, char **argv)
errx(EXIT_FAILURE, "missing MUSER socket path");
}
- lm_dev_info_t dev_info = {.uuid = argv[1], .log = null_log, .log_lvl = LM_DBG };
-
- lm_ctx_t *lm_ctx = lm_ctx_create(&dev_info);
+ lm_ctx_t *lm_ctx = lm_create_ctx(LM_TRANS_SOCK, argv[1], 0, null_log,
+ LM_DBG, NULL);
if (lm_ctx == NULL) {
err(EXIT_FAILURE, "failed to create libmuser context");
}
diff --git a/samples/server.c b/samples/server.c
index 432d0ce..f39452c 100644
--- a/samples/server.c
+++ b/samples/server.c
@@ -154,13 +154,14 @@ static void map_dma(void *pvt, uint64_t iova, uint64_t len)
server_data->regions[idx].len = len;
}
-static int unmap_dma(void *pvt, uint64_t iova)
+static int unmap_dma(void *pvt, uint64_t iova, uint64_t len)
{
struct server_data *server_data = pvt;
int idx;
for (idx = 0; idx < NR_DMA_REGIONS; idx++) {
- if (server_data->regions[idx].addr == iova) {
+ if (server_data->regions[idx].addr == iova &&
+ server_data->regions[idx].len == len) {
server_data->regions[idx].addr = 0;
server_data->regions[idx].len = 0;
return 0;
@@ -382,6 +383,9 @@ int main(int argc, char *argv[])
int nr_sparse_areas = 2, size = 1024, i;
struct lm_sparse_mmap_areas *sparse_areas;
lm_ctx_t *lm_ctx;
+ lm_pci_hdr_id_t id = {.raw = 0xdeadbeef};
+ lm_pci_hdr_ss_t ss = {.raw = 0xcafebabe};
+ lm_pci_hdr_cc_t cc = {.pi = 0xab, .scc = 0xcd, .bcc = 0xef};
while ((opt = getopt(argc, argv, "v")) != -1) {
switch (opt) {
@@ -413,62 +417,71 @@ int main(int argc, char *argv[])
sparse_areas->areas[i].start += size;
sparse_areas->areas[i].size = size;
}
-
- lm_dev_info_t dev_info = {
- .trans = LM_TRANS_SOCK,
- .log = verbose ? _log : NULL,
- .log_lvl = LM_DBG,
- .pci_info = {
- .id.raw = 0xdeadbeef,
- .ss.raw = 0xcafebabe,
- .cc = {.pi = 0xab, .scc = 0xcd, .bcc = 0xef},
- .reg_info[LM_DEV_BAR0_REG_IDX] = {
- .flags = LM_REG_FLAG_RW,
- .size = sizeof(time_t),
- .fn = &bar0_access
- },
- .reg_info[LM_DEV_BAR1_REG_IDX] = {
- .flags = LM_REG_FLAG_RW,
- .size = sysconf(_SC_PAGESIZE),
- .fn = &bar1_access,
- .mmap_areas = sparse_areas,
- .map = map_area
- },
- .irq_count[LM_DEV_INTX_IRQ_IDX] = 1,
- },
- .uuid = argv[optind],
- .reset = device_reset,
- .map_dma = map_dma,
- .unmap_dma = unmap_dma,
- .pvt = &server_data,
- .migration = {
- .size = server_data.migration.migr_data_len,
- .mmap_areas = sparse_areas,
- .callbacks = {
- .transition = &migration_device_state_transition,
- .get_pending_bytes = &migration_get_pending_bytes,
- .prepare_data = &migration_prepare_data,
- .read_data = &migration_read_data,
- .data_written = &migration_data_written,
- .write_data = &migration_write_data
- }
- }
- };
-
sigemptyset(&act.sa_mask);
if (sigaction(SIGALRM, &act, NULL) == -1) {
err(EXIT_FAILURE, "failed to register signal handler");
}
- server_data.lm_ctx = lm_ctx = lm_ctx_create(&dev_info);
+ server_data.lm_ctx = lm_ctx = lm_create_ctx(LM_TRANS_SOCK, argv[optind], 0,
+ verbose ? _log : NULL, LM_DBG, &server_data);
if (lm_ctx == NULL) {
err(EXIT_FAILURE, "failed to initialize device emulation\n");
}
+ ret = lm_pci_setup_config_hdr(lm_ctx, id, ss, cc, false);
+ if (ret < 0) {
+ err(EXIT_FAILURE, "failed to setup PCI header");
+ }
+
+ ret = lm_setup_region(lm_ctx, LM_DEV_BAR0_REG_IDX, sizeof(time_t),
+ &bar0_access, LM_REG_FLAG_RW, NULL, NULL);
+ if (ret < 0) {
+ err(EXIT_FAILURE, "failed to setup BAR0 region");
+ }
+
+ ret = lm_setup_region(lm_ctx, LM_DEV_BAR1_REG_IDX, sysconf(_SC_PAGESIZE),
+ &bar1_access, LM_REG_FLAG_RW, sparse_areas, map_area);
+ if (ret < 0) {
+ err(EXIT_FAILURE, "failed to setup BAR1 region");
+ }
+
+ ret = lm_setup_device_reset_cb(lm_ctx, &device_reset);
+ if (ret < 0) {
+ err(EXIT_FAILURE, "failed to setup device reset callbacks");
+ }
+
+ ret = lm_setup_device_dma_cb(lm_ctx, &map_dma, &unmap_dma);
+ if (ret < 0) {
+ err(EXIT_FAILURE, "failed to setup device DMA callbacks");
+ }
+
+ ret = lm_setup_device_nr_irqs(lm_ctx, LM_DEV_INTX_IRQ, 1);
+ if (ret < 0) {
+ err(EXIT_FAILURE, "failed to setup irq counts");
+ }
+
+ lm_migration_t migration = {
+ .size = server_data.migration.migr_data_len,
+ .mmap_areas = sparse_areas,
+ .callbacks = {
+ .transition = &migration_device_state_transition,
+ .get_pending_bytes = &migration_get_pending_bytes,
+ .prepare_data = &migration_prepare_data,
+ .read_data = &migration_read_data,
+ .data_written = &migration_data_written,
+ .write_data = &migration_write_data
+ }
+ };
+
+ ret = lm_setup_device_migration(lm_ctx, &migration);
+ if (ret < 0) {
+ err(EXIT_FAILURE, "failed to setup device migration");
+ }
+
server_data.migration.migr_data = aligned_alloc(server_data.migration.migr_data_len,
server_data.migration.migr_data_len);
if (server_data.migration.migr_data == NULL) {
- errx(EXIT_FAILURE, "failed to allocate migration data");
+ err(EXIT_FAILURE, "failed to allocate migration data");
}
do {