aboutsummaryrefslogtreecommitdiff
path: root/src/include/ipxe/dma.h
diff options
context:
space:
mode:
authorMichael Brown <mcb30@ipxe.org>2020-11-04 15:18:49 +0000
committerMichael Brown <mcb30@ipxe.org>2020-11-05 20:03:50 +0000
commitdda03c884d70d18546bb2d02f92acb4c4da28fc8 (patch)
tree775637d2c67340a945df274ff5da3d88353d43c8 /src/include/ipxe/dma.h
parentbe1c87b72237f633c4f4b05bcb133acf2967d788 (diff)
downloadipxe-dda03c884d70d18546bb2d02f92acb4c4da28fc8.zip
ipxe-dda03c884d70d18546bb2d02f92acb4c4da28fc8.tar.gz
ipxe-dda03c884d70d18546bb2d02f92acb4c4da28fc8.tar.bz2
[dma] Define a DMA API to allow for non-flat device address spaces
iPXE currently assumes that DMA-capable devices can directly address physical memory using host addresses. This assumption fails when using an IOMMU. Define an internal DMA API with two implementations: a "flat" implementation for use in legacy BIOS or other environments in which flat physical addressing is guaranteed to be used and all allocated physical addresses are guaranteed to be within a 32-bit address space, and an "operations-based" implementation for use in UEFI or other environments in which DMA mapping may require bus-specific handling. The purpose of the fully inlined "flat" implementation is to allow the trivial identity DMA mappings to be optimised out at build time, thereby avoiding an increase in code size for legacy BIOS builds. Signed-off-by: Michael Brown <mcb30@ipxe.org>
Diffstat (limited to 'src/include/ipxe/dma.h')
-rw-r--r--  src/include/ipxe/dma.h  334
1 file changed, 334 insertions, 0 deletions
diff --git a/src/include/ipxe/dma.h b/src/include/ipxe/dma.h
new file mode 100644
index 0000000..d3db061
--- /dev/null
+++ b/src/include/ipxe/dma.h
@@ -0,0 +1,334 @@
+#ifndef _IPXE_DMA_H
+#define _IPXE_DMA_H
+
+/** @file
+ *
+ * DMA mappings
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include <stdint.h>
+#include <ipxe/api.h>
+#include <ipxe/io.h>
+#include <ipxe/iobuf.h>
+#include <ipxe/malloc.h>
+#include <config/ioapi.h>
+
+/* Single-implementation API selection: the DMA API implementation
+ * chosen in config/ioapi.h gets an empty prefix (so its functions
+ * provide the dma_xxx() entry points directly), while the unselected
+ * implementation is renamed out of the way with a private prefix.
+ * The prefixes are consumed by the SINGLE_API macros below.
+ */
+#ifdef DMAAPI_OP
+#define DMAAPI_PREFIX_op
+#else
+#define DMAAPI_PREFIX_op __op_
+#endif
+
+#ifdef DMAAPI_FLAT
+#define DMAAPI_PREFIX_flat
+#else
+#define DMAAPI_PREFIX_flat __flat_
+#endif
+
+/** A DMA mapping
+ *
+ * Records the device-side address of a mapped buffer.  In the flat
+ * implementation this is simply the host physical address; an
+ * IOMMU-aware implementation may store a different device address.
+ */
+struct dma_mapping {
+ /** Device-side address */
+ physaddr_t addr;
+};
+
+/** A DMA-capable device */
+struct dma_device {
+ /** DMA operations */
+ struct dma_operations *op;
+ /** Addressable space mask */
+ physaddr_t mask;
+ /** Total number of mappings (for debugging; maintained only when DBG_LOG is enabled) */
+ unsigned int mapped;
+ /** Total number of allocations (for debugging; maintained only when DBG_LOG is enabled) */
+ unsigned int allocated;
+};
+
+/** DMA operations
+ *
+ * Bus-specific mapping operations, used by the operations-based DMA
+ * API implementation.
+ */
+struct dma_operations {
+ /**
+ * Map buffer for DMA
+ *
+ * @v dma DMA device
+ * @v addr Buffer address
+ * @v len Length of buffer
+ * @v flags Mapping flags (DMA_TX, DMA_RX, or DMA_BI)
+ * @v map DMA mapping to fill in
+ * @ret rc Return status code
+ */
+ int ( * map ) ( struct dma_device *dma, physaddr_t addr, size_t len,
+ int flags, struct dma_mapping *map );
+ /**
+ * Unmap buffer
+ *
+ * @v dma DMA device
+ * @v map DMA mapping
+ */
+ void ( * unmap ) ( struct dma_device *dma, struct dma_mapping *map );
+ /**
+ * Allocate and map DMA-coherent buffer
+ *
+ * @v dma DMA device
+ * @v len Length of buffer
+ * @v align Physical alignment
+ * @v map DMA mapping to fill in
+ * @ret addr Buffer address, or NULL on error
+ */
+ void * ( * alloc ) ( struct dma_device *dma, size_t len, size_t align,
+ struct dma_mapping *map );
+ /**
+ * Unmap and free DMA-coherent buffer
+ *
+ * @v dma DMA device
+ * @v addr Buffer address
+ * @v len Length of buffer
+ * @v map DMA mapping
+ */
+ void ( * free ) ( struct dma_device *dma, void *addr, size_t len,
+ struct dma_mapping *map );
+ /**
+ * Set addressable space mask
+ *
+ * @v dma DMA device
+ * @v mask Addressable space mask
+ */
+ void ( * set_mask ) ( struct dma_device *dma, physaddr_t mask );
+};
+
+/* Mapping direction flags, passed as the "flags" argument to dma_map() */
+
+/** Device will read data from host memory */
+#define DMA_TX 0x01
+
+/** Device will write data to host memory */
+#define DMA_RX 0x02
+
+/** Device will both read data from and write data to host memory */
+#define DMA_BI ( DMA_TX | DMA_RX )
+
+/**
+ * Calculate static inline DMA I/O API function name
+ *
+ * @v _subsys Subsystem prefix
+ * @v _api_func API function
+ * @ret _subsys_func Subsystem API function
+ */
+#define DMAAPI_INLINE( _subsys, _api_func ) \
+ SINGLE_API_INLINE ( DMAAPI_PREFIX_ ## _subsys, _api_func )
+
+/**
+ * Provide a DMA I/O API implementation
+ *
+ * @v _subsys Subsystem prefix
+ * @v _api_func API function
+ * @v _func Implementing function
+ */
+#define PROVIDE_DMAAPI( _subsys, _api_func, _func ) \
+ PROVIDE_SINGLE_API ( DMAAPI_PREFIX_ ## _subsys, _api_func, _func )
+
+/**
+ * Provide a static inline DMA I/O API implementation
+ *
+ * @v _subsys Subsystem prefix
+ * @v _api_func API function
+ */
+#define PROVIDE_DMAAPI_INLINE( _subsys, _api_func ) \
+ PROVIDE_SINGLE_API_INLINE ( DMAAPI_PREFIX_ ## _subsys, _api_func )
+
+/**
+ * Map buffer for DMA
+ *
+ * The flat implementation uses the host physical address directly as
+ * the device address (an identity mapping), which is valid only in
+ * environments where flat physical addressing is guaranteed.
+ *
+ * @v dma DMA device
+ * @v addr Buffer address
+ * @v len Length of buffer
+ * @v flags Mapping flags
+ * @v map DMA mapping to fill in
+ * @ret rc Return status code (always zero: identity mapping cannot fail)
+ */
+static inline __always_inline int
+DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma, physaddr_t addr,
+ size_t len __unused, int flags __unused,
+ struct dma_mapping *map ) {
+
+ /* Use physical address as device address */
+ map->addr = addr;
+
+ /* Increment mapping count (for debugging) */
+ if ( DBG_LOG )
+ dma->mapped++;
+
+ return 0;
+}
+
+/**
+ * Unmap buffer
+ *
+ * With an identity mapping there is nothing to undo; this is a no-op
+ * apart from the debug accounting.
+ *
+ * @v dma DMA device
+ * @v map DMA mapping
+ */
+static inline __always_inline void
+DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_device *dma,
+ struct dma_mapping *map __unused ) {
+
+ /* Decrement mapping count (for debugging) */
+ if ( DBG_LOG )
+ dma->mapped--;
+}
+
+/**
+ * Allocate and map DMA-coherent buffer
+ *
+ * @v dma DMA device
+ * @v len Length of buffer
+ * @v align Physical alignment
+ * @v map DMA mapping to fill in
+ * @ret addr Buffer address, or NULL on error
+ */
+static inline __always_inline void *
+DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma, size_t len,
+ size_t align, struct dma_mapping *map ) {
+ void *addr;
+
+ /* Allocate buffer */
+ addr = malloc_phys ( len, align );
+ /* NOTE(review): map->addr is filled in even when malloc_phys()
+ * returns NULL; callers must check the returned addr before
+ * trusting the mapping.
+ */
+ map->addr = virt_to_phys ( addr );
+
+ /* Increment allocation count (for debugging) */
+ if ( DBG_LOG && addr )
+ dma->allocated++;
+
+ return addr;
+}
+
+/**
+ * Unmap and free DMA-coherent buffer
+ *
+ * @v dma DMA device
+ * @v addr Buffer address
+ * @v len Length of buffer (must match the length passed to dma_alloc())
+ * @v map DMA mapping
+ */
+static inline __always_inline void
+DMAAPI_INLINE ( flat, dma_free ) ( struct dma_device *dma,
+ void *addr, size_t len,
+ struct dma_mapping *map __unused ) {
+
+ /* Free buffer */
+ free_phys ( addr, len );
+
+ /* Decrement allocation count (for debugging) */
+ if ( DBG_LOG )
+ dma->allocated--;
+}
+
+/**
+ * Set addressable space mask
+ *
+ * The flat implementation ignores the mask: it is used only in
+ * environments where all allocated physical addresses are guaranteed
+ * to lie within the device-addressable space.
+ *
+ * @v dma DMA device
+ * @v mask Addressable space mask
+ */
+static inline __always_inline void
+DMAAPI_INLINE ( flat, dma_set_mask ) ( struct dma_device *dma __unused,
+ physaddr_t mask __unused ) {
+
+ /* Nothing to do */
+}
+
+/*
+ * Public DMA API entry points.  Depending on the DMAAPI_* selection
+ * in config/ioapi.h, each of these resolves either to the static
+ * inline "flat" implementation above or to an out-of-line
+ * implementation supplied via PROVIDE_DMAAPI().
+ */
+
+/**
+ * Map buffer for DMA
+ *
+ * @v dma DMA device
+ * @v addr Buffer address
+ * @v len Length of buffer
+ * @v flags Mapping flags
+ * @v map DMA mapping to fill in
+ * @ret rc Return status code
+ */
+int dma_map ( struct dma_device *dma, physaddr_t addr, size_t len,
+ int flags, struct dma_mapping *map );
+
+/**
+ * Unmap buffer
+ *
+ * @v dma DMA device
+ * @v map DMA mapping
+ */
+void dma_unmap ( struct dma_device *dma, struct dma_mapping *map );
+
+/**
+ * Allocate and map DMA-coherent buffer
+ *
+ * @v dma DMA device
+ * @v len Length of buffer
+ * @v align Physical alignment
+ * @v map DMA mapping to fill in
+ * @ret addr Buffer address, or NULL on error
+ */
+void * dma_alloc ( struct dma_device *dma, size_t len, size_t align,
+ struct dma_mapping *map );
+
+/**
+ * Unmap and free DMA-coherent buffer
+ *
+ * @v dma DMA device
+ * @v addr Buffer address
+ * @v len Length of buffer
+ * @v map DMA mapping
+ */
+void dma_free ( struct dma_device *dma, void *addr, size_t len,
+ struct dma_mapping *map );
+
+/**
+ * Set addressable space mask
+ *
+ * @v dma DMA device
+ * @v mask Addressable space mask
+ */
+void dma_set_mask ( struct dma_device *dma, physaddr_t mask );
+
+/**
+ * Initialise DMA device
+ *
+ * Sets only the operations table; the addressable space mask is
+ * expected to be configured separately via dma_set_mask().
+ *
+ * @v dma DMA device
+ * @v op DMA operations
+ */
+static inline __always_inline void dma_init ( struct dma_device *dma,
+ struct dma_operations *op ) {
+
+ /* Set operations table */
+ dma->op = op;
+}
+
+/**
+ * Set 64-bit addressable space mask
+ *
+ * Convenience wrapper: an all-ones physaddr_t is the widest possible
+ * mask, i.e. the device can address all of physical memory.
+ *
+ * @v dma DMA device
+ */
+static inline __always_inline void
+dma_set_mask_64bit ( struct dma_device *dma ) {
+
+ /* Set mask to maximum physical address */
+ dma_set_mask ( dma, ~( ( physaddr_t ) 0 ) );
+}
+
+/**
+ * Map I/O buffer for transmitting data to device
+ *
+ * Maps the buffer's current contents ([data, data+len)) with DMA_TX,
+ * i.e. for the device to read from host memory.
+ *
+ * @v dma DMA device
+ * @v iobuf I/O buffer
+ * @v map DMA mapping to fill in
+ * @ret rc Return status code
+ */
+static inline __always_inline int
+dma_map_tx_iob ( struct dma_device *dma, struct io_buffer *iobuf,
+ struct dma_mapping *map ) {
+
+ /* Map I/O buffer */
+ return dma_map ( dma, virt_to_phys ( iobuf->data ), iob_len ( iobuf ),
+ DMA_TX, map );
+}
+
+/**
+ * Allocate and map I/O buffer for receiving data from device
+ *
+ * @v dma DMA device
+ * @v len Length of I/O buffer
+ * @v map DMA mapping to fill in
+ * @ret iobuf I/O buffer (presumably NULL on error — confirm against
+ * the out-of-line implementation)
+ */
+extern struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, size_t len,
+ struct dma_mapping *map );
+
+#endif /* _IPXE_DMA_H */