 arch/sandbox/dts/test.dts      |   8
 configs/sandbox_defconfig      |   3
 drivers/dma/Kconfig            |   7
 drivers/dma/Makefile           |   1
 drivers/dma/sandbox-dma-test.c | 282
 test/dm/Makefile               |   1
 test/dm/dma.c                  | 123
 7 files changed, 425 insertions(+), 0 deletions(-)
diff --git a/arch/sandbox/dts/test.dts b/arch/sandbox/dts/test.dts
index 12d39d8..082fcec 100644
--- a/arch/sandbox/dts/test.dts
+++ b/arch/sandbox/dts/test.dts
@@ -746,6 +746,14 @@
hwspinlock@0 {
compatible = "sandbox,hwspinlock";
};
+
+ dma: dma {
+ compatible = "sandbox,dma";
+ #dma-cells = <1>;
+
+ dmas = <&dma 0>, <&dma 1>, <&dma 2>;
+ dma-names = "m2m", "tx0", "rx0";
+ };
};
#include "sandbox_pmic.dtsi"
diff --git a/configs/sandbox_defconfig b/configs/sandbox_defconfig
index 2e761f9..5b65c61 100644
--- a/configs/sandbox_defconfig
+++ b/configs/sandbox_defconfig
@@ -218,3 +218,6 @@ CONFIG_UT_TIME=y
CONFIG_UT_DM=y
CONFIG_UT_ENV=y
CONFIG_UT_OVERLAY=y
+CONFIG_DMA=y
+CONFIG_DMA_CHANNELS=y
+CONFIG_SANDBOX_DMA=y
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b9b85c6..8a4162e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -19,6 +19,13 @@ config DMA_CHANNELS
Enable channels support for DMA. Some DMA controllers have multiple
channels which can either transfer data to/from different devices.
+config SANDBOX_DMA
+ bool "Enable the sandbox DMA test driver"
+ depends on DMA && DMA_CHANNELS && SANDBOX
+ help
+ Enable support for a test DMA uclass implementation. It simulates
+ DMA transfers by simply copying data between channels.
+
config TI_EDMA3
bool "TI EDMA3 driver"
help
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 4eaef8a..aff31f9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_DMA) += dma-uclass.o
obj-$(CONFIG_FSLDMAFEC) += MCD_tasksInit.o MCD_dmaApi.o MCD_tasks.o
obj-$(CONFIG_APBH_DMA) += apbh_dma.o
obj-$(CONFIG_FSL_DMA) += fsl_dma.o
+obj-$(CONFIG_SANDBOX_DMA) += sandbox-dma-test.o
obj-$(CONFIG_TI_KSNAV) += keystone_nav.o keystone_nav_cfg.o
obj-$(CONFIG_TI_EDMA3) += ti-edma3.o
obj-$(CONFIG_DMA_LPC32XX) += lpc32xx_dma.o
diff --git a/drivers/dma/sandbox-dma-test.c b/drivers/dma/sandbox-dma-test.c
new file mode 100644
index 0000000..8fcef18
--- /dev/null
+++ b/drivers/dma/sandbox-dma-test.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Direct Memory Access U-Class Simulation driver
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated <www.ti.com>
+ *
+ * Author: Grygorii Strashko <grygorii.strashko@ti.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dm/read.h>
+#include <dma-uclass.h>
+#include <dt-structs.h>
+#include <errno.h>
+
+#define SANDBOX_DMA_CH_CNT 3
+#define SANDBOX_DMA_BUF_SIZE 1024
+
+struct sandbox_dma_chan {
+ struct sandbox_dma_dev *ud;
+ char name[20];
+ u32 id;
+ enum dma_direction dir;
+ bool in_use;
+ bool enabled;
+};
+
+struct sandbox_dma_dev {
+ struct udevice *dev;
+ u32 ch_count;
+ struct sandbox_dma_chan channels[SANDBOX_DMA_CH_CNT];
+ uchar buf[SANDBOX_DMA_BUF_SIZE];
+ uchar *buf_rx;
+ size_t data_len;
+ u32 meta;
+};
+
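+/* The mem-to-mem transfer op is a plain memcpy(). */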
+static int sandbox_dma_transfer(struct udevice *dev, int direction,
+ void *dst, void *src, size_t len)
+{
+ memcpy(dst, src, len);
+
+ return 0;
+}
+
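+/*
+ * Translate the single DT dma-cell into a channel id and fix the channel
+ * direction: id 0 is mem-to-mem, id 1 is mem-to-dev (tx0) and id 2 is
+ * dev-to-mem (rx0), matching the dmas/dma-names entries in test.dts.
+ */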
+static int sandbox_dma_of_xlate(struct dma *dma,
+ struct ofnode_phandle_args *args)
+{
+ struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+ struct sandbox_dma_chan *uc;
+
+ debug("%s(dma id=%u)\n", __func__, args->args[0]);
+
+ if (args->args[0] >= SANDBOX_DMA_CH_CNT)
+ return -EINVAL;
+
+ dma->id = args->args[0];
+
+ uc = &ud->channels[dma->id];
+
+ if (dma->id == 1)
+ uc->dir = DMA_MEM_TO_DEV;
+ else if (dma->id == 2)
+ uc->dir = DMA_DEV_TO_MEM;
+ else
+ uc->dir = DMA_MEM_TO_MEM;
+ debug("%s(dma id=%lu dir=%d)\n", __func__, dma->id, uc->dir);
+
+ return 0;
+}
+
+static int sandbox_dma_request(struct dma *dma)
+{
+ struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+ struct sandbox_dma_chan *uc;
+
+ if (dma->id >= SANDBOX_DMA_CH_CNT)
+ return -EINVAL;
+
+ uc = &ud->channels[dma->id];
+ if (uc->in_use)
+ return -EBUSY;
+
+ uc->in_use = true;
+ debug("%s(dma id=%lu in_use=%d)\n", __func__, dma->id, uc->in_use);
+
+ return 0;
+}
+
+static int sandbox_dma_free(struct dma *dma)
+{
+ struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+ struct sandbox_dma_chan *uc;
+
+ if (dma->id >= SANDBOX_DMA_CH_CNT)
+ return -EINVAL;
+
+ uc = &ud->channels[dma->id];
+ if (!uc->in_use)
+ return -EINVAL;
+
+ uc->in_use = false;
+ ud->buf_rx = NULL;
+ ud->data_len = 0;
+ debug("%s(dma id=%lu in_use=%d)\n", __func__, dma->id, uc->in_use);
+
+ return 0;
+}
+
+static int sandbox_dma_enable(struct dma *dma)
+{
+ struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+ struct sandbox_dma_chan *uc;
+
+ if (dma->id >= SANDBOX_DMA_CH_CNT)
+ return -EINVAL;
+
+ uc = &ud->channels[dma->id];
+ if (!uc->in_use)
+ return -EINVAL;
+ if (uc->enabled)
+ return -EINVAL;
+
+ uc->enabled = true;
+ debug("%s(dma id=%lu enabled=%d)\n", __func__, dma->id, uc->enabled);
+
+ return 0;
+}
+
+static int sandbox_dma_disable(struct dma *dma)
+{
+ struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+ struct sandbox_dma_chan *uc;
+
+ if (dma->id >= SANDBOX_DMA_CH_CNT)
+ return -EINVAL;
+
+ uc = &ud->channels[dma->id];
+ if (!uc->in_use)
+ return -EINVAL;
+ if (!uc->enabled)
+ return -EINVAL;
+
+ uc->enabled = false;
+ debug("%s(dma id=%lu enabled=%d)\n", __func__, dma->id, uc->enabled);
+
+ return 0;
+}
+
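+/*
+ * Sending on the mem-to-dev channel just stores the data and metadata in
+ * the device-internal buffer so a later receive on the dev-to-mem channel
+ * can return them.
+ */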
+static int sandbox_dma_send(struct dma *dma,
+ void *src, size_t len, void *metadata)
+{
+ struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+ struct sandbox_dma_chan *uc;
+
+ if (dma->id >= SANDBOX_DMA_CH_CNT)
+ return -EINVAL;
+ if (!src || !metadata)
+ return -EINVAL;
+
+ debug("%s(dma id=%lu)\n", __func__, dma->id);
+
+ uc = &ud->channels[dma->id];
+ if (uc->dir != DMA_MEM_TO_DEV)
+ return -EINVAL;
+ if (!uc->in_use)
+ return -EINVAL;
+ if (!uc->enabled)
+ return -EINVAL;
+ if (len >= SANDBOX_DMA_BUF_SIZE)
+ return -EINVAL;
+
+ memcpy(ud->buf, src, len);
+ ud->data_len = len;
+ ud->meta = *((u32 *)metadata);
+
+ debug("%s(dma id=%lu len=%zu meta=%08x)\n",
+ __func__, dma->id, len, ud->meta);
+
+ return 0;
+}
+
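+/*
+ * Receiving copies the buffered data either into a buffer registered via
+ * prepare_rcv_buf (returned through *dst) or into the caller-provided *dst,
+ * and returns the number of bytes transferred.
+ */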
+static int sandbox_dma_receive(struct dma *dma, void **dst, void *metadata)
+{
+ struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+ struct sandbox_dma_chan *uc;
+
+ if (dma->id >= SANDBOX_DMA_CH_CNT)
+ return -EINVAL;
+ if (!dst || !metadata)
+ return -EINVAL;
+
+ uc = &ud->channels[dma->id];
+ if (uc->dir != DMA_DEV_TO_MEM)
+ return -EINVAL;
+ if (!uc->in_use)
+ return -EINVAL;
+ if (!uc->enabled)
+ return -EINVAL;
+ if (!ud->data_len)
+ return 0;
+
+ if (ud->buf_rx) {
+ memcpy(ud->buf_rx, ud->buf, ud->data_len);
+ *dst = ud->buf_rx;
+ } else {
+ memcpy(*dst, ud->buf, ud->data_len);
+ }
+
+ *((u32 *)metadata) = ud->meta;
+
+ debug("%s(dma id=%lu len=%zu meta=%08x %p)\n",
+ __func__, dma->id, ud->data_len, ud->meta, *dst);
+
+ return ud->data_len;
+}
+
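+/* Register the buffer that the next receive should copy data into. */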
+static int sandbox_dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
+{
+ struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+
+ ud->buf_rx = dst;
+
+ return 0;
+}
+
+static const struct dma_ops sandbox_dma_ops = {
+ .transfer = sandbox_dma_transfer,
+ .of_xlate = sandbox_dma_of_xlate,
+ .request = sandbox_dma_request,
+ .free = sandbox_dma_free,
+ .enable = sandbox_dma_enable,
+ .disable = sandbox_dma_disable,
+ .send = sandbox_dma_send,
+ .receive = sandbox_dma_receive,
+ .prepare_rcv_buf = sandbox_dma_prepare_rcv_buf,
+};
+
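+/*
+ * Probe advertises all three transfer directions and resets the per-channel
+ * bookkeeping.
+ */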
+static int sandbox_dma_probe(struct udevice *dev)
+{
+ struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
+ struct sandbox_dma_dev *ud = dev_get_priv(dev);
+ int i, ret = 0;
+
+ uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM |
+ DMA_SUPPORTS_MEM_TO_DEV |
+ DMA_SUPPORTS_DEV_TO_MEM;
+
+ ud->ch_count = SANDBOX_DMA_CH_CNT;
+ ud->buf_rx = NULL;
+ ud->meta = 0;
+ ud->data_len = 0;
+
+ pr_err("Number of channels: %u\n", ud->ch_count);
+
+ for (i = 0; i < ud->ch_count; i++) {
+ struct sandbox_dma_chan *uc = &ud->channels[i];
+
+ uc->ud = ud;
+ uc->id = i;
+ sprintf(uc->name, "DMA chan%d", i);
+ uc->in_use = false;
+ uc->enabled = false;
+ }
+
+ return ret;
+}
+
+static const struct udevice_id sandbox_dma_ids[] = {
+ { .compatible = "sandbox,dma" },
+ { }
+};
+
+U_BOOT_DRIVER(sandbox_dma) = {
+ .name = "sandbox-dma",
+ .id = UCLASS_DMA,
+ .of_match = sandbox_dma_ids,
+ .ops = &sandbox_dma_ops,
+ .probe = sandbox_dma_probe,
+ .priv_auto_alloc_size = sizeof(struct sandbox_dma_dev),
+};
diff --git a/test/dm/Makefile b/test/dm/Makefile
index 7355fe1..2c9081e 100644
--- a/test/dm/Makefile
+++ b/test/dm/Makefile
@@ -55,4 +55,5 @@ obj-$(CONFIG_DM_SERIAL) += serial.o
obj-$(CONFIG_CPU) += cpu.o
obj-$(CONFIG_TEE) += tee.o
obj-$(CONFIG_VIRTIO_SANDBOX) += virtio.o
+obj-$(CONFIG_DMA) += dma.o
endif
diff --git a/test/dm/dma.c b/test/dm/dma.c
new file mode 100644
index 0000000..b56d177
--- /dev/null
+++ b/test/dm/dma.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Direct Memory Access U-Class tests
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated <www.ti.com>
+ * Grygorii Strashko <grygorii.strashko@ti.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dm/test.h>
+#include <dma.h>
+#include <test/ut.h>
+
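+/* Test a simple mem-to-mem copy through the dma_memcpy() interface. */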
+static int dm_test_dma_m2m(struct unit_test_state *uts)
+{
+ struct udevice *dev;
+ struct dma dma_m2m;
+ u8 src_buf[512];
+ u8 dst_buf[512];
+ size_t len = 512;
+ int i;
+
+ ut_assertok(uclass_get_device_by_name(UCLASS_DMA, "dma", &dev));
+ ut_assertok(dma_get_by_name(dev, "m2m", &dma_m2m));
+
+ memset(dst_buf, 0, len);
+ for (i = 0; i < len; i++)
+ src_buf[i] = i;
+
+ ut_assertok(dma_memcpy(dst_buf, src_buf, len));
+
+ ut_assertok(memcmp(src_buf, dst_buf, len));
+ return 0;
+}
+DM_TEST(dm_test_dma_m2m, DM_TESTF_SCAN_FDT);
+
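+/*
+ * Loop a buffer from the tx0 channel back through rx0 via the driver's
+ * internal buffer and check that data and metadata survive the round trip.
+ */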
+static int dm_test_dma(struct unit_test_state *uts)
+{
+ struct udevice *dev;
+ struct dma dma_tx, dma_rx;
+ u8 src_buf[512];
+ u8 dst_buf[512];
+ void *dst_ptr;
+ size_t len = 512;
+ u32 meta1, meta2;
+ int i;
+
+ ut_assertok(uclass_get_device_by_name(UCLASS_DMA, "dma", &dev));
+
+ ut_assertok(dma_get_by_name(dev, "tx0", &dma_tx));
+ ut_assertok(dma_get_by_name(dev, "rx0", &dma_rx));
+
+ ut_assertok(dma_enable(&dma_tx));
+ ut_assertok(dma_enable(&dma_rx));
+
+ memset(dst_buf, 0, len);
+ for (i = 0; i < len; i++)
+ src_buf[i] = i;
+ meta1 = 0xADADDEAD;
+ meta2 = 0;
+ dst_ptr = &dst_buf;
+
+ ut_assertok(dma_send(&dma_tx, src_buf, len, &meta1));
+
+ ut_asserteq(len, dma_receive(&dma_rx, &dst_ptr, &meta2));
+ ut_asserteq(0xADADDEAD, meta2);
+
+ ut_assertok(dma_disable(&dma_tx));
+ ut_assertok(dma_disable(&dma_rx));
+
+ ut_assertok(dma_free(&dma_tx));
+ ut_assertok(dma_free(&dma_rx));
+ ut_assertok(memcmp(src_buf, dst_buf, len));
+
+ return 0;
+}
+DM_TEST(dm_test_dma, DM_TESTF_SCAN_FDT);
+
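+/*
+ * Same round trip as above, but receive into a buffer registered up front
+ * with dma_prepare_rcv_buf().
+ */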
+static int dm_test_dma_rx(struct unit_test_state *uts)
+{
+ struct udevice *dev;
+ struct dma dma_tx, dma_rx;
+ u8 src_buf[512];
+ u8 dst_buf[512];
+ void *dst_ptr;
+ size_t len = 512;
+ u32 meta1, meta2;
+ int i;
+
+ ut_assertok(uclass_get_device_by_name(UCLASS_DMA, "dma", &dev));
+
+ ut_assertok(dma_get_by_name(dev, "tx0", &dma_tx));
+ ut_assertok(dma_get_by_name(dev, "rx0", &dma_rx));
+
+ ut_assertok(dma_enable(&dma_tx));
+ ut_assertok(dma_enable(&dma_rx));
+
+ memset(dst_buf, 0, len);
+ for (i = 0; i < len; i++)
+ src_buf[i] = i;
+ meta1 = 0xADADDEAD;
+ meta2 = 0;
+ dst_ptr = NULL;
+
+ ut_assertok(dma_prepare_rcv_buf(&dma_rx, dst_buf, len));
+
+ ut_assertok(dma_send(&dma_tx, src_buf, len, &meta1));
+
+ ut_asserteq(len, dma_receive(&dma_rx, &dst_ptr, &meta2));
+ ut_asserteq(0xADADDEAD, meta2);
+ ut_asserteq_ptr(dst_buf, dst_ptr);
+
+ ut_assertok(dma_disable(&dma_tx));
+ ut_assertok(dma_disable(&dma_rx));
+
+ ut_assertok(dma_free(&dma_tx));
+ ut_assertok(dma_free(&dma_rx));
+ ut_assertok(memcmp(src_buf, dst_buf, len));
+
+ return 0;
+}
+DM_TEST(dm_test_dma_rx, DM_TESTF_SCAN_FDT);