Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                     |    8
-rw-r--r--  drivers/dma/Makefile                    |    1
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-dmaengine.c |   23
-rw-r--r--  drivers/dma/amd/ptdma/ptdma.h           |    1
-rw-r--r--  drivers/dma/arm-dma350.c                |  660
-rw-r--r--  drivers/dma/at_xdmac.c                  |    6
-rw-r--r--  drivers/dma/dw-edma/dw-edma-pcie.c      |    5
-rw-r--r--  drivers/dma/fsl-edma-common.c           |   30
-rw-r--r--  drivers/dma/fsl-edma-common.h           |   18
-rw-r--r--  drivers/dma/fsl-edma-main.c             |  114
-rw-r--r--  drivers/dma/fsldma.c                    |   20
-rw-r--r--  drivers/dma/fsldma.h                    |    1
-rw-r--r--  drivers/dma/idxd/cdev.c                 |   10
-rw-r--r--  drivers/dma/idxd/idxd.h                 |    2
-rw-r--r--  drivers/dma/idxd/sysfs.c                |    6
-rw-r--r--  drivers/dma/sh/rz-dmac.c                |   84
-rw-r--r--  drivers/dma/tegra210-adma.c             |  185
-rw-r--r--  drivers/dma/ti/k3-udma.c                |    3
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c         |    4
19 files changed, 1098 insertions(+), 83 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index df2d2dc00a05..db87dd2a07f7 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -93,6 +93,14 @@ config APPLE_ADMAC
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
+config ARM_DMA350
+ tristate "Arm DMA-350 support"
+ depends on ARM || ARM64 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Arm DMA-350 controller.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 19ba465011a6..ba9732644752 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
+obj-$(CONFIG_ARM_DMA350) += arm-dma350.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index 81339664036f..628c49ce5de9 100644
--- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -566,7 +566,6 @@ int pt_dmaengine_register(struct pt_device *pt)
struct ae4_device *ae4 = NULL;
struct pt_dma_chan *chan;
char *desc_cache_name;
- char *cmd_cache_name;
int ret, i;
if (pt->ver == AE4_DMA_VERSION)
@@ -582,27 +581,17 @@ int pt_dmaengine_register(struct pt_device *pt)
if (!pt->pt_dma_chan)
return -ENOMEM;
- cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
- "%s-dmaengine-cmd-cache",
- dev_name(pt->dev));
- if (!cmd_cache_name)
- return -ENOMEM;
-
desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
dev_name(pt->dev));
- if (!desc_cache_name) {
- ret = -ENOMEM;
- goto err_cache;
- }
+ if (!desc_cache_name)
+ return -ENOMEM;
pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
sizeof(struct pt_dma_desc), 0,
SLAB_HWCACHE_ALIGN, NULL);
- if (!pt->dma_desc_cache) {
- ret = -ENOMEM;
- goto err_cache;
- }
+ if (!pt->dma_desc_cache)
+ return -ENOMEM;
dma_dev->dev = pt->dev;
dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
@@ -656,9 +645,6 @@ int pt_dmaengine_register(struct pt_device *pt)
err_reg:
kmem_cache_destroy(pt->dma_desc_cache);
-err_cache:
- kmem_cache_destroy(pt->dma_cmd_cache);
-
return ret;
}
EXPORT_SYMBOL_GPL(pt_dmaengine_register);
@@ -670,5 +656,4 @@ void pt_dmaengine_unregister(struct pt_device *pt)
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(pt->dma_desc_cache);
- kmem_cache_destroy(pt->dma_cmd_cache);
}
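
Note: the simplified error handling above works because devm_kasprintf() memory is device-managed and freed automatically by the driver core; once the unused command cache is gone, the only resource still needing explicit unwind on later failures is the descriptor kmem_cache. A minimal sketch of the resulting pattern (my_register(), struct my_priv and struct my_desc are hypothetical names, not part of the patch):

#include <linux/device.h>
#include <linux/slab.h>

struct my_desc { u32 word; };
struct my_priv { struct kmem_cache *cache; };

static int my_register(struct device *dev, struct my_priv *p)
{
	/* devm memory needs no unwind, so failures can return directly */
	char *name = devm_kasprintf(dev, GFP_KERNEL, "%s-desc-cache",
				    dev_name(dev));
	if (!name)
		return -ENOMEM;

	p->cache = kmem_cache_create(name, sizeof(struct my_desc), 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	if (!p->cache)
		return -ENOMEM;

	/* only from here on must failure paths kmem_cache_destroy() */
	return 0;
}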
diff --git a/drivers/dma/amd/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
index 0a7939105e51..ef3f55632107 100644
--- a/drivers/dma/amd/ptdma/ptdma.h
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -254,7 +254,6 @@ struct pt_device {
/* Support for the DMA Engine capabilities */
struct dma_device dma_dev;
struct pt_dma_chan *pt_dma_chan;
- struct kmem_cache *dma_cmd_cache;
struct kmem_cache *dma_desc_cache;
wait_queue_head_t lsb_queue;
diff --git a/drivers/dma/arm-dma350.c b/drivers/dma/arm-dma350.c
new file mode 100644
index 000000000000..9efe2ca7d5ec
--- /dev/null
+++ b/drivers/dma/arm-dma350.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024-2025 Arm Limited
+// Arm DMA-350 driver
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define DMAINFO 0x0f00
+
+#define DMA_BUILDCFG0 0xb0
+#define DMA_CFG_DATA_WIDTH GENMASK(18, 16)
+#define DMA_CFG_ADDR_WIDTH GENMASK(15, 10)
+#define DMA_CFG_NUM_CHANNELS GENMASK(9, 4)
+
+#define DMA_BUILDCFG1 0xb4
+#define DMA_CFG_NUM_TRIGGER_IN GENMASK(8, 0)
+
+#define IIDR 0xc8
+#define IIDR_PRODUCTID GENMASK(31, 20)
+#define IIDR_VARIANT GENMASK(19, 16)
+#define IIDR_REVISION GENMASK(15, 12)
+#define IIDR_IMPLEMENTER GENMASK(11, 0)
+
+#define PRODUCTID_DMA350 0x3a0
+#define IMPLEMENTER_ARM 0x43b
+
+#define DMACH(n) (0x1000 + 0x0100 * (n))
+
+#define CH_CMD 0x00
+#define CH_CMD_RESUME BIT(5)
+#define CH_CMD_PAUSE BIT(4)
+#define CH_CMD_STOP BIT(3)
+#define CH_CMD_DISABLE BIT(2)
+#define CH_CMD_CLEAR BIT(1)
+#define CH_CMD_ENABLE BIT(0)
+
+#define CH_STATUS 0x04
+#define CH_STAT_RESUMEWAIT BIT(21)
+#define CH_STAT_PAUSED BIT(20)
+#define CH_STAT_STOPPED BIT(19)
+#define CH_STAT_DISABLED BIT(18)
+#define CH_STAT_ERR BIT(17)
+#define CH_STAT_DONE BIT(16)
+#define CH_STAT_INTR_ERR BIT(1)
+#define CH_STAT_INTR_DONE BIT(0)
+
+#define CH_INTREN 0x08
+#define CH_INTREN_ERR BIT(1)
+#define CH_INTREN_DONE BIT(0)
+
+#define CH_CTRL 0x0c
+#define CH_CTRL_USEDESTRIGIN BIT(26)
+#define CH_CTRL_USESRCTRIGIN BIT(25)
+#define CH_CTRL_DONETYPE GENMASK(23, 21)
+#define CH_CTRL_REGRELOADTYPE GENMASK(20, 18)
+#define CH_CTRL_XTYPE GENMASK(11, 9)
+#define CH_CTRL_TRANSIZE GENMASK(2, 0)
+
+#define CH_SRCADDR 0x10
+#define CH_SRCADDRHI 0x14
+#define CH_DESADDR 0x18
+#define CH_DESADDRHI 0x1c
+#define CH_XSIZE 0x20
+#define CH_XSIZEHI 0x24
+#define CH_SRCTRANSCFG 0x28
+#define CH_DESTRANSCFG 0x2c
+#define CH_CFG_MAXBURSTLEN GENMASK(19, 16)
+#define CH_CFG_PRIVATTR BIT(11)
+#define CH_CFG_SHAREATTR GENMASK(9, 8)
+#define CH_CFG_MEMATTR GENMASK(7, 0)
+
+#define TRANSCFG_DEVICE \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_DEVICE)
+#define TRANSCFG_NC \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_NC)
+#define TRANSCFG_WB \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_ISH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_WB)
+
+#define CH_XADDRINC 0x30
+#define CH_XY_DES GENMASK(31, 16)
+#define CH_XY_SRC GENMASK(15, 0)
+
+#define CH_FILLVAL 0x38
+#define CH_SRCTRIGINCFG 0x4c
+#define CH_DESTRIGINCFG 0x50
+#define CH_LINKATTR 0x70
+#define CH_LINK_SHAREATTR GENMASK(9, 8)
+#define CH_LINK_MEMATTR GENMASK(7, 0)
+
+#define CH_AUTOCFG 0x74
+#define CH_LINKADDR 0x78
+#define CH_LINKADDR_EN BIT(0)
+
+#define CH_LINKADDRHI 0x7c
+#define CH_ERRINFO 0x90
+#define CH_ERRINFO_AXIRDPOISERR BIT(18)
+#define CH_ERRINFO_AXIWRRESPERR BIT(17)
+#define CH_ERRINFO_AXIRDRESPERR BIT(16)
+
+#define CH_BUILDCFG0 0xf8
+#define CH_CFG_INC_WIDTH GENMASK(29, 26)
+#define CH_CFG_DATA_WIDTH GENMASK(24, 22)
+#define CH_CFG_DATA_BUF_SIZE GENMASK(7, 0)
+
+#define CH_BUILDCFG1 0xfc
+#define CH_CFG_HAS_CMDLINK BIT(8)
+#define CH_CFG_HAS_TRIGSEL BIT(7)
+#define CH_CFG_HAS_TRIGIN BIT(5)
+#define CH_CFG_HAS_WRAP BIT(1)
+
+
+#define LINK_REGCLEAR BIT(0)
+#define LINK_INTREN BIT(2)
+#define LINK_CTRL BIT(3)
+#define LINK_SRCADDR BIT(4)
+#define LINK_SRCADDRHI BIT(5)
+#define LINK_DESADDR BIT(6)
+#define LINK_DESADDRHI BIT(7)
+#define LINK_XSIZE BIT(8)
+#define LINK_XSIZEHI BIT(9)
+#define LINK_SRCTRANSCFG BIT(10)
+#define LINK_DESTRANSCFG BIT(11)
+#define LINK_XADDRINC BIT(12)
+#define LINK_FILLVAL BIT(14)
+#define LINK_SRCTRIGINCFG BIT(19)
+#define LINK_DESTRIGINCFG BIT(20)
+#define LINK_AUTOCFG BIT(29)
+#define LINK_LINKADDR BIT(30)
+#define LINK_LINKADDRHI BIT(31)
+
+
+enum ch_ctrl_donetype {
+ CH_CTRL_DONETYPE_NONE = 0,
+ CH_CTRL_DONETYPE_CMD = 1,
+ CH_CTRL_DONETYPE_CYCLE = 3
+};
+
+enum ch_ctrl_xtype {
+ CH_CTRL_XTYPE_DISABLE = 0,
+ CH_CTRL_XTYPE_CONTINUE = 1,
+ CH_CTRL_XTYPE_WRAP = 2,
+ CH_CTRL_XTYPE_FILL = 3
+};
+
+enum ch_cfg_shareattr {
+ SHAREATTR_NSH = 0,
+ SHAREATTR_OSH = 2,
+ SHAREATTR_ISH = 3
+};
+
+enum ch_cfg_memattr {
+ MEMATTR_DEVICE = 0x00,
+ MEMATTR_NC = 0x44,
+ MEMATTR_WB = 0xff
+};
+
+struct d350_desc {
+ struct virt_dma_desc vd;
+ u32 command[16];
+ u16 xsize;
+ u16 xsizehi;
+ u8 tsz;
+};
+
+struct d350_chan {
+ struct virt_dma_chan vc;
+ struct d350_desc *desc;
+ void __iomem *base;
+ int irq;
+ enum dma_status status;
+ dma_cookie_t cookie;
+ u32 residue;
+ u8 tsz;
+ bool has_trig;
+ bool has_wrap;
+ bool coherent;
+};
+
+struct d350 {
+ struct dma_device dma;
+ int nchan;
+ int nreq;
+ struct d350_chan channels[] __counted_by(nchan);
+};
+
+static inline struct d350_chan *to_d350_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct d350_chan, vc.chan);
+}
+
+static inline struct d350_desc *to_d350_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct d350_desc, vd);
+}
+
+static void d350_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_d350_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *d350_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct d350_desc *desc;
+ u32 *cmd;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->tsz = __ffs(len | dest | src | (1 << dch->tsz));
+ desc->xsize = lower_16_bits(len >> desc->tsz);
+ desc->xsizehi = upper_16_bits(len >> desc->tsz);
+
+ cmd = desc->command;
+ cmd[0] = LINK_CTRL | LINK_SRCADDR | LINK_SRCADDRHI | LINK_DESADDR |
+ LINK_DESADDRHI | LINK_XSIZE | LINK_XSIZEHI | LINK_SRCTRANSCFG |
+ LINK_DESTRANSCFG | LINK_XADDRINC | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_CONTINUE) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);
+
+ cmd[2] = lower_32_bits(src);
+ cmd[3] = upper_32_bits(src);
+ cmd[4] = lower_32_bits(dest);
+ cmd[5] = upper_32_bits(dest);
+ cmd[6] = FIELD_PREP(CH_XY_SRC, desc->xsize) | FIELD_PREP(CH_XY_DES, desc->xsize);
+ cmd[7] = FIELD_PREP(CH_XY_SRC, desc->xsizehi) | FIELD_PREP(CH_XY_DES, desc->xsizehi);
+ cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[9] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[10] = FIELD_PREP(CH_XY_SRC, 1) | FIELD_PREP(CH_XY_DES, 1);
+ cmd[11] = 0;
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *d350_prep_memset(struct dma_chan *chan,
+ dma_addr_t dest, int value, size_t len, unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct d350_desc *desc;
+ u32 *cmd;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->tsz = __ffs(len | dest | (1 << dch->tsz));
+ desc->xsize = lower_16_bits(len >> desc->tsz);
+ desc->xsizehi = upper_16_bits(len >> desc->tsz);
+
+ cmd = desc->command;
+ cmd[0] = LINK_CTRL | LINK_DESADDR | LINK_DESADDRHI |
+ LINK_XSIZE | LINK_XSIZEHI | LINK_DESTRANSCFG |
+ LINK_XADDRINC | LINK_FILLVAL | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_FILL) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);
+
+ cmd[2] = lower_32_bits(dest);
+ cmd[3] = upper_32_bits(dest);
+ cmd[4] = FIELD_PREP(CH_XY_DES, desc->xsize);
+ cmd[5] = FIELD_PREP(CH_XY_DES, desc->xsizehi);
+ cmd[6] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[7] = FIELD_PREP(CH_XY_DES, 1);
+ cmd[8] = (u8)value * 0x01010101;
+ cmd[9] = 0;
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+}
+
+static int d350_pause(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (dch->status == DMA_IN_PROGRESS) {
+ writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD);
+ dch->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static int d350_resume(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (dch->status == DMA_PAUSED) {
+ writel_relaxed(CH_CMD_RESUME, dch->base + CH_CMD);
+ dch->status = DMA_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static u32 d350_get_residue(struct d350_chan *dch)
+{
+ u32 res, xsize, xsizehi, hi_new;
+ int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */
+
+ hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
+ do {
+ xsizehi = hi_new;
+ xsize = readl_relaxed(dch->base + CH_XSIZE);
+ hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
+ } while (xsizehi != hi_new && --retries);
+
+ res = FIELD_GET(CH_XY_DES, xsize);
+ res |= FIELD_GET(CH_XY_DES, xsizehi) << 16;
+
+ return res << dch->desc->tsz;
+}
+
+static int d350_terminate_all(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD);
+ if (dch->desc) {
+ if (dch->status != DMA_ERROR)
+ vchan_terminate_vdesc(&dch->desc->vd);
+ dch->desc = NULL;
+ dch->status = DMA_COMPLETE;
+ }
+ vchan_get_all_descriptors(&dch->vc, &list);
+ list_splice_tail(&list, &dch->vc.desc_terminated);
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static void d350_synchronize(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+
+ vchan_synchronize(&dch->vc);
+}
+
+static u32 d350_desc_bytes(struct d350_desc *desc)
+{
+ return ((u32)desc->xsizehi << 16 | desc->xsize) << desc->tsz;
+}
+
+static enum dma_status d350_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status status;
+ unsigned long flags;
+ u32 residue = 0;
+
+ status = dma_cookie_status(chan, cookie, state);
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (cookie == dch->cookie) {
+ status = dch->status;
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
+ dch->residue = d350_get_residue(dch);
+ residue = dch->residue;
+ } else if ((vd = vchan_find_desc(&dch->vc, cookie))) {
+ residue = d350_desc_bytes(to_d350_desc(vd));
+ } else if (status == DMA_IN_PROGRESS) {
+ /* Somebody else terminated it? */
+ status = DMA_ERROR;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ dma_set_residue(state, residue);
+ return status;
+}
+
+static void d350_start_next(struct d350_chan *dch)
+{
+ u32 hdr, *reg;
+
+ dch->desc = to_d350_desc(vchan_next_desc(&dch->vc));
+ if (!dch->desc)
+ return;
+
+ list_del(&dch->desc->vd.node);
+ dch->status = DMA_IN_PROGRESS;
+ dch->cookie = dch->desc->vd.tx.cookie;
+ dch->residue = d350_desc_bytes(dch->desc);
+
+ hdr = dch->desc->command[0];
+ reg = &dch->desc->command[1];
+
+ if (hdr & LINK_INTREN)
+ writel_relaxed(*reg++, dch->base + CH_INTREN);
+ if (hdr & LINK_CTRL)
+ writel_relaxed(*reg++, dch->base + CH_CTRL);
+ if (hdr & LINK_SRCADDR)
+ writel_relaxed(*reg++, dch->base + CH_SRCADDR);
+ if (hdr & LINK_SRCADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_SRCADDRHI);
+ if (hdr & LINK_DESADDR)
+ writel_relaxed(*reg++, dch->base + CH_DESADDR);
+ if (hdr & LINK_DESADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_DESADDRHI);
+ if (hdr & LINK_XSIZE)
+ writel_relaxed(*reg++, dch->base + CH_XSIZE);
+ if (hdr & LINK_XSIZEHI)
+ writel_relaxed(*reg++, dch->base + CH_XSIZEHI);
+ if (hdr & LINK_SRCTRANSCFG)
+ writel_relaxed(*reg++, dch->base + CH_SRCTRANSCFG);
+ if (hdr & LINK_DESTRANSCFG)
+ writel_relaxed(*reg++, dch->base + CH_DESTRANSCFG);
+ if (hdr & LINK_XADDRINC)
+ writel_relaxed(*reg++, dch->base + CH_XADDRINC);
+ if (hdr & LINK_FILLVAL)
+ writel_relaxed(*reg++, dch->base + CH_FILLVAL);
+ if (hdr & LINK_SRCTRIGINCFG)
+ writel_relaxed(*reg++, dch->base + CH_SRCTRIGINCFG);
+ if (hdr & LINK_DESTRIGINCFG)
+ writel_relaxed(*reg++, dch->base + CH_DESTRIGINCFG);
+ if (hdr & LINK_AUTOCFG)
+ writel_relaxed(*reg++, dch->base + CH_AUTOCFG);
+ if (hdr & LINK_LINKADDR)
+ writel_relaxed(*reg++, dch->base + CH_LINKADDR);
+ if (hdr & LINK_LINKADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_LINKADDRHI);
+
+ writel(CH_CMD_ENABLE, dch->base + CH_CMD);
+}
+
+static void d350_issue_pending(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (vchan_issue_pending(&dch->vc) && !dch->desc)
+ d350_start_next(dch);
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+}
+
+static irqreturn_t d350_irq(int irq, void *data)
+{
+ struct d350_chan *dch = data;
+ struct device *dev = dch->vc.chan.device->dev;
+ struct virt_dma_desc *vd = &dch->desc->vd;
+ u32 ch_status;
+
+ ch_status = readl(dch->base + CH_STATUS);
+ if (!ch_status)
+ return IRQ_NONE;
+
+ if (ch_status & CH_STAT_INTR_ERR) {
+ u32 errinfo = readl_relaxed(dch->base + CH_ERRINFO);
+
+ if (errinfo & (CH_ERRINFO_AXIRDPOISERR | CH_ERRINFO_AXIRDRESPERR))
+ vd->tx_result.result = DMA_TRANS_READ_FAILED;
+ else if (errinfo & CH_ERRINFO_AXIWRRESPERR)
+ vd->tx_result.result = DMA_TRANS_WRITE_FAILED;
+ else
+ vd->tx_result.result = DMA_TRANS_ABORTED;
+
+ vd->tx_result.residue = d350_get_residue(dch);
+ } else if (!(ch_status & CH_STAT_INTR_DONE)) {
+ dev_warn(dev, "Unexpected IRQ source? 0x%08x\n", ch_status);
+ }
+ writel_relaxed(ch_status, dch->base + CH_STATUS);
+
+ spin_lock(&dch->vc.lock);
+ vchan_cookie_complete(vd);
+ if (ch_status & CH_STAT_INTR_DONE) {
+ dch->status = DMA_COMPLETE;
+ dch->residue = 0;
+ d350_start_next(dch);
+ } else {
+ dch->status = DMA_ERROR;
+ dch->residue = vd->tx_result.residue;
+ }
+ spin_unlock(&dch->vc.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int d350_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED,
+ dev_name(&dch->vc.chan.dev->device), dch);
+ if (!ret)
+ writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR, dch->base + CH_INTREN);
+
+ return ret;
+}
+
+static void d350_free_chan_resources(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+
+ writel_relaxed(0, dch->base + CH_INTREN);
+ free_irq(dch->irq, dch);
+ vchan_free_chan_resources(&dch->vc);
+}
+
+static int d350_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct d350 *dmac;
+ void __iomem *base;
+ u32 reg;
+ int ret, nchan, dw, aw, r, p;
+ bool coherent, memset;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ reg = readl_relaxed(base + DMAINFO + IIDR);
+ r = FIELD_GET(IIDR_VARIANT, reg);
+ p = FIELD_GET(IIDR_REVISION, reg);
+ if (FIELD_GET(IIDR_IMPLEMENTER, reg) != IMPLEMENTER_ARM ||
+ FIELD_GET(IIDR_PRODUCTID, reg) != PRODUCTID_DMA350)
+ return dev_err_probe(dev, -ENODEV, "Not a DMA-350!");
+
+ reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG0);
+ nchan = FIELD_GET(DMA_CFG_NUM_CHANNELS, reg) + 1;
+ dw = 1 << FIELD_GET(DMA_CFG_DATA_WIDTH, reg);
+ aw = FIELD_GET(DMA_CFG_ADDR_WIDTH, reg) + 1;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(aw));
+ coherent = device_get_dma_attr(dev) == DEV_DMA_COHERENT;
+
+ dmac = devm_kzalloc(dev, struct_size(dmac, channels, nchan), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->nchan = nchan;
+
+ reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG1);
+ dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg);
+
+ dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq);
+
+ dmac->dma.dev = dev;
+ for (int i = min(dw, 16); i > 0; i /= 2) {
+ dmac->dma.src_addr_widths |= BIT(i);
+ dmac->dma.dst_addr_widths |= BIT(i);
+ }
+ dmac->dma.directions = BIT(DMA_MEM_TO_MEM);
+ dmac->dma.descriptor_reuse = true;
+ dmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dmac->dma.device_alloc_chan_resources = d350_alloc_chan_resources;
+ dmac->dma.device_free_chan_resources = d350_free_chan_resources;
+ dma_cap_set(DMA_MEMCPY, dmac->dma.cap_mask);
+ dmac->dma.device_prep_dma_memcpy = d350_prep_memcpy;
+ dmac->dma.device_pause = d350_pause;
+ dmac->dma.device_resume = d350_resume;
+ dmac->dma.device_terminate_all = d350_terminate_all;
+ dmac->dma.device_synchronize = d350_synchronize;
+ dmac->dma.device_tx_status = d350_tx_status;
+ dmac->dma.device_issue_pending = d350_issue_pending;
+ INIT_LIST_HEAD(&dmac->dma.channels);
+
+ /* Would be nice to have per-channel caps for this... */
+ memset = true;
+ for (int i = 0; i < nchan; i++) {
+ struct d350_chan *dch = &dmac->channels[i];
+
+ dch->base = base + DMACH(i);
+ writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD);
+
+ reg = readl_relaxed(dch->base + CH_BUILDCFG1);
+ if (!(FIELD_GET(CH_CFG_HAS_CMDLINK, reg))) {
+ dev_warn(dev, "No command link support on channel %d\n", i);
+ continue;
+ }
+ dch->irq = platform_get_irq(pdev, i);
+ if (dch->irq < 0)
+ return dev_err_probe(dev, dch->irq,
+ "Failed to get IRQ for channel %d\n", i);
+
+ dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg);
+ dch->has_trig = FIELD_GET(CH_CFG_HAS_TRIGIN, reg) &
+ FIELD_GET(CH_CFG_HAS_TRIGSEL, reg);
+
+ /* Fill is a special case of Wrap */
+ memset &= dch->has_wrap;
+
+ reg = readl_relaxed(dch->base + CH_BUILDCFG0);
+ dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg);
+
+ reg = FIELD_PREP(CH_LINK_SHAREATTR, coherent ? SHAREATTR_ISH : SHAREATTR_OSH);
+ reg |= FIELD_PREP(CH_LINK_MEMATTR, coherent ? MEMATTR_WB : MEMATTR_NC);
+ writel_relaxed(reg, dch->base + CH_LINKATTR);
+
+ dch->vc.desc_free = d350_desc_free;
+ vchan_init(&dch->vc, &dmac->dma);
+ }
+
+ if (memset) {
+ dma_cap_set(DMA_MEMSET, dmac->dma.cap_mask);
+ dmac->dma.device_prep_dma_memset = d350_prep_memset;
+ }
+
+ platform_set_drvdata(pdev, dmac);
+
+ ret = dma_async_device_register(&dmac->dma);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register DMA device\n");
+
+ return 0;
+}
+
+static void d350_remove(struct platform_device *pdev)
+{
+ struct d350 *dmac = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&dmac->dma);
+}
+
+static const struct of_device_id d350_of_match[] __maybe_unused = {
+ { .compatible = "arm,dma-350" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, d350_of_match);
+
+static struct platform_driver d350_driver = {
+ .driver = {
+ .name = "arm-dma350",
+ .of_match_table = of_match_ptr(d350_of_match),
+ },
+ .probe = d350_probe,
+ .remove = d350_remove,
+};
+module_platform_driver(d350_driver);
+
+MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
+MODULE_DESCRIPTION("Arm DMA-350 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index ba25c23164e7..3fbc74710a13 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2033,10 +2033,8 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
* at_xdmac_start_xfer() for this descriptor. Now it's time
* to release it.
*/
- if (desc->active_xfer) {
- pm_runtime_put_autosuspend(atxdmac->dev);
- pm_runtime_mark_last_busy(atxdmac->dev);
- }
+ if (desc->active_xfer)
+ pm_runtime_put_noidle(atxdmac->dev);
}
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
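
Note: a hedged sketch of the runtime-PM semantics involved in this hunk (my_release() is a hypothetical helper, not part of the patch). pm_runtime_put_noidle() only drops the usage count and never triggers a suspend, while the autosuspend variant also arms the delayed-suspend timer; the conventional order is mark-then-put, the reverse of what the removed lines did.

#include <linux/pm_runtime.h>

static void my_release(struct device *dev, bool allow_suspend)
{
	if (allow_suspend) {
		/* stamp last-busy first, then drop the reference */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		/* drop the reference without triggering any suspend */
		pm_runtime_put_noidle(dev);
	}
}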
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 1c6043751dc9..49f09998e5c0 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -136,7 +136,8 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
if (map != EDMA_MF_EDMA_LEGACY &&
map != EDMA_MF_EDMA_UNROLL &&
- map != EDMA_MF_HDMA_COMPAT)
+ map != EDMA_MF_HDMA_COMPAT &&
+ map != EDMA_MF_HDMA_NATIVE)
return;
pdata->mf = map;
@@ -291,6 +292,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
else if (chip->mf == EDMA_MF_HDMA_COMPAT)
pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_HDMA_NATIVE)
+ pci_dbg(pdev, "Version:\tHDMA Native (0x%x)\n", chip->mf);
else
pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 443b2430466c..4976d7dde080 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -95,7 +95,7 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
}
val = edma_readl_chreg(fsl_chan, ch_csr);
- val |= EDMA_V3_CH_CSR_ERQ;
+ val |= EDMA_V3_CH_CSR_ERQ | EDMA_V3_CH_CSR_EEI;
edma_writel_chreg(fsl_chan, val, ch_csr);
}
@@ -821,7 +821,7 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- int ret;
+ int ret = 0;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_prepare_enable(fsl_chan->clk);
@@ -831,17 +831,29 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
- if (fsl_chan->txirq) {
+ if (fsl_chan->txirq)
ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
fsl_chan->chan_name, fsl_chan);
- if (ret) {
- dma_pool_destroy(fsl_chan->tcd_pool);
- return ret;
- }
- }
+ if (ret)
+ goto err_txirq;
+
+ if (fsl_chan->errirq > 0)
+ ret = request_irq(fsl_chan->errirq, fsl_chan->errirq_handler, IRQF_SHARED,
+ fsl_chan->errirq_name, fsl_chan);
+
+ if (ret)
+ goto err_errirq;
return 0;
+
+err_errirq:
+ if (fsl_chan->txirq)
+ free_irq(fsl_chan->txirq, fsl_chan);
+err_txirq:
+ dma_pool_destroy(fsl_chan->tcd_pool);
+
+ return ret;
}
void fsl_edma_free_chan_resources(struct dma_chan *chan)
@@ -862,6 +874,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
if (fsl_chan->txirq)
free_irq(fsl_chan->txirq, fsl_chan);
+ if (fsl_chan->errirq)
+ free_irq(fsl_chan->errirq, fsl_chan);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
dma_pool_destroy(fsl_chan->tcd_pool);
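
Note: one subtlety in the hunk above is that the txirq/errirq requests are now brace-less ifs followed by standalone "if (ret)" checks, which is only correct because "ret" was changed to start at 0. A minimal illustration of why (demo() and have_irq are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

static int demo(bool have_irq)
{
	int ret = 0;	/* may be tested below without ever being assigned */

	if (have_irq)
		ret = -EBUSY;	/* stands in for request_irq() failing */

	if (ret)	/* reached even when have_irq is false */
		return ret;

	return 0;
}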
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 10a5565ddfd7..205a96489094 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -71,6 +71,18 @@
#define EDMA_V3_CH_ES_ERR BIT(31)
#define EDMA_V3_MP_ES_VLD BIT(31)
+#define EDMA_V3_CH_ERR_DBE BIT(0)
+#define EDMA_V3_CH_ERR_SBE BIT(1)
+#define EDMA_V3_CH_ERR_SGE BIT(2)
+#define EDMA_V3_CH_ERR_NCE BIT(3)
+#define EDMA_V3_CH_ERR_DOE BIT(4)
+#define EDMA_V3_CH_ERR_DAE BIT(5)
+#define EDMA_V3_CH_ERR_SOE BIT(6)
+#define EDMA_V3_CH_ERR_SAE BIT(7)
+#define EDMA_V3_CH_ERR_ECX BIT(8)
+#define EDMA_V3_CH_ERR_UCE BIT(9)
+#define EDMA_V3_CH_ERR BIT(31)
+
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
@@ -162,6 +174,7 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[32];
+ char errirq_name[36];
void __iomem *tcd;
void __iomem *mux_addr;
u32 real_count;
@@ -174,7 +187,9 @@ struct fsl_edma_chan {
int priority;
int hw_chanid;
int txirq;
+ int errirq;
irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ irqreturn_t (*errirq_handler)(int irq, void *dev_id);
bool is_rxchan;
bool is_remote;
bool is_multi_fifo;
@@ -208,6 +223,9 @@ struct fsl_edma_desc {
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_TCD64 BIT(15)
+/* All channels' error IRQs share one IRQ line */
+#define FSL_EDMA_DRV_ERRIRQ_SHARE BIT(16)
+
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 66bfa28d984e..97583c7d51a2 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -50,6 +50,83 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void fsl_edma3_err_check(struct fsl_edma_chan *fsl_chan)
+{
+ unsigned int ch_err;
+ u32 val;
+
+ scoped_guard(spinlock, &fsl_chan->vchan.lock) {
+ ch_err = edma_readl_chreg(fsl_chan, ch_es);
+ if (!(ch_err & EDMA_V3_CH_ERR))
+ return;
+
+ edma_writel_chreg(fsl_chan, EDMA_V3_CH_ERR, ch_es);
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+ }
+
+ /* Ignore this interrupt since channel has been disabled already */
+ if (!fsl_chan->edesc)
+ return;
+
+ if (ch_err & EDMA_V3_CH_ERR_DBE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Bus Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SBE)
+ dev_err(&fsl_chan->pdev->dev, "Source Bus Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SGE)
+ dev_err(&fsl_chan->pdev->dev, "Scatter/Gather Configuration Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_NCE)
+ dev_err(&fsl_chan->pdev->dev, "NBYTES/CITER Configuration Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_DOE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Offset Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_DAE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Address Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SOE)
+ dev_err(&fsl_chan->pdev->dev, "Source Offset Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SAE)
+ dev_err(&fsl_chan->pdev->dev, "Source Address Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_ECX)
+ dev_err(&fsl_chan->pdev->dev, "Transfer Canceled interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_UCE)
+ dev_err(&fsl_chan->pdev->dev, "Uncorrectable TCD error during channel execution interrupt.\n");
+
+ fsl_chan->status = DMA_ERROR;
+}
+
+static irqreturn_t fsl_edma3_err_handler_per_chan(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+
+ fsl_edma3_err_check(fsl_chan);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma3_err_handler_shared(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ unsigned int ch;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ if (fsl_edma->chan_masked & BIT(ch))
+ continue;
+
+ fsl_edma3_err_check(&fsl_edma->chans[ch]);
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_chan *fsl_chan = dev_id;
@@ -309,7 +386,8 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
- int i;
+ char *errirq_name;
+ int i, ret;
for (i = 0; i < fsl_edma->n_chans; i++) {
@@ -324,6 +402,27 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
return -EINVAL;
fsl_chan->irq_handler = fsl_edma3_tx_handler;
+
+ if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE)) {
+ fsl_chan->errirq = fsl_chan->txirq;
+ fsl_chan->errirq_handler = fsl_edma3_err_handler_per_chan;
+ }
+ }
+
+ /* All channels' error interrupts share one IRQ line */
+ if (fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE) {
+ /* the last IRQ in the list is the error IRQ */
+ fsl_edma->errirq = platform_get_irq_optional(pdev, fsl_edma->n_chans);
+ if (fsl_edma->errirq < 0)
+ return 0; /* DT omits the error IRQ; treat as the no-error-IRQ case */
+
+ errirq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-err",
+ dev_name(&pdev->dev));
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma3_err_handler_shared,
+ 0, errirq_name, fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Can't register eDMA err IRQ.\n");
}
return 0;
@@ -464,7 +563,8 @@ static struct fsl_edma_drvdata imx7ulp_data = {
};
static struct fsl_edma_drvdata imx8qm_data = {
- .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE,
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE
+ | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
@@ -481,14 +581,15 @@ static struct fsl_edma_drvdata imx8ulp_data = {
};
static struct fsl_edma_drvdata imx93_data3 = {
- .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};
static struct fsl_edma_drvdata imx93_data4 = {
- .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4
+ | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
@@ -498,7 +599,7 @@ static struct fsl_edma_drvdata imx93_data4 = {
static struct fsl_edma_drvdata imx95_data5 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
- FSL_EDMA_DRV_TCD64,
+ FSL_EDMA_DRV_TCD64 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x200,
@@ -700,6 +801,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
dev_name(&pdev->dev), i);
+ snprintf(fsl_chan->errirq_name, sizeof(fsl_chan->errirq_name),
+ "%s-CH%02d-err", dev_name(&pdev->dev), i);
+
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
fsl_chan->srcid = 0;
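
Note: fsl_edma3_err_check() above uses scoped_guard() from <linux/cleanup.h>. For readers unfamiliar with it, a hedged sketch of the semantics (the demo_* names are hypothetical): the lock is taken at the top of the braced block and released on every exit path, including early returns.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_state;

static int demo_read(void)
{
	scoped_guard(spinlock, &demo_lock) {
		if (!demo_state)
			return -ENODATA;	/* lock dropped automatically */
	}

	/* like err_check, later work runs outside the lock */
	return demo_state;
}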
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b5e7d18b9766..9b126a260267 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1226,6 +1226,8 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels);
+ /* The DMA address bits supported for this device. */
+ fdev->addr_bits = (long)device_get_match_data(fdev->dev);
/* ioremap the registers for use */
fdev->regs = of_iomap(op->dev.of_node, 0);
@@ -1254,7 +1256,7 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+ dma_set_mask(&(op->dev), DMA_BIT_MASK(fdev->addr_bits));
platform_set_drvdata(op, fdev);
@@ -1387,10 +1389,20 @@ static const struct dev_pm_ops fsldma_pm_ops = {
};
#endif
+/* The .data field holds the supported DMA address width in bits. */
static const struct of_device_id fsldma_of_ids[] = {
- { .compatible = "fsl,elo3-dma", },
- { .compatible = "fsl,eloplus-dma", },
- { .compatible = "fsl,elo-dma", },
+ {
+ .compatible = "fsl,elo3-dma",
+ .data = (void *)40,
+ },
+ {
+ .compatible = "fsl,eloplus-dma",
+ .data = (void *)36,
+ },
+ {
+ .compatible = "fsl,elo-dma",
+ .data = (void *)32,
+ },
{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);
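
Note: a hedged sketch of the .data round-trip above — an integer address width is stored in the of_device_id .data pointer and recovered via device_get_match_data(), with (long) casts keeping the value pointer-sized in both directions (demo_addr_bits() is hypothetical):

#include <linux/property.h>

static int demo_addr_bits(struct device *dev)
{
	/* e.g. returns 36 when matched against "fsl,eloplus-dma" */
	return (long)device_get_match_data(dev);
}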
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 308bed0a560a..d7b7a3138b85 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -124,6 +124,7 @@ struct fsldma_device {
struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */
+ int addr_bits; /* DMA addressing bits supported */
};
/* Define macros for fsldma_chan->feature property */
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 6d12033649f8..7e4715f92773 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -349,7 +349,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- drain_workqueue(wq->wq);
+ if (wq->wq)
+ drain_workqueue(wq->wq);
+
mutex_unlock(&evl->lock);
}
@@ -442,10 +444,12 @@ static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
* DSA devices are capable of indirect ("batch") command submission.
* On devices where direct user submissions are not safe, we cannot
* allow this since there is no good way for us to verify these
- * indirect commands.
+ * indirect commands. Narrow the restriction of operations with the
+ * BATCH opcode to only DSA version 1 devices.
*/
if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
- !wq->idxd->user_submission_safe)
+ wq->idxd->hw.version == DEVICE_VERSION_1 &&
+ !wq->idxd->user_submission_safe)
return -EINVAL;
/*
* As per the programming specification, the completion address must be
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 214b8039439f..74e6695881e6 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -19,7 +19,6 @@
#define IDXD_DRIVER_VERSION "1.00"
-extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;
struct idxd_wq;
@@ -171,7 +170,6 @@ struct idxd_cdev {
#define DRIVER_NAME_SIZE 128
-#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 6af493f6ba77..9f0701021af0 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1208,9 +1208,11 @@ static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *
/* On systems where direct user submissions are not safe, we need to clear out
* the BATCH capability from the capability mask in sysfs since we cannot support
- * that command on such systems.
+ * that command on such systems. Narrow the restriction of operations with the
+ * BATCH opcode to only DSA version 1 devices.
*/
- if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe &&
+ confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1)
clear_bit(DSA_OPCODE_BATCH % 64, &val);
pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
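
Note: the op_cap bitmap is exposed as an array of 64-bit words, so opcode N lives in word N / 64 at bit N % 64; assuming DSA_OPCODE_BATCH is 1, the clear lands in word 0, bit 1. A hedged sketch of that indexing (demo_clear_batch() and the hard-coded opcode are illustrative assumptions):

#include <linux/bitops.h>

static void demo_clear_batch(unsigned long *val, int word_index)
{
	const int opcode = 1;			/* assumed DSA_OPCODE_BATCH */

	if (word_index == opcode / 64)		/* word 0 */
		clear_bit(opcode % 64, val);	/* bit 1 */
}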
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 9235db551026..1f687b08d6b8 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -14,6 +14,7 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -89,8 +90,14 @@ struct rz_dmac_chan {
#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
+struct rz_dmac_icu {
+ struct platform_device *pdev;
+ u8 dmac_index;
+};
+
struct rz_dmac {
struct dma_device engine;
+ struct rz_dmac_icu icu;
struct device *dev;
struct reset_control *rstc;
void __iomem *base;
@@ -99,6 +106,8 @@ struct rz_dmac {
unsigned int n_channels;
struct rz_dmac_chan *channels;
+ bool has_icu;
+
DECLARE_BITMAP(modules, 1024);
};
@@ -167,6 +176,9 @@ struct rz_dmac {
#define RZ_DMAC_MAX_CHANNELS 16
#define DMAC_NR_LMDESC 64
+/* RZ/V2H ICU related */
+#define RZV2H_MAX_DMAC_INDEX 4
+
/*
* -----------------------------------------------------------------------------
* Device access
@@ -324,7 +336,13 @@ static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
lmdesc->chext = 0;
lmdesc->header = HEADER_LV;
- rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index,
+ RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ }
channel->chcfg = chcfg;
channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
@@ -375,7 +393,13 @@ static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
channel->lmdesc.tail = lmdesc;
- rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index, channel->mid_rid);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ }
+
channel->chctrl = CHCTRL_SETEN;
}
@@ -647,7 +671,13 @@ static void rz_dmac_device_synchronize(struct dma_chan *chan)
if (ret < 0)
dev_warn(dmac->dev, "DMA Timeout");
- rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index,
+ RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ }
}
/*
@@ -748,7 +778,8 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
+ return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
+ ofdma->of_node);
}
/*
@@ -823,6 +854,38 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
return 0;
}
+static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ uint32_t dmac_index;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
+ if (ret == -ENOENT)
+ return 0;
+ if (ret)
+ return ret;
+
+ dmac->has_icu = true;
+
+ dmac->icu.pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!dmac->icu.pdev) {
+ dev_err(dev, "ICU device not found.\n");
+ return -ENODEV;
+ }
+
+ dmac_index = args.args[0];
+ if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
+ dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
+ return -EINVAL;
+ }
+ dmac->icu.dmac_index = dmac_index;
+
+ return 0;
+}
+
static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -839,7 +902,7 @@ static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
return -EINVAL;
}
- return 0;
+ return rz_dmac_parse_of_icu(dev, dmac);
}
static int rz_dmac_probe(struct platform_device *pdev)
@@ -873,9 +936,11 @@ static int rz_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
- dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dmac->ext_base))
- return PTR_ERR(dmac->ext_base);
+ if (!dmac->has_icu) {
+ dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->ext_base))
+ return PTR_ERR(dmac->ext_base);
+ }
/* Register interrupt handler for error */
irq = platform_get_irq_byname(pdev, irqname);
@@ -990,9 +1055,12 @@ static void rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ platform_device_put(dmac->icu.pdev);
}
static const struct of_device_id of_rz_dmac_match[] = {
+ { .compatible = "renesas,r9a09g057-dmac", },
{ .compatible = "renesas,rz-dmac", },
{ /* Sentinel */ }
};
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index ce80ac4b1a1b..fad896ff29a2 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -27,10 +27,10 @@
#define ADMA_CH_INT_CLEAR 0x1c
#define ADMA_CH_CTRL 0x24
-#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
+#define ADMA_CH_CTRL_DIR(val, mask, shift) (((val) & (mask)) << (shift))
#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
-#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
+#define ADMA_CH_CTRL_MODE_CONTINUOUS(shift) (2 << (shift))
#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0
@@ -41,15 +41,27 @@
#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
-#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 4)
+
+#define ADMA_GLOBAL_CH_CONFIG 0x400
+#define ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0x7)
+#define ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 8)
#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30
#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70
#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84
+#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 0x44
+#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 0x48
+#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 0x100
+#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 0x104
+#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 0x180
+#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 0x184
+#define TEGRA264_ADMA_GLOBAL_PAGE_OFFSET 0x8
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
+#define ADMA_GLOBAL_CH_FIFO_CTRL 0x300
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@@ -73,36 +85,48 @@ struct tegra_adma;
* @adma_get_burst_config: Function callback used to set DMA burst size.
* @global_reg_offset: Register offset of DMA global register.
* @global_int_clear: Register offset of DMA global interrupt clear.
+ * @global_ch_fifo_base: Global channel FIFO control base offset
+ * @global_ch_config_base: Global channel config base offset
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
+ * @ch_dir_shift: Channel direction bit position.
+ * @ch_mode_shift: Channel mode bit position.
* @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_tc_offset_diff: Offset difference, from the TC register onwards, on Tegra264
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
+ * @ch_config: Outstanding and WRR config values
* @ch_req_mask: Mask for Tx or Rx channel select.
+ * @ch_dir_mask: Mask for channel direction.
* @ch_req_max: Maximum number of Tx or Rx channels available.
* @ch_reg_size: Size of DMA channel register space.
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
* @max_page: Maximum ADMA Channel Page.
- * @has_outstanding_reqs: If DMA channel can have outstanding requests.
* @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
unsigned int global_reg_offset;
unsigned int global_int_clear;
+ unsigned int global_ch_fifo_base;
+ unsigned int global_ch_config_base;
unsigned int ch_req_tx_shift;
unsigned int ch_req_rx_shift;
+ unsigned int ch_dir_shift;
+ unsigned int ch_mode_shift;
unsigned int ch_base_offset;
+ unsigned int ch_tc_offset_diff;
unsigned int ch_fifo_ctrl;
+ unsigned int ch_config;
unsigned int ch_req_mask;
+ unsigned int ch_dir_mask;
unsigned int ch_req_max;
unsigned int ch_reg_size;
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
unsigned int max_page;
- bool has_outstanding_reqs;
void (*set_global_pg_config)(struct tegra_adma *tdma);
};
@@ -112,6 +136,7 @@ struct tegra_adma_chip_data {
struct tegra_adma_chan_regs {
unsigned int ctrl;
unsigned int config;
+ unsigned int global_config;
unsigned int src_addr;
unsigned int trg_addr;
unsigned int fifo_ctrl;
@@ -150,6 +175,9 @@ struct tegra_adma_chan {
/* Transfer count and position info */
unsigned int tx_buf_count;
unsigned int tx_buf_pos;
+
+ unsigned int global_ch_fifo_offset;
+ unsigned int global_ch_config_offset;
};
/*
@@ -246,6 +274,29 @@ static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
}
+static void tegra264_adma_global_page_config(struct tegra_adma *tdma)
+{
+ u32 global_page_offset = tdma->ch_page_no * TEGRA264_ADMA_GLOBAL_PAGE_OFFSET;
+
+ /* If the default page (page1) is not used, then clear page1 registers */
+ if (tdma->ch_page_no) {
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1, 0);
+ }
+
+ /* Program global registers for selected page */
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 + global_page_offset, 0x1);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 + global_page_offset, 0x1);
+}
+
static int tegra_adma_init(struct tegra_adma *tdma)
{
u32 status;
@@ -404,11 +455,21 @@ static void tegra_adma_start(struct tegra_adma_chan *tdc)
tdc->tx_buf_pos = 0;
tdc->tx_buf_count = 0;
- tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
+ tdma_ch_write(tdc, ADMA_CH_TC - tdc->tdma->cdata->ch_tc_offset_diff, ch_regs->tc);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
- tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
- tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
- tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
+ ch_regs->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
+ ch_regs->trg_addr);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ else if (tdc->global_ch_fifo_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_regs->fifo_ctrl);
+
+ if (tdc->global_ch_config_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_regs->global_config);
+
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);
/* Start ADMA */
@@ -421,7 +482,8 @@ static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
struct tegra_adma_desc *desc = tdc->desc;
unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
- unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
+ unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS -
+ tdc->tdma->cdata->ch_tc_offset_diff);
unsigned int periods_remaining;
/*
@@ -627,13 +689,16 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
return -EINVAL;
}
- ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
- ADMA_CH_CTRL_MODE_CONTINUOUS |
+ ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir, cdata->ch_dir_mask,
+ cdata->ch_dir_shift) |
+ ADMA_CH_CTRL_MODE_CONTINUOUS(cdata->ch_mode_shift) |
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
- ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
- if (cdata->has_outstanding_reqs)
- ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
+
+ if (cdata->global_ch_config_base)
+ ch_regs->global_config |= cdata->ch_config;
+ else
+ ch_regs->config |= cdata->ch_config;
/*
* 'sreq_index' represents the current ADMAIF channel number and as per
@@ -788,12 +853,23 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
/* skip if channel is not active */
if (!ch_reg->cmd)
continue;
- ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
- ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
- ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
+ ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff);
+ ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR -
+ tdma->cdata->ch_tc_offset_diff);
+ ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR -
+ tdma->cdata->ch_tc_offset_diff);
ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
- ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+
+ if (tdc->global_ch_config_offset)
+ ch_reg->global_config = tdma_read(tdc->tdma, tdc->global_ch_config_offset);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+ else if (tdc->global_ch_fifo_offset)
+ ch_reg->fifo_ctrl = tdma_read(tdc->tdma, tdc->global_ch_fifo_offset);
+
ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
+
}
clk_disable:
@@ -832,12 +908,23 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
continue;
- tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
- tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
- tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
+ tdma_ch_write(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff, ch_reg->tc);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdma->cdata->ch_tc_offset_diff,
+ ch_reg->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdma->cdata->ch_tc_offset_diff,
+ ch_reg->trg_addr);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
- tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+ else if (tdc->global_ch_fifo_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_reg->fifo_ctrl);
+
+ if (tdc->global_ch_config_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_reg->global_config);
+
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
+
tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
}
@@ -848,17 +935,23 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.adma_get_burst_config = tegra210_adma_get_burst_config,
.global_reg_offset = 0xc00,
.global_int_clear = 0x20,
+ .global_ch_fifo_base = 0,
+ .global_ch_config_base = 0,
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
+ .ch_dir_shift = 12,
+ .ch_mode_shift = 8,
.ch_base_offset = 0,
+ .ch_tc_offset_diff = 0,
+ .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1),
.ch_req_mask = 0xf,
+ .ch_dir_mask = 0xf,
.ch_req_max = 10,
.ch_reg_size = 0x80,
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
.max_page = 0,
- .has_outstanding_reqs = false,
.set_global_pg_config = NULL,
};
@@ -866,23 +959,56 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.adma_get_burst_config = tegra186_adma_get_burst_config,
.global_reg_offset = 0,
.global_int_clear = 0x402c,
+ .global_ch_fifo_base = 0,
+ .global_ch_config_base = 0,
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
+ .ch_dir_shift = 12,
+ .ch_mode_shift = 8,
.ch_base_offset = 0x10000,
+ .ch_tc_offset_diff = 0,
+ .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1) |
+ TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8),
.ch_req_mask = 0x1f,
+ .ch_dir_mask = 0xf,
.ch_req_max = 20,
.ch_reg_size = 0x100,
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
.max_page = 4,
- .has_outstanding_reqs = true,
.set_global_pg_config = tegra186_adma_global_page_config,
};
+static const struct tegra_adma_chip_data tegra264_chip_data = {
+ .adma_get_burst_config = tegra186_adma_get_burst_config,
+ .global_reg_offset = 0,
+ .global_int_clear = 0x800c,
+ .global_ch_fifo_base = ADMA_GLOBAL_CH_FIFO_CTRL,
+ .global_ch_config_base = ADMA_GLOBAL_CH_CONFIG,
+ .ch_req_tx_shift = 26,
+ .ch_req_rx_shift = 20,
+ .ch_dir_shift = 10,
+ .ch_mode_shift = 7,
+ .ch_base_offset = 0x10000,
+ .ch_tc_offset_diff = 4,
+ .ch_config = ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(1) |
+ ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(8),
+ .ch_req_mask = 0x3f,
+ .ch_dir_mask = 7,
+ .ch_req_max = 32,
+ .ch_reg_size = 0x100,
+ .nr_channels = 64,
+ .ch_fifo_size_mask = 0x7f,
+ .sreq_index_offset = 0,
+ .max_page = 10,
+ .set_global_pg_config = tegra264_adma_global_page_config,
+};
+
static const struct of_device_id tegra_adma_of_match[] = {
{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
+ { .compatible = "nvidia,tegra264-adma", .data = &tegra264_chip_data },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
@@ -985,6 +1111,15 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);
+ if (tdma->base_addr) {
+ if (cdata->global_ch_fifo_base)
+ tdc->global_ch_fifo_offset = cdata->global_ch_fifo_base + (4 * i);
+
+ if (cdata->global_ch_config_base)
+ tdc->global_ch_config_offset =
+ cdata->global_ch_config_base + (4 * i);
+ }
+
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
ret = tdc->irq ?: -ENXIO;
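
Note: a hedged sketch of how the parameterized direction macro composes per chip, using the ch_dir_mask/ch_dir_shift values from the chip data above (demo_ctrl_dir() is hypothetical):

#define ADMA_CH_CTRL_DIR(val, mask, shift)	(((val) & (mask)) << (shift))

static unsigned int demo_ctrl_dir(int tegra264, unsigned int adma_dir)
{
	/* Tegra210/186: 4-bit field at bit 12; Tegra264: 3-bit field at bit 10 */
	return tegra264 ? ADMA_CH_CTRL_DIR(adma_dir, 0x7, 10)
			: ADMA_CH_CTRL_DIR(adma_dir, 0xf, 12);
}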
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b6255c0601bb..aa2dc762140f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -5624,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
-
+ if (!uc->name)
+ return -ENOMEM;
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 3ad44afd0e74..a34d8f0ceed8 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2909,6 +2909,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return -EINVAL;
}
+ xdev->common.directions |= chan->direction;
+
/* Request the interrupt */
chan->irq = of_irq_get(node, chan->tdest);
if (chan->irq < 0)
@@ -3115,6 +3117,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
}
+ dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len);
+
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->has_axistream_connected =
of_property_read_bool(node, "xlnx,axistream-connected");
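
Note: a hedged sketch of what the new dma_set_max_seg_size() call enables — clients can query the advertised limit back instead of guessing a safe segment length (demo_seg_cap() is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

static size_t demo_seg_cap(struct device *dev, size_t want)
{
	/* reads the limit set by dma_set_max_seg_size() during probe */
	return min_t(size_t, want, dma_get_max_seg_size(dev));
}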