Diffstat (limited to 'drivers/dma')
28 files changed, 7985 insertions, 0 deletions
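The series below adds a driver-model DMA uclass (drivers/dma/dma-uclass.c) plus several controller drivers. For orientation, here is a minimal sketch of how a client driver might consume a channel through the resulting API with DMA_CHANNELS and OF_CONTROL enabled; the "tx" channel name and the example_send() helper are hypothetical, not part of the series:

```c
#include <dm.h>
#include <dma.h>

/* Hypothetical client: stream one buffer out of a device's "tx" channel */
static int example_send(struct udevice *dev, void *buf, size_t len)
{
	struct dma tx;
	int ret;

	/* resolves the "dmas"/"dma-names" properties of dev's DT node */
	ret = dma_get_by_name(dev, "tx", &tx);
	if (ret)
		return ret;

	ret = dma_enable(&tx);
	if (ret)
		goto free;

	/* metadata is driver-specific; some drivers ignore it */
	ret = dma_send(&tx, buf, len, NULL);

	dma_disable(&tx);
free:
	dma_free(&tx);
	return ret;
}
```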
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
new file mode 100644
index 00000000000..1fccbc96f07
--- /dev/null
+++ b/drivers/dma/Kconfig
@@ -0,0 +1,96 @@
+menu "DMA Support"
+
+config DMA
+	bool "Enable Driver Model for DMA drivers"
+	depends on DM
+	help
+	  Enable driver model for DMA. DMA engines can do
+	  asynchronous data transfers without involving the host
+	  CPU. Currently, this framework can be used to offload
+	  memory copies to and from devices like QSPI, Ethernet,
+	  etc. Drivers provide methods to access the DMA device
+	  buses that are used to transfer data to and from memory.
+	  The uclass interface is defined in include/dma.h.
+
+config DMA_CHANNELS
+	bool "Enable DMA channels support"
+	depends on DMA
+	help
+	  Enable channels support for DMA. Some DMA controllers have multiple
+	  channels which can transfer data to/from different devices.
+
+config SANDBOX_DMA
+	bool "Enable the sandbox DMA test driver"
+	depends on DMA && DMA_CHANNELS && SANDBOX
+	help
+	  Enable support for a test DMA uclass implementation. It simulates
+	  DMA transfers by simply copying data between channels.
+
+config BCM6348_IUDMA
+	bool "BCM6348 IUDMA driver"
+	depends on ARCH_BMIPS
+	select DMA_CHANNELS
+	help
+	  Enable the BCM6348 IUDMA driver.
+	  This driver supports data transfers from devices to
+	  memory and from memory to devices.
+
+config TI_EDMA3
+	bool "TI EDMA3 driver"
+	depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE
+	select DMA_LEGACY
+	help
+	  Enable the TI EDMA3 driver for DRA7xx and AM43xx evms.
+	  This driver supports data transfers between memory
+	  regions.
+
+config TI_KSNAV
+	bool "TI Keystone Navigator DMA driver"
+	depends on ARCH_KEYSTONE
+	default y
+	select DMA_LEGACY
+	help
+	  Enable the Keystone Navigator driver for Keystone 2 platforms.
+
+config APBH_DMA
+	bool "Support APBH DMA"
+	depends on MX23 || MX28 || MX6 || MX7 || IMX8 || IMX8M
+	select DMA_LEGACY
+	help
+	  Enable APBH DMA driver.
+
+config XILINX_DPDMA
+	bool "Enable ZynqMP Display Port DMA driver"
+	depends on DMA && ZYNQMP_POWER_DOMAIN
+	help
+	  Enable support for the Xilinx ZynqMP Display Port DMA driver.
+	  Currently this file is only a placeholder for the driver: it
+	  records the compatible string and calls the power domain driver.
+
+config ADI_DMA
+	bool "ADI DMA driver"
+	depends on DMA && DMA_CHANNELS
+	help
+	  Enable DMA support for Analog Devices SoCs, such as the SC5xx.
+	  Currently this is a minimal driver, tested against OSPI use only.
+
+if APBH_DMA
+config APBH_DMA_BURST
+	bool "Enable DMA BURST"
+
+config APBH_DMA_BURST8
+	bool "Enable DMA BURST8"
+
+endif
+
+config DMA_LEGACY
+	bool "Legacy DMA support"
+	help
+	  Enable legacy DMA support. This does not use driver model and should
+	  be migrated to the new API.
+
+	  It is required for some PowerPC boards.
+
+source "drivers/dma/ti/Kconfig"
+
+endmenu # menu "DMA Support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
new file mode 100644
index 00000000000..fde26c2a886
--- /dev/null
+++ b/drivers/dma/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ +obj-$(CONFIG_DMA) += dma-uclass.o + +obj-$(CONFIG_APBH_DMA) += apbh_dma.o +obj-$(CONFIG_BCM6348_IUDMA) += bcm6348-iudma.o +obj-$(CONFIG_FSL_DMA) += fsl_dma.o +obj-$(CONFIG_SANDBOX_DMA) += sandbox-dma-test.o +obj-$(CONFIG_TI_KSNAV) += keystone_nav.o keystone_nav_cfg.o +obj-$(CONFIG_TI_EDMA3) += ti-edma3.o +obj-$(CONFIG_XILINX_DPDMA) += xilinx_dpdma.o +obj-$(CONFIG_ADI_DMA) += adi_dma.o + +obj-y += ti/ diff --git a/drivers/dma/adi_dma.c b/drivers/dma/adi_dma.c new file mode 100644 index 00000000000..28afe488db0 --- /dev/null +++ b/drivers/dma/adi_dma.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Analog Devices DMA controller driver + * + * (C) Copyright 2024 - Analog Devices, Inc. + * + * Written and/or maintained by Timesys Corporation + * + * Contact: Nathan Barrett-Morrison <nathan.morrison@timesys.com> + * Contact: Greg Malysa <greg.malysa@timesys.com> + * Contact: Ian Roberts <ian.roberts@timesys.com> + * + */ +#include <dm.h> +#include <dma.h> +#include <dma-uclass.h> +#include <dm/device_compat.h> +#include <linux/errno.h> +#include <linux/io.h> + +#define HAS_MDMA BIT(0) + +#define REG_ADDRSTART 0x04 +#define REG_CFG 0x08 +#define REG_XCNT 0x0C +#define REG_XMOD 0x10 +#define REG_STAT 0x30 + +#define BITP_DMA_CFG_MSIZE 8 +#define BITP_DMA_CFG_PSIZE 4 +#define BITM_DMA_CFG_WNR 0x00000002 +#define BITM_DMA_CFG_EN 0x00000001 +#define ENUM_DMA_CFG_XCNT_INT 0x00100000 + +#define BITP_DMA_STAT_PBWID 12 +#define BITP_DMA_STAT_ERRC 4 +#define BITM_DMA_STAT_PBWID 0x00003000 +#define BITM_DMA_STAT_ERRC 0x00000070 +#define BITM_DMA_STAT_PIRQ 0x00000004 +#define BITM_DMA_STAT_IRQERR 0x00000002 +#define BITM_DMA_STAT_IRQDONE 0x00000001 + +#define DMA_MDMA_SRC_DEFAULT_CONFIG(psize, msize) \ + (BITM_DMA_CFG_EN | ((psize) << BITP_DMA_CFG_PSIZE) | ((msize) << BITP_DMA_CFG_MSIZE)) +#define DMA_MDMA_DST_DEFAULT_CONFIG(psize, msize) \ + (BITM_DMA_CFG_EN | BITM_DMA_CFG_WNR | ENUM_DMA_CFG_XCNT_INT | \ + ((psize) << BITP_DMA_CFG_PSIZE) | ((msize) << BITP_DMA_CFG_MSIZE)) + +struct adi_dma_channel { + int id; + struct adi_dma *dma; + void __iomem *iosrc; + void __iomem *iodest; +}; + +struct adi_dma { + struct udevice *dev; + struct adi_dma_channel channels[1]; + void __iomem *ioaddr; + unsigned long hw_cfg; +}; + +static const struct udevice_id dma_dt_ids[] = { + { .compatible = "adi,mdma-controller", .data = HAS_MDMA }, + { } +}; + +static u8 adi_dma_get_msize(u32 n_bytecount, u32 n_address) +{ + /* Calculate MSIZE, PSIZE, XCNT and XMOD */ + u8 n_msize = 0; + u32 n_value = n_bytecount | n_address; + u32 n_mask = 0x1; + + for (n_msize = 0; n_msize < 5; n_msize++, n_mask <<= 1) { + if ((n_value & n_mask) == n_mask) + break; + } + + return n_msize; +} + +static int adi_dma_get_ch_error(void __iomem *ch) +{ + u32 cause = (ioread32(ch + REG_STAT) & BITM_DMA_STAT_ERRC) >> + BITP_DMA_STAT_ERRC; + switch (cause) { + case 0: + return -EINVAL; + case 1: + return -EBUSY; + case 2: + return -EFAULT; + case 3: + fallthrough; + case 5: + fallthrough; + case 6: + fallthrough; + default: + return -EIO; + } +} + +static int adi_mdma_transfer(struct udevice *dev, int direction, + dma_addr_t dst, dma_addr_t src, size_t len) +{ + struct adi_dma *priv = dev_get_priv(dev); + void __iomem *chsrc = priv->channels[0].iosrc; + void __iomem *chdst = priv->channels[0].iodest; + + int result = 0; + u32 reg; + u32 bytecount = len; + + u8 n_srcmsize; + u8 n_dstmsize; + u8 n_srcpsize; + u8 n_dstpsize; + u8 n_psize; + u32 srcconfig; + u32 dstconfig; + u8 srcpsizemax = (ioread32(chsrc + REG_STAT) & 
BITM_DMA_STAT_PBWID) >>
+			 BITP_DMA_STAT_PBWID;
+	u8 dstpsizemax = (ioread32(chdst + REG_STAT) & BITM_DMA_STAT_PBWID) >>
+			 BITP_DMA_STAT_PBWID;
+
+	const u32 CLRSTAT = (BITM_DMA_STAT_IRQDONE | BITM_DMA_STAT_IRQERR |
+			     BITM_DMA_STAT_PIRQ);
+
+	if (len == 0)
+		return -EINVAL;
+
+	/* Clear DMA status */
+	iowrite32(CLRSTAT, chsrc + REG_STAT);
+	iowrite32(CLRSTAT, chdst + REG_STAT);
+
+	/* Calculate MSIZE, PSIZE, XCNT and XMOD */
+	n_srcmsize = adi_dma_get_msize(bytecount, src);
+	n_dstmsize = adi_dma_get_msize(bytecount, dst);
+	n_srcpsize = min(n_srcmsize, srcpsizemax);
+	n_dstpsize = min(n_dstmsize, dstpsizemax);
+	n_psize = min(n_srcpsize, n_dstpsize);
+
+	srcconfig = DMA_MDMA_SRC_DEFAULT_CONFIG(n_psize, n_srcmsize);
+	dstconfig = DMA_MDMA_DST_DEFAULT_CONFIG(n_psize, n_dstmsize);
+
+	/* Load the DMA descriptors */
+	iowrite32(src, chsrc + REG_ADDRSTART);
+	iowrite32(bytecount >> n_srcmsize, chsrc + REG_XCNT);
+	iowrite32(1 << n_srcmsize, chsrc + REG_XMOD);
+	iowrite32(dst, chdst + REG_ADDRSTART);
+	iowrite32(bytecount >> n_dstmsize, chdst + REG_XCNT);
+	iowrite32(1 << n_dstmsize, chdst + REG_XMOD);
+
+	iowrite32(dstconfig, chdst + REG_CFG);
+	iowrite32(srcconfig, chsrc + REG_CFG);
+
+	/* Wait for DMA to complete while checking for a DMA error */
+	do {
+		reg = ioread32(chsrc + REG_STAT);
+		if ((reg & BITM_DMA_STAT_IRQERR) == BITM_DMA_STAT_IRQERR) {
+			result = adi_dma_get_ch_error(chsrc);
+			break;
+		}
+		reg = ioread32(chdst + REG_STAT);
+		if ((reg & BITM_DMA_STAT_IRQERR) == BITM_DMA_STAT_IRQERR) {
+			result = adi_dma_get_ch_error(chdst);
+			break;
+		}
+	} while ((reg & BITM_DMA_STAT_IRQDONE) == 0);
+
+	clrbits_32(chsrc + REG_CFG, 1);
+	clrbits_32(chdst + REG_CFG, 1);
+
+	return result;
+}
+
+static int adi_dma_init_channel(struct adi_dma *dma,
+				struct adi_dma_channel *channel, ofnode node)
+{
+	u32 offset;
+
+	if (ofnode_read_u32(node, "adi,id", &channel->id)) {
+		dev_err(dma->dev, "Missing adi,id for channel %s\n",
+			ofnode_get_name(node));
+		return -ENOENT;
+	}
+
+	if (ofnode_read_u32(node, "adi,src-offset", &offset)) {
+		dev_err(dma->dev, "Missing adi,src-offset for channel %s\n",
+			ofnode_get_name(node));
+		return -ENOENT;
+	}
+
+	channel->iosrc = dma->ioaddr + offset;
+	channel->dma = dma;
+
+	if (dma->hw_cfg & HAS_MDMA) {
+		if (ofnode_read_u32(node, "adi,dest-offset", &offset)) {
+			dev_err(dma->dev,
+				"Missing adi,dest-offset for channel %s\n",
+				ofnode_get_name(node));
+			return -ENOENT;
+		}
+		channel->iodest = dma->ioaddr + offset;
+	}
+
+	return 0;
+}
+
+static int adi_dma_probe(struct udevice *dev)
+{
+	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
+	struct adi_dma *priv = dev_get_priv(dev);
+	ofnode node, child;
+
+	priv->hw_cfg = dev_get_driver_data(dev);
+	if (priv->hw_cfg & HAS_MDMA)
+		uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM;
+
+	priv->ioaddr = dev_remap_addr(dev);
+	if (!priv->ioaddr)
+		return -EINVAL;
+
+	node = dev_read_first_subnode(dev);
+	if (!ofnode_valid(node)) {
+		dev_err(dev,
+			"Error: device tree DMA channel config missing!\n");
+		return -ENODEV;
+	}
+
+	node = dev_ofnode(dev);
+	ofnode_for_each_subnode(child, node) {
+		adi_dma_init_channel(priv, priv->channels, child);
+		break; /* only one channel supported for now */
+	}
+
+	return 0;
+}
+
+static const struct dma_ops adi_dma_ops = {
+	.transfer = adi_mdma_transfer,
+};
+
+U_BOOT_DRIVER(adi_dma) = {
+	.name = "adi_dma",
+	.id = UCLASS_DMA,
+	.of_match = dma_dt_ids,
+	.ops = &adi_dma_ops,
+	.probe = adi_dma_probe,
+	.priv_auto = sizeof(struct adi_dma),
+};
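The MSIZE selection above is worth a note: adi_dma_get_msize() returns the index of the lowest set bit of (byte count | address), i.e. the largest power-of-two element size to which both are aligned, capped at 32-byte elements. A standalone sketch of that logic, with made-up sample values for illustration only:

```c
#include <stdio.h>

/* mirror of the driver's MSIZE selection, for illustration only */
static unsigned int msize(unsigned int bytecount, unsigned int addr)
{
	unsigned int value = bytecount | addr;
	unsigned int m;

	for (m = 0; m < 5; m++)		/* cap at 1 << 5 = 32-byte elements */
		if (value & (1u << m))
			break;
	return m;			/* element size is 1 << m bytes */
}

int main(void)
{
	/* 64 bytes at an 8-byte-aligned address: 8-byte elements (m = 3) */
	printf("msize = %u\n", msize(64, 0x1008));
	/* an odd length forces byte-wide elements (m = 0) */
	printf("msize = %u\n", msize(63, 0x1008));
	return 0;
}
```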
diff --git a/drivers/dma/apbh_dma.c b/drivers/dma/apbh_dma.c
new file mode 100644
index 00000000000..331815c469f
--- /dev/null
+++ b/drivers/dma/apbh_dma.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale i.MX28 APBH DMA driver
+ *
+ * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
+ * on behalf of DENX Software Engineering GmbH
+ *
+ * Based on code from LTIB:
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <cpu_func.h>
+#include <asm/cache.h>
+#include <linux/list.h>
+
+#include <malloc.h>
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/imx-regs.h>
+#include <asm/arch/sys_proto.h>
+#include <asm/mach-imx/dma.h>
+#include <asm/mach-imx/regs-apbh.h>
+
+static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];
+
+/*
+ * Test if the DMA channel is a valid channel
+ */
+int mxs_dma_validate_chan(int channel)
+{
+	struct mxs_dma_chan *pchan;
+
+	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
+		return -EINVAL;
+
+	pchan = mxs_dma_channels + channel;
+	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Return the address of the command within a descriptor.
+ */
+static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
+{
+	return desc->address + offsetof(struct mxs_dma_desc, cmd);
+}
+
+/*
+ * Read a DMA channel's hardware semaphore.
+ *
+ * As used by the MXS platform's DMA software, the DMA channel's hardware
+ * semaphore reflects the number of DMA commands the hardware will process
+ * but has not yet finished. This is a volatile value read directly from
+ * hardware, so it must be viewed as immediately stale.
+ *
+ * If the channel is not marked busy, or has finished processing all its
+ * commands, this value should be zero.
+ *
+ * See mxs_dma_desc_append() for details on how DMA command blocks must be
+ * configured to maintain the expected behavior of the semaphore's value.
+ */
+static int mxs_dma_read_semaphore(int channel)
+{
+	struct mxs_apbh_regs *apbh_regs =
+		(struct mxs_apbh_regs *)MXS_APBH_BASE;
+	uint32_t tmp;
+	int ret;
+
+	ret = mxs_dma_validate_chan(channel);
+	if (ret)
+		return ret;
+
+	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);
+
+	tmp &= APBH_CHn_SEMA_PHORE_MASK;
+	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;
+
+	return tmp;
+}
+
+#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
+void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
+{
+	uint32_t addr;
+	uint32_t size;
+
+	addr = (uintptr_t)desc;
+	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
+
+	flush_dcache_range(addr, addr + size);
+}
+#else
+inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
+#endif
+
+/*
+ * Enable a DMA channel.
+ *
+ * If the given channel has any DMA descriptors on its active list, this
+ * function causes the DMA hardware to begin processing them.
+ *
+ * This function marks the DMA channel as "busy," whether or not there are any
+ * descriptors to process.
+ */ +static int mxs_dma_enable(int channel) +{ + struct mxs_apbh_regs *apbh_regs = + (struct mxs_apbh_regs *)MXS_APBH_BASE; + unsigned int sem; + struct mxs_dma_chan *pchan; + struct mxs_dma_desc *pdesc; + int ret; + + ret = mxs_dma_validate_chan(channel); + if (ret) + return ret; + + pchan = mxs_dma_channels + channel; + + if (pchan->pending_num == 0) { + pchan->flags |= MXS_DMA_FLAGS_BUSY; + return 0; + } + + pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node); + if (pdesc == NULL) + return -EFAULT; + + if (pchan->flags & MXS_DMA_FLAGS_BUSY) { + if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN)) + return 0; + + sem = mxs_dma_read_semaphore(channel); + if (sem == 0) + return 0; + + if (sem == 1) { + pdesc = list_entry(pdesc->node.next, + struct mxs_dma_desc, node); + writel(mxs_dma_cmd_address(pdesc), + &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar); + } + writel(pchan->pending_num, + &apbh_regs->ch[channel].hw_apbh_ch_sema); + pchan->active_num += pchan->pending_num; + pchan->pending_num = 0; + } else { + pchan->active_num += pchan->pending_num; + pchan->pending_num = 0; + writel(mxs_dma_cmd_address(pdesc), + &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar); + writel(pchan->active_num, + &apbh_regs->ch[channel].hw_apbh_ch_sema); + writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET), + &apbh_regs->hw_apbh_ctrl0_clr); + } + + pchan->flags |= MXS_DMA_FLAGS_BUSY; + return 0; +} + +/* + * Disable a DMA channel. + * + * This function shuts down a DMA channel and marks it as "not busy." Any + * descriptors on the active list are immediately moved to the head of the + * "done" list, whether or not they have actually been processed by the + * hardware. The "ready" flags of these descriptors are NOT cleared, so they + * still appear to be active. + * + * This function immediately shuts down a DMA channel's hardware, aborting any + * I/O that may be in progress, potentially leaving I/O hardware in an undefined + * state. It is unwise to call this function if there is ANY chance the hardware + * is still processing a command. + */ +static int mxs_dma_disable(int channel) +{ + struct mxs_dma_chan *pchan; + struct mxs_apbh_regs *apbh_regs = + (struct mxs_apbh_regs *)MXS_APBH_BASE; + int ret; + + ret = mxs_dma_validate_chan(channel); + if (ret) + return ret; + + pchan = mxs_dma_channels + channel; + + if (!(pchan->flags & MXS_DMA_FLAGS_BUSY)) + return -EINVAL; + + writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET), + &apbh_regs->hw_apbh_ctrl0_set); + + pchan->flags &= ~MXS_DMA_FLAGS_BUSY; + pchan->active_num = 0; + pchan->pending_num = 0; + list_splice_init(&pchan->active, &pchan->done); + + return 0; +} + +/* + * Resets the DMA channel hardware. + */ +static int mxs_dma_reset(int channel) +{ + struct mxs_apbh_regs *apbh_regs = + (struct mxs_apbh_regs *)MXS_APBH_BASE; + int ret; +#if defined(CONFIG_MX23) + uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set); + uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET; +#elif defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7) || \ + defined(CONFIG_IMX8) || defined(CONFIG_IMX8M) + u32 setreg = (uintptr_t)(&apbh_regs->hw_apbh_channel_ctrl_set); + u32 offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET; +#endif + + ret = mxs_dma_validate_chan(channel); + if (ret) + return ret; + + writel(1 << (channel + offset), (uintptr_t)setreg); + + return 0; +} + +/* + * Enable or disable DMA interrupt. + * + * This function enables the given DMA channel to interrupt the CPU. 
+ */ +static int mxs_dma_enable_irq(int channel, int enable) +{ + struct mxs_apbh_regs *apbh_regs = + (struct mxs_apbh_regs *)MXS_APBH_BASE; + int ret; + + ret = mxs_dma_validate_chan(channel); + if (ret) + return ret; + + if (enable) + writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET), + &apbh_regs->hw_apbh_ctrl1_set); + else + writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET), + &apbh_regs->hw_apbh_ctrl1_clr); + + return 0; +} + +/* + * Clear DMA interrupt. + * + * The software that is using the DMA channel must register to receive its + * interrupts and, when they arrive, must call this function to clear them. + */ +static int mxs_dma_ack_irq(int channel) +{ + struct mxs_apbh_regs *apbh_regs = + (struct mxs_apbh_regs *)MXS_APBH_BASE; + int ret; + + ret = mxs_dma_validate_chan(channel); + if (ret) + return ret; + + writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr); + writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr); + + return 0; +} + +/* + * Request to reserve a DMA channel + */ +static int mxs_dma_request(int channel) +{ + struct mxs_dma_chan *pchan; + + if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS)) + return -EINVAL; + + pchan = mxs_dma_channels + channel; + if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID) + return -ENODEV; + + if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED) + return -EBUSY; + + pchan->flags |= MXS_DMA_FLAGS_ALLOCATED; + pchan->active_num = 0; + pchan->pending_num = 0; + + INIT_LIST_HEAD(&pchan->active); + INIT_LIST_HEAD(&pchan->done); + + return 0; +} + +/* + * Release a DMA channel. + * + * This function releases a DMA channel from its current owner. + * + * The channel will NOT be released if it's marked "busy" (see + * mxs_dma_enable()). + */ +int mxs_dma_release(int channel) +{ + struct mxs_dma_chan *pchan; + int ret; + + ret = mxs_dma_validate_chan(channel); + if (ret) + return ret; + + pchan = mxs_dma_channels + channel; + + if (pchan->flags & MXS_DMA_FLAGS_BUSY) + return -EBUSY; + + pchan->dev = 0; + pchan->active_num = 0; + pchan->pending_num = 0; + pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED; + + return 0; +} + +/* + * Allocate DMA descriptor + */ +struct mxs_dma_desc *mxs_dma_desc_alloc(void) +{ + struct mxs_dma_desc *pdesc; + uint32_t size; + + size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT); + pdesc = memalign(MXS_DMA_ALIGNMENT, size); + + if (pdesc == NULL) + return NULL; + + memset(pdesc, 0, sizeof(*pdesc)); + pdesc->address = (dma_addr_t)pdesc; + + return pdesc; +}; + +/* + * Free DMA descriptor + */ +void mxs_dma_desc_free(struct mxs_dma_desc *pdesc) +{ + if (pdesc == NULL) + return; + + free(pdesc); +} + +/* + * Add a DMA descriptor to a channel. + * + * If the descriptor list for this channel is not empty, this function sets the + * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so + * it will chain to the new descriptor's command. + * + * Then, this function marks the new descriptor as "ready," adds it to the end + * of the active descriptor list, and increments the count of pending + * descriptors. + * + * The MXS platform DMA software imposes some rules on DMA commands to maintain + * important invariants. These rules are NOT checked, but they must be carefully + * applied by software that uses MXS DMA channels. + * + * Invariant: + * The DMA channel's hardware semaphore must reflect the number of DMA + * commands the hardware will process, but has not yet finished. 
+ *
+ * Explanation:
+ * A DMA channel begins processing commands when its hardware semaphore is
+ * written with a value greater than zero, and it stops processing commands
+ * when the semaphore returns to zero.
+ *
+ * When a channel finishes a DMA command, it will decrement its semaphore if
+ * the DECREMENT_SEMAPHORE bit is set in that command's flags.
+ *
+ * In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be set,
+ * unless it suits the purposes of the software. For example, one could
+ * construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
+ * bit set only in the last one. Then, setting the DMA channel's hardware
+ * semaphore to one would cause the entire series of five commands to be
+ * processed. However, this example would violate the invariant given above.
+ *
+ * Rule:
+ * ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the DMA
+ * channel's hardware semaphore will be decremented EVERY time a command is
+ * processed.
+ */
+int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
+{
+	struct mxs_dma_chan *pchan;
+	struct mxs_dma_desc *last;
+	int ret;
+
+	ret = mxs_dma_validate_chan(channel);
+	if (ret)
+		return ret;
+
+	pchan = mxs_dma_channels + channel;
+
+	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
+	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;
+
+	if (!list_empty(&pchan->active)) {
+		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
+				  node);
+
+		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
+		last->flags &= ~MXS_DMA_DESC_LAST;
+
+		last->cmd.next = mxs_dma_cmd_address(pdesc);
+		last->cmd.data |= MXS_DMA_DESC_CHAIN;
+
+		mxs_dma_flush_desc(last);
+	}
+	pdesc->flags |= MXS_DMA_DESC_READY;
+	if (pdesc->flags & MXS_DMA_DESC_FIRST)
+		pchan->pending_num++;
+	list_add_tail(&pdesc->node, &pchan->active);
+
+	mxs_dma_flush_desc(pdesc);
+
+	return ret;
+}
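To make the rule above concrete, here is a hedged usage sketch, patterned on how MXS platform code drives this API; example_run_one() is a hypothetical helper, and the MXS_DMA_DESC_* command flags are assumed to come from asm/mach-imx/dma.h:

```c
/* hypothetical caller: run a single DMA write command synchronously */
static int example_run_one(int channel, dma_addr_t buf, u32 nbytes)
{
	struct mxs_dma_desc *desc;
	int ret;

	desc = mxs_dma_desc_alloc();
	if (!desc)
		return -ENOMEM;

	/* DEC_SEM on every command, per the invariant documented above */
	desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE |
			 MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
			 (nbytes << MXS_DMA_DESC_BYTES_OFFSET);
	desc->cmd.address = buf;

	ret = mxs_dma_desc_append(channel, desc);
	if (!ret)
		ret = mxs_dma_go(channel);	/* enable, wait, ack, reset */

	mxs_dma_desc_free(desc);
	return ret;
}
```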
+
+/*
+ * Clean up processed DMA descriptors.
+ *
+ * This function removes processed DMA descriptors from the "active" list. Pass
+ * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
+ * to get the descriptors moved to the channel's "done" list. Descriptors on
+ * the "done" list can be retrieved with mxs_dma_get_finished().
+ *
+ * This function marks the DMA channel as "not busy" if no unprocessed
+ * descriptors remain on the "active" list.
+ */
+static int mxs_dma_finish(int channel, struct list_head *head)
+{
+	int sem;
+	struct mxs_dma_chan *pchan;
+	struct list_head *p, *q;
+	struct mxs_dma_desc *pdesc;
+	int ret;
+
+	ret = mxs_dma_validate_chan(channel);
+	if (ret)
+		return ret;
+
+	pchan = mxs_dma_channels + channel;
+
+	sem = mxs_dma_read_semaphore(channel);
+	if (sem < 0)
+		return sem;
+
+	if (sem == pchan->active_num)
+		return 0;
+
+	list_for_each_safe(p, q, &pchan->active) {
+		if ((pchan->active_num) <= sem)
+			break;
+
+		pdesc = list_entry(p, struct mxs_dma_desc, node);
+		pdesc->flags &= ~MXS_DMA_DESC_READY;
+
+		if (head)
+			list_move_tail(p, head);
+		else
+			list_move_tail(p, &pchan->done);
+
+		if (pdesc->flags & MXS_DMA_DESC_LAST)
+			pchan->active_num--;
+	}
+
+	if (sem == 0)
+		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
+
+	return 0;
+}
+
+/*
+ * Wait for DMA channel to complete
+ */
+static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
+{
+	struct mxs_apbh_regs *apbh_regs =
+		(struct mxs_apbh_regs *)MXS_APBH_BASE;
+	int ret;
+
+	ret = mxs_dma_validate_chan(chan);
+	if (ret)
+		return ret;
+
+	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
+			      1 << chan, timeout)) {
+		ret = -ETIMEDOUT;
+		mxs_dma_reset(chan);
+	}
+
+	return ret;
+}
+
+/*
+ * Execute the DMA channel
+ */
+int mxs_dma_go(int chan)
+{
+	uint32_t timeout = 10000000;
+	int ret;
+
+	LIST_HEAD(tmp_desc_list);
+
+	mxs_dma_enable_irq(chan, 1);
+	mxs_dma_enable(chan);
+
+	/* Wait for DMA to finish. */
+	ret = mxs_dma_wait_complete(timeout, chan);
+
+	/* Clear out the descriptors we just ran. */
+	mxs_dma_finish(chan, &tmp_desc_list);
+
+	/* Shut the DMA channel down. */
+	mxs_dma_ack_irq(chan);
+	mxs_dma_reset(chan);
+	mxs_dma_enable_irq(chan, 0);
+	mxs_dma_disable(chan);
+
+	return ret;
+}
+
+/*
+ * Execute a continuously running circular DMA descriptor.
+ * NOTE: This is not intended for general use, but rather
+ *	 for the LCD driver in Smart-LCD mode. It allows
+ *	 continuous triggering of the RUN bit there.
+ */ +void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc) +{ + struct mxs_apbh_regs *apbh_regs = + (struct mxs_apbh_regs *)MXS_APBH_BASE; + + mxs_dma_flush_desc(pdesc); + + mxs_dma_enable_irq(chan, 1); + + writel(mxs_dma_cmd_address(pdesc), + &apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar); + writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema); + writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET), + &apbh_regs->hw_apbh_ctrl0_clr); +} + +/* + * Initialize the DMA hardware + */ +void mxs_dma_init(void) +{ + struct mxs_apbh_regs *apbh_regs = + (struct mxs_apbh_regs *)MXS_APBH_BASE; + + mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg); + +#ifdef CONFIG_APBH_DMA_BURST8 + writel(APBH_CTRL0_AHB_BURST8_EN, + &apbh_regs->hw_apbh_ctrl0_set); +#else + writel(APBH_CTRL0_AHB_BURST8_EN, + &apbh_regs->hw_apbh_ctrl0_clr); +#endif + +#ifdef CONFIG_APBH_DMA_BURST + writel(APBH_CTRL0_APB_BURST_EN, + &apbh_regs->hw_apbh_ctrl0_set); +#else + writel(APBH_CTRL0_APB_BURST_EN, + &apbh_regs->hw_apbh_ctrl0_clr); +#endif +} + +int mxs_dma_init_channel(int channel) +{ + struct mxs_dma_chan *pchan; + int ret; + + pchan = mxs_dma_channels + channel; + pchan->flags = MXS_DMA_FLAGS_VALID; + + ret = mxs_dma_request(channel); + + if (ret) { + printf("MXS DMA: Can't acquire DMA channel %i\n", + channel); + return ret; + } + + mxs_dma_reset(channel); + mxs_dma_ack_irq(channel); + + return 0; +} diff --git a/drivers/dma/bcm6348-iudma.c b/drivers/dma/bcm6348-iudma.c new file mode 100644 index 00000000000..fd3a353d548 --- /dev/null +++ b/drivers/dma/bcm6348-iudma.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com> + * + * Derived from linux/drivers/dma/bcm63xx-iudma.c: + * Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu> + * + * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c: + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> + * + * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h: + * Copyright (C) 2000-2010 Broadcom Corporation + * + * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c: + * Copyright (C) 2010 Broadcom Corporation + */ + +#include <clk.h> +#include <cpu_func.h> +#include <dm.h> +#include <dma-uclass.h> +#include <log.h> +#include <malloc.h> +#include <memalign.h> +#include <net.h> +#include <reset.h> +#include <asm/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/printk.h> + +#define DMA_RX_DESC 6 +#define DMA_TX_DESC 1 + +/* DMA Channels */ +#define DMA_CHAN_FLOWC(x) ((x) >> 1) +#define DMA_CHAN_MAX 16 +#define DMA_CHAN_SIZE 0x10 +#define DMA_CHAN_TOUT 500 + +/* DMA Global Configuration register */ +#define DMA_CFG_REG 0x00 +#define DMA_CFG_ENABLE_SHIFT 0 +#define DMA_CFG_ENABLE_MASK (1 << DMA_CFG_ENABLE_SHIFT) +#define DMA_CFG_FLOWC_ENABLE(x) BIT(DMA_CHAN_FLOWC(x) + 1) +#define DMA_CFG_NCHANS_SHIFT 24 +#define DMA_CFG_NCHANS_MASK (0xf << DMA_CFG_NCHANS_SHIFT) + +/* DMA Global Flow Control registers */ +#define DMA_FLOWC_THR_LO_REG(x) (0x04 + DMA_CHAN_FLOWC(x) * 0x0c) +#define DMA_FLOWC_THR_HI_REG(x) (0x08 + DMA_CHAN_FLOWC(x) * 0x0c) +#define DMA_FLOWC_ALLOC_REG(x) (0x0c + DMA_CHAN_FLOWC(x) * 0x0c) +#define DMA_FLOWC_ALLOC_FORCE_SHIFT 31 +#define DMA_FLOWC_ALLOC_FORCE_MASK (1 << DMA_FLOWC_ALLOC_FORCE_SHIFT) + +/* DMA Global Reset register */ +#define DMA_RST_REG 0x34 +#define DMA_RST_CHAN_SHIFT 0 +#define DMA_RST_CHAN_MASK(x) (1 << x) + +/* DMA Channel Configuration register */ +#define DMAC_CFG_REG(x) 
(DMA_CHAN_SIZE * (x) + 0x00) +#define DMAC_CFG_ENABLE_SHIFT 0 +#define DMAC_CFG_ENABLE_MASK (1 << DMAC_CFG_ENABLE_SHIFT) +#define DMAC_CFG_PKT_HALT_SHIFT 1 +#define DMAC_CFG_PKT_HALT_MASK (1 << DMAC_CFG_PKT_HALT_SHIFT) +#define DMAC_CFG_BRST_HALT_SHIFT 2 +#define DMAC_CFG_BRST_HALT_MASK (1 << DMAC_CFG_BRST_HALT_SHIFT) + +/* DMA Channel Max Burst Length register */ +#define DMAC_BURST_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c) + +/* DMA SRAM Descriptor Ring Start register */ +#define DMAS_RSTART_REG(x) (DMA_CHAN_SIZE * (x) + 0x00) + +/* DMA SRAM State/Bytes done/ring offset register */ +#define DMAS_STATE_DATA_REG(x) (DMA_CHAN_SIZE * (x) + 0x04) + +/* DMA SRAM Buffer Descriptor status and length register */ +#define DMAS_DESC_LEN_STATUS_REG(x) (DMA_CHAN_SIZE * (x) + 0x08) + +/* DMA SRAM Buffer Descriptor status and length register */ +#define DMAS_DESC_BASE_BUFPTR_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c) + +/* DMA Descriptor Status */ +#define DMAD_ST_CRC_SHIFT 8 +#define DMAD_ST_CRC_MASK (1 << DMAD_ST_CRC_SHIFT) +#define DMAD_ST_WRAP_SHIFT 12 +#define DMAD_ST_WRAP_MASK (1 << DMAD_ST_WRAP_SHIFT) +#define DMAD_ST_SOP_SHIFT 13 +#define DMAD_ST_SOP_MASK (1 << DMAD_ST_SOP_SHIFT) +#define DMAD_ST_EOP_SHIFT 14 +#define DMAD_ST_EOP_MASK (1 << DMAD_ST_EOP_SHIFT) +#define DMAD_ST_OWN_SHIFT 15 +#define DMAD_ST_OWN_MASK (1 << DMAD_ST_OWN_SHIFT) + +#define DMAD6348_ST_OV_ERR_SHIFT 0 +#define DMAD6348_ST_OV_ERR_MASK (1 << DMAD6348_ST_OV_ERR_SHIFT) +#define DMAD6348_ST_CRC_ERR_SHIFT 1 +#define DMAD6348_ST_CRC_ERR_MASK (1 << DMAD6348_ST_CRC_ERR_SHIFT) +#define DMAD6348_ST_RX_ERR_SHIFT 2 +#define DMAD6348_ST_RX_ERR_MASK (1 << DMAD6348_ST_RX_ERR_SHIFT) +#define DMAD6348_ST_OS_ERR_SHIFT 4 +#define DMAD6348_ST_OS_ERR_MASK (1 << DMAD6348_ST_OS_ERR_SHIFT) +#define DMAD6348_ST_UN_ERR_SHIFT 9 +#define DMAD6348_ST_UN_ERR_MASK (1 << DMAD6348_ST_UN_ERR_SHIFT) + +struct bcm6348_dma_desc { + uint16_t length; + uint16_t status; + uint32_t address; +}; + +struct bcm6348_chan_priv { + void __iomem *dma_ring; + uint8_t dma_ring_size; + uint8_t desc_id; + uint8_t desc_cnt; + bool *busy_desc; + bool running; +}; + +struct bcm6348_iudma_hw { + uint16_t err_mask; +}; + +struct bcm6348_iudma_priv { + const struct bcm6348_iudma_hw *hw; + void __iomem *base; + void __iomem *chan; + void __iomem *sram; + struct bcm6348_chan_priv **ch_priv; + uint8_t n_channels; +}; + +static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch) +{ + return !(ch & 1); +} + +static inline void bcm6348_iudma_fdc(void *ptr, ulong size) +{ + ulong start = (ulong) ptr; + + flush_dcache_range(start, start + size); +} + +static inline void bcm6348_iudma_idc(void *ptr, ulong size) +{ + ulong start = (ulong) ptr; + + invalidate_dcache_range(start, start + size); +} + +static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv, + uint8_t ch) +{ + unsigned int timeout = DMA_CHAN_TOUT; + + do { + uint32_t cfg, halt; + + if (timeout > DMA_CHAN_TOUT / 2) + halt = DMAC_CFG_PKT_HALT_MASK; + else + halt = DMAC_CFG_BRST_HALT_MASK; + + /* try to stop dma channel */ + writel_be(halt, priv->chan + DMAC_CFG_REG(ch)); + mb(); + + /* check if channel was stopped */ + cfg = readl_be(priv->chan + DMAC_CFG_REG(ch)); + if (!(cfg & DMAC_CFG_ENABLE_MASK)) + break; + + udelay(1); + } while (--timeout); + + if (!timeout) + pr_err("unable to stop channel %u\n", ch); + + /* reset dma channel */ + setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch)); + mb(); + clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch)); +} + +static int bcm6348_iudma_disable(struct dma *dma) +{ 
+	struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
+	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
+
+	/* stop dma channel */
+	bcm6348_iudma_chan_stop(priv, dma->id);
+
+	/* dma flow control */
+	if (bcm6348_iudma_chan_is_rx(dma->id))
+		writel_be(DMA_FLOWC_ALLOC_FORCE_MASK,
+			  priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
+
+	/* init channel config */
+	ch_priv->running = false;
+	ch_priv->desc_id = 0;
+	if (bcm6348_iudma_chan_is_rx(dma->id))
+		ch_priv->desc_cnt = 0;
+	else
+		ch_priv->desc_cnt = ch_priv->dma_ring_size;
+
+	return 0;
+}
+
+static int bcm6348_iudma_enable(struct dma *dma)
+{
+	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
+	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
+	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
+	uint8_t i;
+
+	/* dma ring init */
+	for (i = 0; i < ch_priv->desc_cnt; i++) {
+		if (bcm6348_iudma_chan_is_rx(dma->id)) {
+			ch_priv->busy_desc[i] = false;
+			dma_desc->status |= DMAD_ST_OWN_MASK;
+		} else {
+			dma_desc->status = 0;
+			dma_desc->length = 0;
+			dma_desc->address = 0;
+		}
+
+		if (i == ch_priv->desc_cnt - 1)
+			dma_desc->status |= DMAD_ST_WRAP_MASK;
+
+		dma_desc++;
+	}
+
+	/* init to first descriptor */
+	ch_priv->desc_id = 0;
+
+	/* force cache writeback */
+	bcm6348_iudma_fdc(ch_priv->dma_ring,
+			  sizeof(*dma_desc) * ch_priv->desc_cnt);
+
+	/* clear sram */
+	writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id));
+	writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id));
+	writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id));
+
+	/* set dma ring start */
+	writel_be(virt_to_phys(ch_priv->dma_ring),
+		  priv->sram + DMAS_RSTART_REG(dma->id));
+
+	/* set flow control */
+	if (bcm6348_iudma_chan_is_rx(dma->id)) {
+		u32 val;
+
+		setbits_be32(priv->base + DMA_CFG_REG,
+			     DMA_CFG_FLOWC_ENABLE(dma->id));
+
+		val = ch_priv->desc_cnt / 3;
+		writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));
+
+		val = (ch_priv->desc_cnt * 2) / 3;
+		writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));
+
+		writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
+	}
+
+	/* set dma max burst */
+	writel_be(ch_priv->desc_cnt,
+		  priv->chan + DMAC_BURST_REG(dma->id));
+
+	/* kick rx dma channel */
+	if (bcm6348_iudma_chan_is_rx(dma->id))
+		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
+			     DMAC_CFG_ENABLE_MASK);
+
+	/* channel is now enabled */
+	ch_priv->running = true;
+
+	return 0;
+}
+
+static int bcm6348_iudma_request(struct dma *dma)
+{
+	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
+	struct bcm6348_chan_priv *ch_priv;
+
+	/* check if channel is valid */
+	if (dma->id >= priv->n_channels)
+		return -ENODEV;
+
+	/* alloc channel private data */
+	priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv));
+	if (!priv->ch_priv[dma->id])
+		return -ENOMEM;
+	ch_priv = priv->ch_priv[dma->id];
+
+	/* alloc dma ring */
+	if (bcm6348_iudma_chan_is_rx(dma->id))
+		ch_priv->dma_ring_size = DMA_RX_DESC;
+	else
+		ch_priv->dma_ring_size = DMA_TX_DESC;
+
+	ch_priv->dma_ring =
+		malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) *
+				     ch_priv->dma_ring_size);
+	if (!ch_priv->dma_ring)
+		return -ENOMEM;
+
+	/* init channel config */
+	ch_priv->running = false;
+	ch_priv->desc_id = 0;
+	if (bcm6348_iudma_chan_is_rx(dma->id)) {
+		ch_priv->desc_cnt = 0;
+		ch_priv->busy_desc = NULL;
+	} else {
+		ch_priv->desc_cnt = ch_priv->dma_ring_size;
+		ch_priv->busy_desc = calloc(ch_priv->desc_cnt, sizeof(bool));
+	}
+
+	return 0;
+}
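Between request() above and the receive path below, a consumer such as a network driver seeds the RX ring with dma_prepare_rcv_buf() and recycles each buffer once it has been processed. A hypothetical polling loop using the uclass API (example_rx_poll() is illustrative, not part of the driver):

```c
#include <dma.h>
#include <net.h>
#include <linux/string.h>

/* hypothetical consumer: poll one packet and recycle its buffer */
static int example_rx_poll(struct dma *rx, uchar *out)
{
	void *pkt = NULL;
	int len;

	len = dma_receive(rx, &pkt, NULL);
	if (len == -EAGAIN)
		return 0;		/* descriptor still owned by the DMA */
	if (len < 0)
		return len;

	memcpy(out, pkt, len);		/* consume the packet */

	/* hand the buffer back to the RX ring (free_rcv_buf path below) */
	dma_prepare_rcv_buf(rx, pkt, PKTSIZE_ALIGN);

	return len;
}
```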
+
+static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata)
+{
+	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
+	const struct bcm6348_iudma_hw *hw = priv->hw;
+	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
+	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
+	int ret;
+
+	if (!ch_priv->running)
+		return -EINVAL;
+
+	/* get dma ring descriptor address */
+	dma_desc += ch_priv->desc_id;
+
+	/* invalidate cache data */
+	bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));
+
+	/* check dma own */
+	if (dma_desc->status & DMAD_ST_OWN_MASK)
+		return -EAGAIN;
+
+	/* check pkt */
+	if (!(dma_desc->status & DMAD_ST_EOP_MASK) ||
+	    !(dma_desc->status & DMAD_ST_SOP_MASK) ||
+	    (dma_desc->status & hw->err_mask)) {
+		pr_err("invalid pkt received (ch=%ld desc=%u) (st=%04x)\n",
+		       dma->id, ch_priv->desc_id, dma_desc->status);
+		ret = -EAGAIN;
+	} else {
+		/* set dma buffer address */
+		*dst = phys_to_virt(dma_desc->address);
+
+		/* invalidate cache data */
+		bcm6348_iudma_idc(*dst, dma_desc->length);
+
+		/* return packet length */
+		ret = dma_desc->length;
+	}
+
+	/* busy dma descriptor */
+	ch_priv->busy_desc[ch_priv->desc_id] = true;
+
+	/* increment dma descriptor */
+	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;
+
+	return ret;
+}
+
+static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len,
+			      void *metadata)
+{
+	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
+	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
+	struct bcm6348_dma_desc *dma_desc;
+	uint16_t status;
+
+	if (!ch_priv->running)
+		return -EINVAL;
+
+	/* flush cache */
+	bcm6348_iudma_fdc(src, len);
+
+	/* get dma ring descriptor address */
+	dma_desc = ch_priv->dma_ring;
+	dma_desc += ch_priv->desc_id;
+
+	/* config dma descriptor */
+	status = (DMAD_ST_OWN_MASK |
+		  DMAD_ST_EOP_MASK |
+		  DMAD_ST_CRC_MASK |
+		  DMAD_ST_SOP_MASK);
+	if (ch_priv->desc_id == ch_priv->desc_cnt - 1)
+		status |= DMAD_ST_WRAP_MASK;
+
+	/* set dma descriptor */
+	dma_desc->address = virt_to_phys(src);
+	dma_desc->length = len;
+	dma_desc->status = status;
+
+	/* flush cache */
+	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));
+
+	/* kick tx dma channel */
+	setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK);
+
+	/* poll dma status */
+	do {
+		/* invalidate cache */
+		bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));
+
+		if (!(dma_desc->status & DMAD_ST_OWN_MASK))
+			break;
+	} while (1);
+
+	/* increment dma descriptor */
+	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;
+
+	return 0;
+}
+
+static int bcm6348_iudma_free_rcv_buf(struct dma *dma, void *dst, size_t size)
+{
+	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
+	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
+	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
+	uint16_t status;
+	uint8_t i;
+	u32 cfg;
+
+	/* get dirty dma descriptor */
+	for (i = 0; i < ch_priv->desc_cnt; i++) {
+		if (phys_to_virt(dma_desc->address) == dst)
+			break;
+
+		dma_desc++;
+	}
+
+	/* dma descriptor not found */
+	if (i == ch_priv->desc_cnt) {
+		pr_err("dirty dma descriptor not found\n");
+		return -ENOENT;
+	}
+
+	/* invalidate cache */
+	bcm6348_iudma_idc(ch_priv->dma_ring,
+			  sizeof(*dma_desc) * ch_priv->desc_cnt);
+
+	/* free dma descriptor */
+	ch_priv->busy_desc[i] = false;
+
+	status = DMAD_ST_OWN_MASK;
+	if (i == ch_priv->desc_cnt - 1)
+		status |= DMAD_ST_WRAP_MASK;
+
+	dma_desc->status |= status;
+	dma_desc->length = PKTSIZE_ALIGN;
+
+	/* tell dma we allocated one buffer */
+	writel_be(1, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
+
+	/* flush cache */
+	bcm6348_iudma_fdc(ch_priv->dma_ring,
+			  sizeof(*dma_desc) * ch_priv->desc_cnt);
+
+	/* kick rx dma channel if disabled */
+	cfg = readl_be(priv->chan + DMAC_CFG_REG(dma->id));
+	if (!(cfg & DMAC_CFG_ENABLE_MASK))
+		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
+			     DMAC_CFG_ENABLE_MASK);
+
+	return 0;
+}
+
+static int bcm6348_iudma_add_rcv_buf(struct dma *dma, void *dst, size_t size)
+{
+	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
+	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
+	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
+
+	/* no more dma descriptors available */
+	if (ch_priv->desc_cnt == ch_priv->dma_ring_size) {
+		pr_err("max number of buffers reached\n");
+		return -EINVAL;
+	}
+
+	/* get next dma descriptor */
+	dma_desc += ch_priv->desc_cnt;
+
+	/* init dma descriptor */
+	dma_desc->address = virt_to_phys(dst);
+	dma_desc->length = size;
+	dma_desc->status = 0;
+
+	/* flush cache */
+	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));
+
+	/* increment dma descriptors */
+	ch_priv->desc_cnt++;
+
+	return 0;
+}
+
+static int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst,
+					 size_t size)
+{
+	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
+	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
+
+	/* only add new rx buffers if channel isn't running */
+	if (ch_priv->running)
+		return bcm6348_iudma_free_rcv_buf(dma, dst, size);
+	else
+		return bcm6348_iudma_add_rcv_buf(dma, dst, size);
+}
+
+static const struct dma_ops bcm6348_iudma_ops = {
+	.disable = bcm6348_iudma_disable,
+	.enable = bcm6348_iudma_enable,
+	.prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
+	.request = bcm6348_iudma_request,
+	.receive = bcm6348_iudma_receive,
+	.send = bcm6348_iudma_send,
+};
+
+static const struct bcm6348_iudma_hw bcm6348_hw = {
+	.err_mask = (DMAD6348_ST_OV_ERR_MASK |
+		     DMAD6348_ST_CRC_ERR_MASK |
+		     DMAD6348_ST_RX_ERR_MASK |
+		     DMAD6348_ST_OS_ERR_MASK |
+		     DMAD6348_ST_UN_ERR_MASK),
+};
+
+static const struct bcm6348_iudma_hw bcm6368_hw = {
+	.err_mask = 0,
+};
+
+static const struct udevice_id bcm6348_iudma_ids[] = {
+	{
+		.compatible = "brcm,bcm6348-iudma",
+		.data = (ulong)&bcm6348_hw,
+	}, {
+		.compatible = "brcm,bcm6368-iudma",
+		.data = (ulong)&bcm6368_hw,
+	}, { /* sentinel */ }
+};
+
+static int bcm6348_iudma_probe(struct udevice *dev)
+{
+	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
+	struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
+	const struct bcm6348_iudma_hw *hw =
+		(const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
+	uint8_t ch;
+	int i;
+
+	uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
+			      DMA_SUPPORTS_MEM_TO_DEV);
+	priv->hw = hw;
+
+	/* dma global base address */
+	priv->base = dev_remap_addr_name(dev, "dma");
+	if (!priv->base)
+		return -EINVAL;
+
+	/* dma channels base address */
+	priv->chan = dev_remap_addr_name(dev, "dma-channels");
+	if (!priv->chan)
+		return -EINVAL;
+
+	/* dma sram base address */
+	priv->sram = dev_remap_addr_name(dev, "dma-sram");
+	if (!priv->sram)
+		return -EINVAL;
+
+	/* get number of channels */
+	priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);
+	if (priv->n_channels > DMA_CHAN_MAX)
+		return -EINVAL;
+
+	/* try to enable clocks */
+	for (i = 0; ; i++) {
+		struct clk clk;
+		int ret;
+
+		ret = clk_get_by_index(dev, i, &clk);
+		if (ret < 0)
+			break;
+
+		ret = clk_enable(&clk);
+		if (ret < 0) {
+			pr_err("error enabling clock %d\n", i);
+			return ret;
+		}
+	}
+
+	/* try to perform resets
*/ + for (i = 0; ; i++) { + struct reset_ctl reset; + int ret; + + ret = reset_get_by_index(dev, i, &reset); + if (ret < 0) + break; + + ret = reset_deassert(&reset); + if (ret < 0) { + pr_err("error deasserting reset %d\n", i); + return ret; + } + + ret = reset_free(&reset); + if (ret < 0) { + pr_err("error freeing reset %d\n", i); + return ret; + } + } + + /* disable dma controller */ + clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK); + + /* alloc channel private data pointers */ + priv->ch_priv = calloc(priv->n_channels, + sizeof(struct bcm6348_chan_priv*)); + if (!priv->ch_priv) + return -ENOMEM; + + /* stop dma channels */ + for (ch = 0; ch < priv->n_channels; ch++) + bcm6348_iudma_chan_stop(priv, ch); + + /* enable dma controller */ + setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK); + + return 0; +} + +U_BOOT_DRIVER(bcm6348_iudma) = { + .name = "bcm6348_iudma", + .id = UCLASS_DMA, + .of_match = bcm6348_iudma_ids, + .ops = &bcm6348_iudma_ops, + .priv_auto = sizeof(struct bcm6348_iudma_priv), + .probe = bcm6348_iudma_probe, +}; diff --git a/drivers/dma/dma-uclass.c b/drivers/dma/dma-uclass.c new file mode 100644 index 00000000000..2c76ba3fe32 --- /dev/null +++ b/drivers/dma/dma-uclass.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Direct Memory Access U-Class driver + * + * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com> + * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com> + * Written by Mugunthan V N <mugunthanvnm@ti.com> + * + * Author: Mugunthan V N <mugunthanvnm@ti.com> + */ + +#define LOG_CATEGORY UCLASS_DMA + +#include <cpu_func.h> +#include <dm.h> +#include <log.h> +#include <malloc.h> +#include <asm/cache.h> +#include <dm/read.h> +#include <dma-uclass.h> +#include <linux/dma-mapping.h> +#include <dt-structs.h> +#include <errno.h> +#include <linux/printk.h> + +#ifdef CONFIG_DMA_CHANNELS +static inline struct dma_ops *dma_dev_ops(struct udevice *dev) +{ + return (struct dma_ops *)dev->driver->ops; +} + +# if CONFIG_IS_ENABLED(OF_CONTROL) +static int dma_of_xlate_default(struct dma *dma, + struct ofnode_phandle_args *args) +{ + debug("%s(dma=%p)\n", __func__, dma); + + if (args->args_count > 1) { + pr_err("Invalid args_count: %d\n", args->args_count); + return -EINVAL; + } + + if (args->args_count) + dma->id = args->args[0]; + else + dma->id = 0; + + return 0; +} + +int dma_get_by_index(struct udevice *dev, int index, struct dma *dma) +{ + int ret; + struct ofnode_phandle_args args; + struct udevice *dev_dma; + const struct dma_ops *ops; + + debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma); + + assert(dma); + dma->dev = NULL; + + ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index, + &args); + if (ret) { + pr_err("%s: dev_read_phandle_with_args failed: err=%d\n", + __func__, ret); + return ret; + } + + ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma); + if (ret) { + pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n", + __func__, ret); + return ret; + } + + dma->dev = dev_dma; + + ops = dma_dev_ops(dev_dma); + + if (ops->of_xlate) + ret = ops->of_xlate(dma, &args); + else + ret = dma_of_xlate_default(dma, &args); + if (ret) { + pr_err("of_xlate() failed: %d\n", ret); + return ret; + } + + return dma_request(dev_dma, dma); +} + +int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma) +{ + int index; + + debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma); + dma->dev = NULL; + + index = 
dev_read_stringlist_search(dev, "dma-names", name); + if (index < 0) { + pr_err("dev_read_stringlist_search() failed: %d\n", index); + return index; + } + + return dma_get_by_index(dev, index, dma); +} +# endif /* OF_CONTROL */ + +int dma_request(struct udevice *dev, struct dma *dma) +{ + struct dma_ops *ops = dma_dev_ops(dev); + + debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma); + + dma->dev = dev; + + if (!ops->request) + return 0; + + return ops->request(dma); +} + +int dma_free(struct dma *dma) +{ + struct dma_ops *ops = dma_dev_ops(dma->dev); + + debug("%s(dma=%p)\n", __func__, dma); + + if (!ops->rfree) + return 0; + + return ops->rfree(dma); +} + +int dma_enable(struct dma *dma) +{ + struct dma_ops *ops = dma_dev_ops(dma->dev); + + debug("%s(dma=%p)\n", __func__, dma); + + if (!ops->enable) + return -ENOSYS; + + return ops->enable(dma); +} + +int dma_disable(struct dma *dma) +{ + struct dma_ops *ops = dma_dev_ops(dma->dev); + + debug("%s(dma=%p)\n", __func__, dma); + + if (!ops->disable) + return -ENOSYS; + + return ops->disable(dma); +} + +int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size) +{ + struct dma_ops *ops = dma_dev_ops(dma->dev); + + debug("%s(dma=%p)\n", __func__, dma); + + if (!ops->prepare_rcv_buf) + return -1; + + return ops->prepare_rcv_buf(dma, dst, size); +} + +int dma_receive(struct dma *dma, void **dst, void *metadata) +{ + struct dma_ops *ops = dma_dev_ops(dma->dev); + + debug("%s(dma=%p)\n", __func__, dma); + + if (!ops->receive) + return -ENOSYS; + + return ops->receive(dma, dst, metadata); +} + +int dma_send(struct dma *dma, void *src, size_t len, void *metadata) +{ + struct dma_ops *ops = dma_dev_ops(dma->dev); + + debug("%s(dma=%p)\n", __func__, dma); + + if (!ops->send) + return -ENOSYS; + + return ops->send(dma, src, len, metadata); +} + +int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data) +{ + struct dma_ops *ops = dma_dev_ops(dma->dev); + + debug("%s(dma=%p)\n", __func__, dma); + + if (!ops->get_cfg) + return -ENOSYS; + + return ops->get_cfg(dma, cfg_id, cfg_data); +} +#endif /* CONFIG_DMA_CHANNELS */ + +int dma_get_device(u32 transfer_type, struct udevice **devp) +{ + struct udevice *dev; + + for (uclass_first_device(UCLASS_DMA, &dev); dev; + uclass_next_device(&dev)) { + struct dma_dev_priv *uc_priv; + + uc_priv = dev_get_uclass_priv(dev); + if (uc_priv->supported & transfer_type) + break; + } + + if (!dev) { + pr_debug("No DMA device found that supports %x type\n", + transfer_type); + return -EPROTONOSUPPORT; + } + + *devp = dev; + + return 0; +} + +int dma_memcpy(void *dst, void *src, size_t len) +{ + struct udevice *dev; + const struct dma_ops *ops; + dma_addr_t destination; + dma_addr_t source; + int ret; + + ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev); + if (ret < 0) + return ret; + + ops = device_get_ops(dev); + if (!ops->transfer) + return -ENOSYS; + + /* Clean the areas, so no writeback into the RAM races with DMA */ + destination = dma_map_single(dst, len, DMA_FROM_DEVICE); + source = dma_map_single(src, len, DMA_TO_DEVICE); + + ret = ops->transfer(dev, DMA_MEM_TO_MEM, destination, source, len); + + /* Clean+Invalidate the areas after, so we can see DMA'd data */ + dma_unmap_single(destination, len, DMA_FROM_DEVICE); + dma_unmap_single(source, len, DMA_TO_DEVICE); + + return ret; +} + +UCLASS_DRIVER(dma) = { + .id = UCLASS_DMA, + .name = "dma", + .flags = DM_UC_FLAG_SEQ_ALIAS, + .per_device_auto = sizeof(struct dma_dev_priv), +}; diff --git a/drivers/dma/fsl_dma.c b/drivers/dma/fsl_dma.c new file mode 100644 
index 00000000000..e5d242f4f8f --- /dev/null +++ b/drivers/dma/fsl_dma.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2004,2007,2008 Freescale Semiconductor, Inc. + * (C) Copyright 2002, 2003 Motorola Inc. + * Xianghua Xiao (X.Xiao@motorola.com) + * + * (C) Copyright 2000 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + */ + +#include <config.h> +#include <asm/io.h> +#include <asm/fsl_dma.h> + +/* Controller can only transfer 2^26 - 1 bytes at a time */ +#define FSL_DMA_MAX_SIZE (0x3ffffff) + +#if defined(CONFIG_MPC83xx) +#define FSL_DMA_MR_DEFAULT (FSL_DMA_MR_CTM_DIRECT | FSL_DMA_MR_DMSEN) +#else +#define FSL_DMA_MR_DEFAULT (FSL_DMA_MR_BWC_DIS | FSL_DMA_MR_CTM_DIRECT) +#endif + +#if defined(CONFIG_MPC83xx) +dma83xx_t *dma_base = (void *)(CFG_SYS_MPC83xx_DMA_ADDR); +#elif defined(CONFIG_MPC85xx) +ccsr_dma_t *dma_base = (void *)(CFG_SYS_MPC85xx_DMA_ADDR); +#elif defined(CONFIG_MPC86xx) +ccsr_dma_t *dma_base = (void *)(CONFIG_SYS_MPC86xx_DMA_ADDR); +#else +#error "Freescale DMA engine not supported on your processor" +#endif + +static void dma_sync(void) +{ +#if defined(CONFIG_MPC85xx) + asm("sync; isync; msync"); +#elif defined(CONFIG_MPC86xx) + asm("sync; isync"); +#endif +} + +static void out_dma32(volatile unsigned *addr, int val) +{ +#if defined(CONFIG_MPC83xx) + out_le32(addr, val); +#else + out_be32(addr, val); +#endif +} + +static uint in_dma32(volatile unsigned *addr) +{ +#if defined(CONFIG_MPC83xx) + return in_le32(addr); +#else + return in_be32(addr); +#endif +} + +static uint dma_check(void) { + volatile fsl_dma_t *dma = &dma_base->dma[0]; + uint status; + + /* While the channel is busy, spin */ + do { + status = in_dma32(&dma->sr); + } while (status & FSL_DMA_SR_CB); + + /* clear MR[CS] channel start bit */ + out_dma32(&dma->mr, in_dma32(&dma->mr) & ~FSL_DMA_MR_CS); + dma_sync(); + + if (status != 0) + printf ("DMA Error: status = %x\n", status); + + return status; +} + +#if !defined(CONFIG_MPC83xx) +void dma_init(void) { + volatile fsl_dma_t *dma = &dma_base->dma[0]; + + out_dma32(&dma->satr, FSL_DMA_SATR_SREAD_SNOOP); + out_dma32(&dma->datr, FSL_DMA_DATR_DWRITE_SNOOP); + out_dma32(&dma->sr, 0xffffffff); /* clear any errors */ + dma_sync(); +} +#endif + +int dmacpy(phys_addr_t dest, phys_addr_t src, phys_size_t count) { + volatile fsl_dma_t *dma = &dma_base->dma[0]; + uint xfer_size; + + while (count) { + xfer_size = min(FSL_DMA_MAX_SIZE, count); + + out_dma32(&dma->dar, (u32) (dest & 0xFFFFFFFF)); + out_dma32(&dma->sar, (u32) (src & 0xFFFFFFFF)); +#if !defined(CONFIG_MPC83xx) + out_dma32(&dma->satr, + in_dma32(&dma->satr) | (u32)((u64)src >> 32)); + out_dma32(&dma->datr, + in_dma32(&dma->datr) | (u32)((u64)dest >> 32)); +#endif + out_dma32(&dma->bcr, xfer_size); + dma_sync(); + + /* Prepare mode register */ + out_dma32(&dma->mr, FSL_DMA_MR_DEFAULT); + dma_sync(); + + /* Start the transfer */ + out_dma32(&dma->mr, FSL_DMA_MR_DEFAULT | FSL_DMA_MR_CS); + + count -= xfer_size; + src += xfer_size; + dest += xfer_size; + + dma_sync(); + + if (dma_check()) + return -1; + } + + return 0; +} + +/* + * 85xx/86xx use dma to initialize SDRAM when !CONFIG_ECC_INIT_VIA_DDRCONTROLLER + */ +#if ((!defined CONFIG_MPC83xx && defined(CONFIG_DDR_ECC) && \ + !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER))) +void dma_meminit(uint size) +{ + uint *p = 0; + uint i = 0; + + for (*p = 0; p < (uint *)(8 * 1024); p++) { + if (((uint)p & 0x1f) == 0) + ppcDcbz((ulong)p); + + *p = (uint)0xDEADBEEF; + + if (((uint)p & 0x1c) == 0x1c) + ppcDcbf((ulong)p); + } + + 
+	/* each copy doubles the initialised region, from 8K up to 8M */
+	dmacpy(0x002000, 0, 0x002000);	/* 8K */
+	dmacpy(0x004000, 0, 0x004000);	/* 16K */
+	dmacpy(0x008000, 0, 0x008000);	/* 32K */
+	dmacpy(0x010000, 0, 0x010000);	/* 64K */
+	dmacpy(0x020000, 0, 0x020000);	/* 128K */
+	dmacpy(0x040000, 0, 0x040000);	/* 256K */
+	dmacpy(0x080000, 0, 0x080000);	/* 512K */
+	dmacpy(0x100000, 0, 0x100000);	/* 1M */
+	dmacpy(0x200000, 0, 0x200000);	/* 2M */
+	dmacpy(0x400000, 0, 0x400000);	/* 4M */
+
+	/* then fill the rest of memory in 8M chunks */
+	for (i = 1; i < size / 0x800000; i++)
+		dmacpy((0x800000 * i), 0, 0x800000);
+}
+#endif
diff --git a/drivers/dma/keystone_nav.c b/drivers/dma/keystone_nav.c
new file mode 100644
index 00000000000..c84db454bfd
--- /dev/null
+++ b/drivers/dma/keystone_nav.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Multicore Navigator driver for TI Keystone 2 devices.
+ *
+ * (C) Copyright 2012-2014
+ *     Texas Instruments Incorporated, <www.ti.com>
+ */
+#include <asm/io.h>
+#include <asm/ti-common/keystone_nav.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+
+struct qm_config qm_memmap = {
+	.stat_cfg = KS2_QM_QUEUE_STATUS_BASE,
+	.queue = (void *)KS2_QM_MANAGER_QUEUES_BASE,
+	.mngr_vbusm = KS2_QM_BASE_ADDRESS,
+	.i_lram = KS2_QM_LINK_RAM_BASE,
+	.proxy = (void *)KS2_QM_MANAGER_Q_PROXY_BASE,
+	.status_ram = KS2_QM_STATUS_RAM_BASE,
+	.mngr_cfg = (void *)KS2_QM_CONF_BASE,
+	.intd_cfg = KS2_QM_INTD_CONF_BASE,
+	.desc_mem = (void *)KS2_QM_DESC_SETUP_BASE,
+	.region_num = KS2_QM_REGION_NUM,
+	.pdsp_cmd = KS2_QM_PDSP1_CMD_BASE,
+	.pdsp_ctl = KS2_QM_PDSP1_CTRL_BASE,
+	.pdsp_iram = KS2_QM_PDSP1_IRAM_BASE,
+	.qpool_num = KS2_QM_QPOOL_NUM,
+};
+
+/*
+ * We use only one type of descriptor: the host packet descriptor.
+ * We statically allocate memory for them here.
+ */
+struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));
+
+static struct qm_config *qm_cfg;
+
+inline int num_of_desc_to_reg(int num_descr)
+{
+	int j, num;
+
+	for (j = 0, num = 32; j < 15; j++, num *= 2) {
+		if (num_descr <= num)
+			return j;
+	}
+
+	return 15;
+}
+
+int _qm_init(struct qm_config *cfg)
+{
+	u32 j;
+
+	qm_cfg = cfg;
+
+	qm_cfg->mngr_cfg->link_ram_base0 = qm_cfg->i_lram;
+	qm_cfg->mngr_cfg->link_ram_size0 = HDESC_NUM * 8 - 1;
+	qm_cfg->mngr_cfg->link_ram_base1 = 0;
+	qm_cfg->mngr_cfg->link_ram_size1 = 0;
+	qm_cfg->mngr_cfg->link_ram_base2 = 0;
+
+	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
+	qm_cfg->desc_mem[0].start_idx = 0;
+	qm_cfg->desc_mem[0].desc_reg_size =
+		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
+		num_of_desc_to_reg(HDESC_NUM);
+
+	memset(desc_pool, 0, sizeof(desc_pool));
+	for (j = 0; j < HDESC_NUM; j++)
+		qm_push(&desc_pool[j], qm_cfg->qpool_num);
+
+	return QM_OK;
+}
+
+int qm_init(void)
+{
+	return _qm_init(&qm_memmap);
+}
+
+void qm_close(void)
+{
+	u32 j;
+
+	queue_close(qm_cfg->qpool_num);
+
+	qm_cfg->mngr_cfg->link_ram_base0 = 0;
+	qm_cfg->mngr_cfg->link_ram_size0 = 0;
+	qm_cfg->mngr_cfg->link_ram_base1 = 0;
+	qm_cfg->mngr_cfg->link_ram_size1 = 0;
+	qm_cfg->mngr_cfg->link_ram_base2 = 0;
+
+	for (j = 0; j < qm_cfg->region_num; j++) {
+		qm_cfg->desc_mem[j].base_addr = 0;
+		qm_cfg->desc_mem[j].start_idx = 0;
+		qm_cfg->desc_mem[j].desc_reg_size = 0;
+	}
+
+	qm_cfg = NULL;
+}
+
+void qm_push(struct qm_host_desc *hd, u32 qnum)
+{
+	u32 regd;
+
+	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);
+	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
+	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
+}
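qm_push() above packs the 16-byte-aligned descriptor pointer and a size hint into one queue-register word; qm_pop() below recovers the pointer by masking off the low four bits. A standalone illustration of this encoding, assuming a 64-byte struct qm_host_desc (the actual size comes from keystone_nav.h):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t hd = 0x87001040;		/* 16-byte-aligned descriptor */
	uint32_t dsize = 64;			/* assumed descriptor size */
	uint32_t regd = hd | ((dsize >> 4) - 1); /* what qm_push() writes */

	assert((regd & ~0xfu) == hd);		/* qm_pop() recovers the pointer */
	assert((regd & 0xfu) == 3);		/* 64 bytes = four 16-byte units */
	return 0;
}
```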
+
+void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
+		  void *buff_ptr, u32 buff_len)
+{
+	hd->orig_buff_len = buff_len;
+	hd->buff_len = buff_len;
+	hd->orig_buff_ptr = (u32)buff_ptr;
+	hd->buff_ptr = (u32)buff_ptr;
+	qm_push(hd, qnum);
+}
+
+struct qm_host_desc *qm_pop(u32 qnum)
+{
+	u32 uhd;
+
+	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
+	if (uhd)
+		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);
+
+	return (struct qm_host_desc *)uhd;
+}
+
+struct qm_host_desc *qm_pop_from_free_pool(void)
+{
+	return qm_pop(qm_cfg->qpool_num);
+}
+
+void queue_close(u32 qnum)
+{
+	struct qm_host_desc *hd;
+
+	while ((hd = qm_pop(qnum)))
+		;
+}
+
+/*
+ * DMA API
+ */
+
+static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
+{
+	u32 j, v, k;
+
+	for (j = 0; j < pktdma->rx_ch_num; j++) {
+		v = readl(&pktdma->rx_ch[j].cfg_a);
+		if (!(v & CPDMA_CHAN_A_ENABLE))
+			continue;
+
+		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
+		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
+			udelay(100);
+			v = readl(&pktdma->rx_ch[j].cfg_a);
+			if (!(v & CPDMA_CHAN_A_ENABLE))
+				continue;
+		}
+		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
+	}
+
+	/* Clear all of the flow registers */
+	for (j = 0; j < pktdma->rx_flow_num; j++) {
+		writel(0, &pktdma->rx_flows[j].control);
+		writel(0, &pktdma->rx_flows[j].tags);
+		writel(0, &pktdma->rx_flows[j].tag_sel);
+		writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
+		writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
+		writel(0, &pktdma->rx_flows[j].thresh[0]);
+		writel(0, &pktdma->rx_flows[j].thresh[1]);
+		writel(0, &pktdma->rx_flows[j].thresh[2]);
+	}
+
+	return QM_OK;
+}
+
+static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
+{
+	u32 j, v, k;
+
+	for (j = 0; j < pktdma->tx_ch_num; j++) {
+		v = readl(&pktdma->tx_ch[j].cfg_a);
+		if (!(v & CPDMA_CHAN_A_ENABLE))
+			continue;
+
+		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
+		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
+			udelay(100);
+			v = readl(&pktdma->tx_ch[j].cfg_a);
+			if (!(v & CPDMA_CHAN_A_ENABLE))
+				continue;
+		}
+		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
+	}
+
+	return QM_OK;
+}
+
+int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
+{
+	u32 j, v;
+	struct qm_host_desc *hd;
+	u8 *rx_ptr;
+
+	if (pktdma == NULL || rx_buffers == NULL ||
+	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
+		return QM_ERR;
+
+	pktdma->rx_flow = rx_buffers->rx_flow;
+
+	/* init rx queue */
+	rx_ptr = rx_buffers->buff_ptr;
+
+	for (j = 0; j < rx_buffers->num_buffs; j++) {
+		hd = qm_pop(qm_cfg->qpool_num);
+		if (hd == NULL)
+			return QM_ERR;
+
+		qm_buff_push(hd, pktdma->rx_free_q,
+			     rx_ptr, rx_buffers->buff_len);
+
+		rx_ptr += rx_buffers->buff_len;
+	}
+
+	ksnav_rx_disable(pktdma);
+
+	/* configure rx channels */
+	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
+	writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
+	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
+	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);
+
+	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
+					 pktdma->rx_free_q);
+
+	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
+	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
+	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
+	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
+	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);
+
+	for (j = 0; j < pktdma->rx_ch_num; j++)
+		writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);
+
+	/* configure tx channels */
+	/* Disable loopback in the tx direction */
+	writel(0, &pktdma->global->emulation_control);
+
+	/* Set QM base
address, only for K2x devices */ + writel(KS2_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]); + + /* Enable all channels. The current state isn't important */ + for (j = 0; j < pktdma->tx_ch_num; j++) { + writel(0, &pktdma->tx_ch[j].cfg_b); + writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a); + } + + return QM_OK; +} + +int ksnav_close(struct pktdma_cfg *pktdma) +{ + if (!pktdma) + return QM_ERR; + + ksnav_tx_disable(pktdma); + ksnav_rx_disable(pktdma); + + queue_close(pktdma->rx_free_q); + queue_close(pktdma->rx_rcv_q); + queue_close(pktdma->tx_snd_q); + + return QM_OK; +} + +int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2) +{ + struct qm_host_desc *hd; + + hd = qm_pop(qm_cfg->qpool_num); + if (hd == NULL) + return QM_ERR; + + hd->desc_info = num_bytes; + hd->swinfo[2] = swinfo2; + hd->packet_info = qm_cfg->qpool_num; + + qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes); + + return QM_OK; +} + +void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes) +{ + struct qm_host_desc *hd; + + hd = qm_pop(pktdma->rx_rcv_q); + if (!hd) + return NULL; + + *pkt = (u32 *)hd->buff_ptr; + *num_bytes = hd->desc_info & 0x3fffff; + + return hd; +} + +void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd) +{ + struct qm_host_desc *_hd = (struct qm_host_desc *)hd; + + _hd->buff_len = _hd->orig_buff_len; + _hd->buff_ptr = _hd->orig_buff_ptr; + + qm_push(_hd, pktdma->rx_free_q); +} diff --git a/drivers/dma/keystone_nav_cfg.c b/drivers/dma/keystone_nav_cfg.c new file mode 100644 index 00000000000..301419b6fda --- /dev/null +++ b/drivers/dma/keystone_nav_cfg.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Multicore Navigator driver for TI Keystone 2 devices. + * + * (C) Copyright 2012-2014 + * Texas Instruments Incorporated, <www.ti.com> + */ + +#include <asm/ti-common/keystone_nav.h> + +/* NETCP Pktdma */ +struct pktdma_cfg netcp_pktdma = { + .global = (void *)KS2_NETCP_PDMA_CTRL_BASE, + .tx_ch = (void *)KS2_NETCP_PDMA_TX_BASE, + .tx_ch_num = KS2_NETCP_PDMA_TX_CH_NUM, + .rx_ch = (void *)KS2_NETCP_PDMA_RX_BASE, + .rx_ch_num = KS2_NETCP_PDMA_RX_CH_NUM, + .tx_sched = (u32 *)KS2_NETCP_PDMA_SCHED_BASE, + .rx_flows = (void *)KS2_NETCP_PDMA_RX_FLOW_BASE, + .rx_flow_num = KS2_NETCP_PDMA_RX_FLOW_NUM, + .rx_free_q = KS2_NETCP_PDMA_RX_FREE_QUEUE, + .rx_rcv_q = KS2_NETCP_PDMA_RX_RCV_QUEUE, + .tx_snd_q = KS2_NETCP_PDMA_TX_SND_QUEUE, +}; diff --git a/drivers/dma/sandbox-dma-test.c b/drivers/dma/sandbox-dma-test.c new file mode 100644 index 00000000000..0290b93340f --- /dev/null +++ b/drivers/dma/sandbox-dma-test.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Direct Memory Access U-Class Simulation driver + * + * Copyright (C) 2018 Texas Instruments Incorporated <www.ti.com> + * + * Author: Grygorii Strashko <grygorii.strashko@ti.com> + */ + +#include <dm.h> +#include <log.h> +#include <malloc.h> +#include <dm/read.h> +#include <dma-uclass.h> +#include <dt-structs.h> +#include <errno.h> +#include <linux/printk.h> + +#define SANDBOX_DMA_CH_CNT 3 +#define SANDBOX_DMA_BUF_SIZE 1024 + +struct sandbox_dma_chan { + struct sandbox_dma_dev *ud; + char name[20]; + u32 id; + enum dma_direction dir; + bool in_use; + bool enabled; +}; + +struct sandbox_dma_dev { + struct device *dev; + u32 ch_count; + struct sandbox_dma_chan channels[SANDBOX_DMA_CH_CNT]; + uchar buf[SANDBOX_DMA_BUF_SIZE]; + uchar *buf_rx; + size_t data_len; + u32 meta; +}; + +static int sandbox_dma_transfer(struct udevice *dev, int direction, + dma_addr_t dst, dma_addr_t src, 
size_t len) +{ + memcpy((void *)dst, (void *)src, len); + + return 0; +} + +static int sandbox_dma_of_xlate(struct dma *dma, + struct ofnode_phandle_args *args) +{ + struct sandbox_dma_dev *ud = dev_get_priv(dma->dev); + struct sandbox_dma_chan *uc; + + debug("%s(dma id=%u)\n", __func__, args->args[0]); + + if (args->args[0] >= SANDBOX_DMA_CH_CNT) + return -EINVAL; + + dma->id = args->args[0]; + + uc = &ud->channels[dma->id]; + + if (dma->id == 1) + uc->dir = DMA_MEM_TO_DEV; + else if (dma->id == 2) + uc->dir = DMA_DEV_TO_MEM; + else + uc->dir = DMA_MEM_TO_MEM; + debug("%s(dma id=%lu dir=%d)\n", __func__, dma->id, uc->dir); + + return 0; +} + +static int sandbox_dma_request(struct dma *dma) +{ + struct sandbox_dma_dev *ud = dev_get_priv(dma->dev); + struct sandbox_dma_chan *uc; + + if (dma->id >= SANDBOX_DMA_CH_CNT) + return -EINVAL; + + uc = &ud->channels[dma->id]; + if (uc->in_use) + return -EBUSY; + + uc->in_use = true; + debug("%s(dma id=%lu in_use=%d)\n", __func__, dma->id, uc->in_use); + + return 0; +} + +static int sandbox_dma_rfree(struct dma *dma) +{ + struct sandbox_dma_dev *ud = dev_get_priv(dma->dev); + struct sandbox_dma_chan *uc; + + if (dma->id >= SANDBOX_DMA_CH_CNT) + return -EINVAL; + + uc = &ud->channels[dma->id]; + if (!uc->in_use) + return -EINVAL; + + uc->in_use = false; + ud->buf_rx = NULL; + ud->data_len = 0; + debug("%s(dma id=%lu in_use=%d)\n", __func__, dma->id, uc->in_use); + + return 0; +} + +static int sandbox_dma_enable(struct dma *dma) +{ + struct sandbox_dma_dev *ud = dev_get_priv(dma->dev); + struct sandbox_dma_chan *uc; + + if (dma->id >= SANDBOX_DMA_CH_CNT) + return -EINVAL; + + uc = &ud->channels[dma->id]; + if (!uc->in_use) + return -EINVAL; + if (uc->enabled) + return -EINVAL; + + uc->enabled = true; + debug("%s(dma id=%lu enabled=%d)\n", __func__, dma->id, uc->enabled); + + return 0; +} + +static int sandbox_dma_disable(struct dma *dma) +{ + struct sandbox_dma_dev *ud = dev_get_priv(dma->dev); + struct sandbox_dma_chan *uc; + + if (dma->id >= SANDBOX_DMA_CH_CNT) + return -EINVAL; + + uc = &ud->channels[dma->id]; + if (!uc->in_use) + return -EINVAL; + if (!uc->enabled) + return -EINVAL; + + uc->enabled = false; + debug("%s(dma id=%lu enabled=%d)\n", __func__, dma->id, uc->enabled); + + return 0; +} + +static int sandbox_dma_send(struct dma *dma, + void *src, size_t len, void *metadata) +{ + struct sandbox_dma_dev *ud = dev_get_priv(dma->dev); + struct sandbox_dma_chan *uc; + + if (dma->id >= SANDBOX_DMA_CH_CNT) + return -EINVAL; + if (!src || !metadata) + return -EINVAL; + + debug("%s(dma id=%lu)\n", __func__, dma->id); + + uc = &ud->channels[dma->id]; + if (uc->dir != DMA_MEM_TO_DEV) + return -EINVAL; + if (!uc->in_use) + return -EINVAL; + if (!uc->enabled) + return -EINVAL; + if (len >= SANDBOX_DMA_BUF_SIZE) + return -EINVAL; + + memcpy(ud->buf, src, len); + ud->data_len = len; + ud->meta = *((u32 *)metadata); + + debug("%s(dma id=%lu len=%zu meta=%08x)\n", + __func__, dma->id, len, ud->meta); + + return 0; +} + +static int sandbox_dma_receive(struct dma *dma, void **dst, void *metadata) +{ + struct sandbox_dma_dev *ud = dev_get_priv(dma->dev); + struct sandbox_dma_chan *uc; + + if (dma->id >= SANDBOX_DMA_CH_CNT) + return -EINVAL; + if (!dst || !metadata) + return -EINVAL; + + uc = &ud->channels[dma->id]; + if (uc->dir != DMA_DEV_TO_MEM) + return -EINVAL; + if (!uc->in_use) + return -EINVAL; + if (!uc->enabled) + return -EINVAL; + if (!ud->data_len) + return 0; + + if (ud->buf_rx) { + memcpy(ud->buf_rx, ud->buf, ud->data_len); + *dst = ud->buf_rx; + 
} else {
+		memcpy(*dst, ud->buf, ud->data_len);
+	}
+
+	*((u32 *)metadata) = ud->meta;
+
+	debug("%s(dma id=%lu len=%zu meta=%08x %p)\n",
+	      __func__, dma->id, ud->data_len, ud->meta, *dst);
+
+	return ud->data_len;
+}
+
+static int sandbox_dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
+{
+	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
+
+	ud->buf_rx = dst;
+
+	return 0;
+}
+
+static const struct dma_ops sandbox_dma_ops = {
+	.transfer	= sandbox_dma_transfer,
+	.of_xlate	= sandbox_dma_of_xlate,
+	.request	= sandbox_dma_request,
+	.rfree		= sandbox_dma_rfree,
+	.enable		= sandbox_dma_enable,
+	.disable	= sandbox_dma_disable,
+	.send		= sandbox_dma_send,
+	.receive	= sandbox_dma_receive,
+	.prepare_rcv_buf = sandbox_dma_prepare_rcv_buf,
+};
+
+static int sandbox_dma_probe(struct udevice *dev)
+{
+	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
+	struct sandbox_dma_dev *ud = dev_get_priv(dev);
+	int i, ret = 0;
+
+	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM |
+			     DMA_SUPPORTS_MEM_TO_DEV |
+			     DMA_SUPPORTS_DEV_TO_MEM;
+
+	ud->ch_count = SANDBOX_DMA_CH_CNT;
+	ud->buf_rx = NULL;
+	ud->meta = 0;
+	ud->data_len = 0;
+
+	pr_err("Number of channels: %u\n", ud->ch_count);
+
+	for (i = 0; i < ud->ch_count; i++) {
+		struct sandbox_dma_chan *uc = &ud->channels[i];
+
+		uc->ud = ud;
+		uc->id = i;
+		sprintf(uc->name, "DMA chan%d", i);
+		uc->in_use = false;
+		uc->enabled = false;
+	}
+
+	return ret;
+}
+
+static const struct udevice_id sandbox_dma_ids[] = {
+	{ .compatible = "sandbox,dma" },
+	{ }
+};
+
+U_BOOT_DRIVER(sandbox_dma) = {
+	.name	= "sandbox-dma",
+	.id	= UCLASS_DMA,
+	.of_match = sandbox_dma_ids,
+	.ops	= &sandbox_dma_ops,
+	.probe	= sandbox_dma_probe,
+	.priv_auto	= sizeof(struct sandbox_dma_dev),
+};
diff --git a/drivers/dma/ti-edma3.c b/drivers/dma/ti-edma3.c
new file mode 100644
index 00000000000..d64059f39ab
--- /dev/null
+++ b/drivers/dma/ti-edma3.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Enhanced Direct Memory Access (EDMA3) Controller
+ *
+ * (C) Copyright 2014
+ *     Texas Instruments Incorporated, <www.ti.com>
+ *
+ * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ */
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <dm.h>
+#include <dma-uclass.h>
+#include <linux/dma-mapping.h>
+#include <asm/omap_common.h>
+#include <asm/ti-common/ti-edma3.h>
+#include <linux/printk.h>
+
+#define EDMA3_SL_BASE(slot)		(0x4000 + ((slot) << 5))
+#define EDMA3_SL_MAX_NUM		512
+#define EDMA3_SLOPT_FIFO_WIDTH_MASK	(0x7 << 8)
+
+#define EDMA3_QCHMAP(ch)		(0x0200 + ((ch) << 2))
+#define EDMA3_CHMAP_PARSET_MASK		0x1ff
+#define EDMA3_CHMAP_PARSET_SHIFT	0x5
+#define EDMA3_CHMAP_TRIGWORD_SHIFT	0x2
+
+#define EDMA3_QEMCR			0x314
+#define EDMA3_IPR			0x1068
+#define EDMA3_IPRH			0x106c
+#define EDMA3_ICR			0x1070
+#define EDMA3_ICRH			0x1074
+#define EDMA3_QEECR			0x1088
+#define EDMA3_QEESR			0x108c
+#define EDMA3_QSECR			0x1094
+
+#define EDMA_FILL_BUFFER_SIZE		512
+
+struct ti_edma3_priv {
+	u32 base;
+};
+
+static u8 edma_fill_buffer[EDMA_FILL_BUFFER_SIZE] __aligned(ARCH_DMA_MINALIGN);
+
+/**
+ * qedma3_start - start qdma on a channel
+ * @base: base address of edma
+ * @cfg: pointer to struct edma3_channel_config where you set the slot
+ * number to associate with, the chnum (your quick channel number, 0-7),
+ * the complete code (transfer completion code) and the trigger slot word,
+ * which has to correspond to the word number in struct edma3_slot_layout
+ * used for generating the event.
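+ *
+ * For illustration only (an editor's sketch mirroring __edma3_transfer()
+ * later in this file; the field values are examples, not requirements):
+ *
+ *	struct edma3_channel_config cfg = {
+ *		.slot = slot,
+ *		.chnum = 0,
+ *		.complete_code = 0,
+ *		.trigger_slot_word = EDMA3_TWORD(dst),
+ *	};
+ *	qedma3_start(base, &cfg);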
+ *
+ */
+void qedma3_start(u32 base, struct edma3_channel_config *cfg)
+{
+	u32 qchmap;
+
+	/* Clear the pending int bit */
+	if (cfg->complete_code < 32)
+		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
+	else
+		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICRH);
+
+	/* Map the parameter set and trigger word to the quick channel */
+	qchmap = ((EDMA3_CHMAP_PARSET_MASK & cfg->slot)
+		  << EDMA3_CHMAP_PARSET_SHIFT) |
+		 (cfg->trigger_slot_word << EDMA3_CHMAP_TRIGWORD_SHIFT);
+
+	__raw_writel(qchmap, base + EDMA3_QCHMAP(cfg->chnum));
+
+	/* Clear missed event if set */
+	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
+	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);
+
+	/* Enable qdma channel event */
+	__raw_writel(1 << cfg->chnum, base + EDMA3_QEESR);
+}
+
+/**
+ * edma3_set_dest - set initial DMA destination address in parameter RAM slot
+ * @base: base address of edma
+ * @slot: parameter RAM slot being configured
+ * @dst: physical address of destination (memory, controller FIFO, etc)
+ * @mode: INCR, except in very rare cases
+ * @width: ignored unless @mode is FIFO, in which case it specifies the
+ * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
+ *
+ * Note that the destination address is modified during the DMA transfer
+ * according to edma3_set_dest_index().
+ */
+void edma3_set_dest(u32 base, int slot, u32 dst, enum edma3_address_mode mode,
+		    enum edma3_fifo_width width)
+{
+	u32 opt;
+	struct edma3_slot_layout *rg;
+
+	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
+
+	opt = __raw_readl(&rg->opt);
+	if (mode == FIFO)
+		opt = (opt & EDMA3_SLOPT_FIFO_WIDTH_MASK) |
+		      (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
+		       EDMA3_SLOPT_FIFO_WIDTH_SET(width));
+	else
+		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;
+
+	__raw_writel(opt, &rg->opt);
+	__raw_writel(dst, &rg->dst);
+}
+
+/**
+ * edma3_set_dest_index - configure DMA destination address indexing
+ * @base: base address of edma
+ * @slot: parameter RAM slot being configured
+ * @bidx: byte offset between destination arrays in a frame
+ * @cidx: byte offset between destination frames in a block
+ *
+ * Offsets are specified to support either contiguous or discontiguous
+ * memory transfers, or repeated access to a hardware register, as needed.
+ * When accessing hardware registers, both offsets are normally zero.
+ */
+void edma3_set_dest_index(u32 base, unsigned slot, int bidx, int cidx)
+{
+	u32 src_dst_bidx;
+	u32 src_dst_cidx;
+	struct edma3_slot_layout *rg;
+
+	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
+
+	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
+	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);
+
+	__raw_writel((src_dst_bidx & 0x0000ffff) | (bidx << 16),
+		     &rg->src_dst_bidx);
+	__raw_writel((src_dst_cidx & 0x0000ffff) | (cidx << 16),
+		     &rg->src_dst_cidx);
+}
+
+/**
+ * edma3_set_dest_addr - set destination address for slot only
+ */
+void edma3_set_dest_addr(u32 base, int slot, u32 dst)
+{
+	struct edma3_slot_layout *rg;
+
+	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
+	__raw_writel(dst, &rg->dst);
+}
+
+/**
+ * edma3_set_src - set initial DMA source address in parameter RAM slot
+ * @base: base address of edma
+ * @slot: parameter RAM slot being configured
+ * @src: physical address of source (memory, controller FIFO, etc)
+ * @mode: INCR, except in very rare cases
+ * @width: ignored unless @mode is FIFO, in which case it specifies the
+ * width to use when addressing the fifo (e.g.
W8BIT, W32BIT)
+ *
+ * Note that the source address is modified during the DMA transfer
+ * according to edma3_set_src_index().
+ */
+void edma3_set_src(u32 base, int slot, u32 src, enum edma3_address_mode mode,
+		   enum edma3_fifo_width width)
+{
+	u32 opt;
+	struct edma3_slot_layout *rg;
+
+	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
+
+	opt = __raw_readl(&rg->opt);
+	if (mode == FIFO)
+		opt = (opt & EDMA3_SLOPT_FIFO_WIDTH_MASK) |
+		      (EDMA3_SLOPT_SRC_ADDR_CONST_MODE |
+		       EDMA3_SLOPT_FIFO_WIDTH_SET(width));
+	else
+		opt &= ~EDMA3_SLOPT_SRC_ADDR_CONST_MODE;
+
+	__raw_writel(opt, &rg->opt);
+	__raw_writel(src, &rg->src);
+}
+
+/**
+ * edma3_set_src_index - configure DMA source address indexing
+ * @base: base address of edma
+ * @slot: parameter RAM slot being configured
+ * @bidx: byte offset between source arrays in a frame
+ * @cidx: byte offset between source frames in a block
+ *
+ * Offsets are specified to support either contiguous or discontiguous
+ * memory transfers, or repeated access to a hardware register, as needed.
+ * When accessing hardware registers, both offsets are normally zero.
+ */
+void edma3_set_src_index(u32 base, unsigned slot, int bidx, int cidx)
+{
+	u32 src_dst_bidx;
+	u32 src_dst_cidx;
+	struct edma3_slot_layout *rg;
+
+	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
+
+	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
+	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);
+
+	__raw_writel((src_dst_bidx & 0xffff0000) | bidx,
+		     &rg->src_dst_bidx);
+	__raw_writel((src_dst_cidx & 0xffff0000) | cidx,
+		     &rg->src_dst_cidx);
+}
+
+/**
+ * edma3_set_src_addr - set source address for slot only
+ */
+void edma3_set_src_addr(u32 base, int slot, u32 src)
+{
+	struct edma3_slot_layout *rg;
+
+	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
+	__raw_writel(src, &rg->src);
+}
+
+/**
+ * edma3_set_transfer_params - configure DMA transfer parameters
+ * @base: base address of edma
+ * @slot: parameter RAM slot being configured
+ * @acnt: how many bytes per array (at least one)
+ * @bcnt: how many arrays per frame (at least one)
+ * @ccnt: how many frames per block (at least one)
+ * @bcnt_rld: used only for A-Synchronized transfers; this specifies
+ * the value to reload into bcnt when it decrements to zero
+ * @sync_mode: ASYNC or ABSYNC
+ *
+ * See the EDMA3 documentation to understand how to configure and link
+ * transfers using the fields in PaRAM slots. If you are not doing it
+ * all at once with edma3_write_slot(), you will use this routine
+ * plus two calls each for source and destination, setting the initial
+ * address and saying how to index that address.
+ *
+ * An example of an A-Synchronized transfer is a serial link using a
+ * single word shift register. In that case, @acnt would be equal to
+ * that word size; the serial controller issues a DMA synchronization
+ * event to transfer each word, and memory access by the DMA transfer
+ * controller will be word-at-a-time.
+ *
+ * An example of an AB-Synchronized transfer is a device using a FIFO.
+ * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
+ * The controller with the FIFO issues DMA synchronization events when
+ * the FIFO threshold is reached, and the DMA transfer controller will
+ * transfer one frame to (or from) the FIFO. It will probably use
+ * efficient burst modes to access memory.
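+ *
+ * A minimal setup sketch (editor's illustration, patterned on
+ * __edma3_transfer() later in this file; the slot number, addresses and
+ * sizes are assumed example values): an AB-synchronized copy of one
+ * 4096-byte array could be programmed as
+ *
+ *	edma3_set_transfer_params(base, slot, 4096, 1, 1, 0, ABSYNC);
+ *	edma3_set_src(base, slot, src_pa, INCR, W8BIT);
+ *	edma3_set_src_index(base, slot, 4096, 0);
+ *	edma3_set_dest(base, slot, dst_pa, INCR, W8BIT);
+ *	edma3_set_dest_index(base, slot, 4096, 0);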
+ */
+void edma3_set_transfer_params(u32 base, int slot, int acnt,
+			       int bcnt, int ccnt, u16 bcnt_rld,
+			       enum edma3_sync_dimension sync_mode)
+{
+	u32 opt;
+	u32 link_bcntrld;
+	struct edma3_slot_layout *rg;
+
+	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
+
+	link_bcntrld = __raw_readl(&rg->link_bcntrld);
+
+	__raw_writel((bcnt_rld << 16) | (0x0000ffff & link_bcntrld),
+		     &rg->link_bcntrld);
+
+	opt = __raw_readl(&rg->opt);
+	if (sync_mode == ASYNC)
+		__raw_writel(opt & ~EDMA3_SLOPT_AB_SYNC, &rg->opt);
+	else
+		__raw_writel(opt | EDMA3_SLOPT_AB_SYNC, &rg->opt);
+
+	/* Set the acount, bcount, ccount registers */
+	__raw_writel((bcnt << 16) | (acnt & 0xffff), &rg->a_b_cnt);
+	__raw_writel(0xffff & ccnt, &rg->ccnt);
+}
+
+/**
+ * edma3_write_slot - write parameter RAM data for slot
+ * @base: base address of edma
+ * @slot: number of parameter RAM slot being modified
+ * @param: data to be written into parameter RAM slot
+ *
+ * Use this to assign all parameters of a transfer at once. This
+ * allows more efficient setup of transfers than issuing multiple
+ * calls to set up those parameters in small pieces, and provides
+ * complete control over all transfer options.
+ */
+void edma3_write_slot(u32 base, int slot, struct edma3_slot_layout *param)
+{
+	int i;
+	u32 *p = (u32 *)param;
+	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));
+
+	for (i = 0; i < sizeof(struct edma3_slot_layout); i += 4)
+		__raw_writel(*p++, addr++);
+}
+
+/**
+ * edma3_read_slot - read parameter RAM data from slot
+ * @base: base address of edma
+ * @slot: number of parameter RAM slot being copied
+ * @param: where to store copy of parameter RAM data
+ *
+ * Use this to read data from a parameter RAM slot, perhaps to
+ * save them as a template for later reuse.
+ */
+void edma3_read_slot(u32 base, int slot, struct edma3_slot_layout *param)
+{
+	int i;
+	u32 *p = (u32 *)param;
+	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));
+
+	for (i = 0; i < sizeof(struct edma3_slot_layout); i += 4)
+		*p++ = __raw_readl(addr++);
+}
+
+void edma3_slot_configure(u32 base, int slot, struct edma3_slot_config *cfg)
+{
+	struct edma3_slot_layout *rg;
+
+	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
+
+	__raw_writel(cfg->opt, &rg->opt);
+	__raw_writel(cfg->src, &rg->src);
+	__raw_writel((cfg->bcnt << 16) | (cfg->acnt & 0xffff), &rg->a_b_cnt);
+	__raw_writel(cfg->dst, &rg->dst);
+	__raw_writel((cfg->dst_bidx << 16) |
+		     (cfg->src_bidx & 0xffff), &rg->src_dst_bidx);
+	__raw_writel((cfg->bcntrld << 16) |
+		     (cfg->link & 0xffff), &rg->link_bcntrld);
+	__raw_writel((cfg->dst_cidx << 16) |
+		     (cfg->src_cidx & 0xffff), &rg->src_dst_cidx);
+	__raw_writel(0xffff & cfg->ccnt, &rg->ccnt);
+}
+
+/**
+ * edma3_check_for_transfer - check whether a transfer is complete by
+ * reading the interrupt pending bit; clear the pending bit if the
+ * transfer is complete.
+ * @base: base address of edma
+ * @cfg: pointer to struct edma3_channel_config which was passed
+ * to qedma3_start() when you started the qdma channel
+ *
+ * Return 0 if complete, 1 if not.
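+ *
+ * A hedged usage sketch (editor's illustration; this is exactly how
+ * __edma3_transfer() below consumes it): poll until the transfer
+ * completes, then stop the channel:
+ *
+ *	while (edma3_check_for_transfer(base, &cfg))
+ *		;
+ *	qedma3_stop(base, &cfg);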
+ */
+int edma3_check_for_transfer(u32 base, struct edma3_channel_config *cfg)
+{
+	u32 inum;
+	u32 ipr_base;
+	u32 icr_base;
+
+	if (cfg->complete_code < 32) {
+		ipr_base = base + EDMA3_IPR;
+		icr_base = base + EDMA3_ICR;
+		inum = 1 << cfg->complete_code;
+	} else {
+		ipr_base = base + EDMA3_IPRH;
+		icr_base = base + EDMA3_ICRH;
+		inum = 1 << (cfg->complete_code - 32);
+	}
+
+	/* check complete interrupt */
+	if (!(__raw_readl(ipr_base) & inum))
+		return 1;
+
+	/* clean up the pending int bit */
+	__raw_writel(inum, icr_base);
+
+	return 0;
+}
+
+/**
+ * qedma3_stop - stop qdma on the channel passed
+ * @base: base address of edma
+ * @cfg: pointer to struct edma3_channel_config which was passed
+ * to qedma3_start() when you started the qdma channel
+ */
+void qedma3_stop(u32 base, struct edma3_channel_config *cfg)
+{
+	/* Disable qdma channel event */
+	__raw_writel(1 << cfg->chnum, base + EDMA3_QEECR);
+
+	/* clean up the interrupt indication */
+	if (cfg->complete_code < 32)
+		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
+	else
+		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICRH);
+
+	/* Clear missed event if set */
+	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
+	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);
+
+	/* Clear the channel map */
+	__raw_writel(0, base + EDMA3_QCHMAP(cfg->chnum));
+}
+
+void __edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
+		      dma_addr_t dst, dma_addr_t src, size_t len, size_t s_len)
+{
+	struct edma3_slot_config slot;
+	struct edma3_channel_config edma_channel;
+	int b_cnt_value = 1;
+	int rem_bytes = 0;
+	int a_cnt_value = len;
+	unsigned int addr = (unsigned int) (dst);
+	unsigned int max_acnt = 0x7FFFU;
+
+	if (len > s_len) {
+		b_cnt_value = (len / s_len);
+		rem_bytes = (len % s_len);
+		a_cnt_value = s_len;
+	} else if (len > max_acnt) {
+		b_cnt_value = (len / max_acnt);
+		rem_bytes = (len % max_acnt);
+		a_cnt_value = max_acnt;
+	}
+
+	slot.opt = 0;
+	slot.src = ((unsigned int) src);
+	slot.acnt = a_cnt_value;
+	slot.bcnt = b_cnt_value;
+	slot.ccnt = 1;
+	if (len == s_len)
+		slot.src_bidx = a_cnt_value;
+	else
+		slot.src_bidx = 0;
+	slot.dst_bidx = a_cnt_value;
+	slot.src_cidx = 0;
+	slot.dst_cidx = 0;
+	slot.link = EDMA3_PARSET_NULL_LINK;
+	slot.bcntrld = 0;
+	slot.opt = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
+		   EDMA3_SLOPT_COMP_CODE(0) |
+		   EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;
+
+	edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
+	edma_channel.slot = edma_slot_num;
+	edma_channel.chnum = 0;
+	edma_channel.complete_code = 0;
+	/* set event trigger to dst update */
+	edma_channel.trigger_slot_word = EDMA3_TWORD(dst);
+
+	qedma3_start(edma3_base_addr, &edma_channel);
+	edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr);
+
+	while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
+		;
+	qedma3_stop(edma3_base_addr, &edma_channel);
+
+	if (rem_bytes != 0) {
+		slot.opt = 0;
+		if (len == s_len)
+			slot.src =
+				(b_cnt_value * max_acnt) + ((unsigned int) src);
+		else
+			slot.src = (unsigned int) src;
+		slot.acnt = rem_bytes;
+		slot.bcnt = 1;
+		slot.ccnt = 1;
+		slot.src_bidx = rem_bytes;
+		slot.dst_bidx = rem_bytes;
+		slot.src_cidx = 0;
+		slot.dst_cidx = 0;
+		slot.link = EDMA3_PARSET_NULL_LINK;
+		slot.bcntrld = 0;
+		slot.opt = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
+			   EDMA3_SLOPT_COMP_CODE(0) |
+			   EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;
+		edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
+		edma_channel.slot = edma_slot_num;
+		edma_channel.chnum = 0;
+		edma_channel.complete_code = 0;
/* set event trigger to dst update */ + edma_channel.trigger_slot_word = EDMA3_TWORD(dst); + + qedma3_start(edma3_base_addr, &edma_channel); + edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr + + (max_acnt * b_cnt_value)); + while (edma3_check_for_transfer(edma3_base_addr, &edma_channel)) + ; + qedma3_stop(edma3_base_addr, &edma_channel); + } +} + +void __edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num, + dma_addr_t dst, u8 val, size_t len) +{ + int xfer_len; + int max_xfer = EDMA_FILL_BUFFER_SIZE * 65535; + dma_addr_t source; + + memset((void *)edma_fill_buffer, val, sizeof(edma_fill_buffer)); + source = dma_map_single(edma_fill_buffer, len, DMA_TO_DEVICE); + + while (len) { + xfer_len = len; + if (xfer_len > max_xfer) + xfer_len = max_xfer; + + __edma3_transfer(edma3_base_addr, edma_slot_num, dst, + source, xfer_len, + EDMA_FILL_BUFFER_SIZE); + len -= xfer_len; + dst += xfer_len; + } + + dma_unmap_single(source, len, DMA_FROM_DEVICE); +} + +#ifndef CONFIG_DMA + +void edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num, + void *dst, void *src, size_t len) +{ + /* Clean the areas, so no writeback into the RAM races with DMA */ + dma_addr_t destination = dma_map_single(dst, len, DMA_FROM_DEVICE); + dma_addr_t source = dma_map_single(src, len, DMA_TO_DEVICE); + + __edma3_transfer(edma3_base_addr, edma_slot_num, destination, source, len, len); + + /* Clean+Invalidate the areas after, so we can see DMA'd data */ + dma_unmap_single(destination, len, DMA_FROM_DEVICE); + dma_unmap_single(source, len, DMA_TO_DEVICE); +} + +void edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num, + void *dst, u8 val, size_t len) +{ + /* Clean the area, so no writeback into the RAM races with DMA */ + dma_addr_t destination = dma_map_single(dst, len, DMA_FROM_DEVICE); + + __edma3_fill(edma3_base_addr, edma_slot_num, destination, val, len); + + /* Clean+Invalidate the area after, so we can see DMA'd data */ + dma_unmap_single(destination, len, DMA_FROM_DEVICE); +} + +#else + +static int ti_edma3_transfer(struct udevice *dev, int direction, + dma_addr_t dst, dma_addr_t src, size_t len) +{ + struct ti_edma3_priv *priv = dev_get_priv(dev); + + /* enable edma3 clocks */ + enable_edma3_clocks(); + + switch (direction) { + case DMA_MEM_TO_MEM: + __edma3_transfer(priv->base, 1, dst, src, len, len); + break; + default: + pr_err("Transfer type not implemented in DMA driver\n"); + break; + } + + /* disable edma3 clocks */ + disable_edma3_clocks(); + + return 0; +} + +static int ti_edma3_of_to_plat(struct udevice *dev) +{ + struct ti_edma3_priv *priv = dev_get_priv(dev); + + priv->base = dev_read_addr(dev); + + return 0; +} + +static int ti_edma3_probe(struct udevice *dev) +{ + struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev); + + uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM; + + return 0; +} + +static const struct dma_ops ti_edma3_ops = { + .transfer = ti_edma3_transfer, +}; + +static const struct udevice_id ti_edma3_ids[] = { + { .compatible = "ti,edma3" }, + { } +}; + +U_BOOT_DRIVER(ti_edma3) = { + .name = "ti_edma3", + .id = UCLASS_DMA, + .of_match = ti_edma3_ids, + .ops = &ti_edma3_ops, + .of_to_plat = ti_edma3_of_to_plat, + .probe = ti_edma3_probe, + .priv_auto = sizeof(struct ti_edma3_priv), +}; +#endif /* CONFIG_DMA */ diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig new file mode 100644 index 00000000000..87c026e0490 --- /dev/null +++ b/drivers/dma/ti/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0+ + +if ARCH_K3 + 
+config TI_K3_NAVSS_UDMA + bool "Texas Instruments UDMA" + depends on ARCH_K3 + select DMA + select TI_K3_NAVSS_RINGACC + select TI_K3_NAVSS_PSILCFG + select TI_K3_PSIL + help + Support for UDMA used in K3 devices. +endif + +config TI_K3_PSIL + bool diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile new file mode 100644 index 00000000000..90c20a6a3fa --- /dev/null +++ b/drivers/dma/ti/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0+ + +obj-$(CONFIG_TI_K3_NAVSS_UDMA) += k3-udma.o +obj-$(CONFIG_TI_K3_PSIL) += k3-psil-data.o +k3-psil-data-y += k3-psil.o +k3-psil-data-$(CONFIG_SOC_K3_AM654) += k3-psil-am654.o +k3-psil-data-$(CONFIG_SOC_K3_J721E) += k3-psil-j721e.o +k3-psil-data-$(CONFIG_SOC_K3_J7200) += k3-psil-j721e.o +k3-psil-data-$(CONFIG_SOC_K3_J721S2) += k3-psil-j721s2.o +k3-psil-data-$(CONFIG_SOC_K3_AM642) += k3-psil-am64.o +k3-psil-data-$(CONFIG_SOC_K3_AM625) += k3-psil-am62.o +k3-psil-data-$(CONFIG_SOC_K3_AM62A7) += k3-psil-am62a.o +k3-psil-data-$(CONFIG_SOC_K3_J784S4) += k3-psil-j784s4.o +k3-psil-data-$(CONFIG_SOC_K3_AM62P5) += k3-psil-am62p.o +k3-psil-data-$(CONFIG_SOC_K3_J722S) += k3-psil-am62p.o diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c new file mode 100644 index 00000000000..9527da4cac5 --- /dev/null +++ b/drivers/dma/ti/k3-psil-am62.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + .mapped_channel_id = ch, \ + .flow_start = flow_base, \ + .flow_num = flow_cnt, \ + .default_flow_id = flow_base, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am62_src_ep_map[] = { + /* CPSW3G */ + PSIL_ETHERNET(0x4600, 19, 19, 16), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am62_dst_ep_map[] = { + /* CPSW3G */ + PSIL_ETHERNET(0xc600, 19, 19, 8), + PSIL_ETHERNET(0xc601, 20, 27, 8), + PSIL_ETHERNET(0xc602, 21, 35, 8), + PSIL_ETHERNET(0xc603, 22, 43, 8), + PSIL_ETHERNET(0xc604, 23, 51, 8), + PSIL_ETHERNET(0xc605, 24, 59, 8), + PSIL_ETHERNET(0xc606, 25, 67, 8), + PSIL_ETHERNET(0xc607, 26, 75, 8), +}; + +struct psil_ep_map am62_ep_map = { + .name = "am62", + .src = am62_src_ep_map, + .src_count = ARRAY_SIZE(am62_src_ep_map), + .dst = am62_dst_ep_map, + .dst_count = ARRAY_SIZE(am62_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c new file mode 100644 index 00000000000..ca9d71f9142 --- /dev/null +++ b/drivers/dma/ti/k3-psil-am62a.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .mapped_channel_id = -1, \ + .default_flow_id = -1, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .mapped_channel_id = -1, \ + .default_flow_id = -1, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + 
.psd_size = 16, \ + .mapped_channel_id = ch, \ + .flow_start = flow_base, \ + .flow_num = flow_cnt, \ + .default_flow_id = flow_base, \ + }, \ + } + +#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .mapped_channel_id = ch, \ + .flow_start = flow_base, \ + .flow_num = flow_cnt, \ + .default_flow_id = default_flow, \ + .notdpkt = tx, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_CSI2RX(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am62a_src_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0x7504, 20, 35, 8, 35, 0), + PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), + PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), + PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), + /* PDMA_MAIN0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0x4302), + PSIL_PDMA_XY_PKT(0x4303), + PSIL_PDMA_XY_PKT(0x4304), + PSIL_PDMA_XY_PKT(0x4305), + PSIL_PDMA_XY_PKT(0x4306), + PSIL_PDMA_XY_PKT(0x4307), + PSIL_PDMA_XY_PKT(0x4308), + PSIL_PDMA_XY_PKT(0x4309), + PSIL_PDMA_XY_PKT(0x430a), + PSIL_PDMA_XY_PKT(0x430b), + PSIL_PDMA_XY_PKT(0x430c), + PSIL_PDMA_XY_PKT(0x430d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0x4400), + PSIL_PDMA_XY_PKT(0x4401), + PSIL_PDMA_XY_PKT(0x4402), + PSIL_PDMA_XY_PKT(0x4403), + PSIL_PDMA_XY_PKT(0x4404), + PSIL_PDMA_XY_PKT(0x4405), + PSIL_PDMA_XY_PKT(0x4406), + /* PDMA_MAIN2 - MCASP0-2 */ + PSIL_PDMA_MCASP(0x4500), + PSIL_PDMA_MCASP(0x4501), + PSIL_PDMA_MCASP(0x4502), + /* CPSW3G */ + PSIL_ETHERNET(0x4600, 19, 19, 16), + /* CSI2RX */ + PSIL_CSI2RX(0x5000), + PSIL_CSI2RX(0x5001), + PSIL_CSI2RX(0x5002), + PSIL_CSI2RX(0x5003), + PSIL_CSI2RX(0x5004), + PSIL_CSI2RX(0x5005), + PSIL_CSI2RX(0x5006), + PSIL_CSI2RX(0x5007), + PSIL_CSI2RX(0x5008), + PSIL_CSI2RX(0x5009), + PSIL_CSI2RX(0x500a), + PSIL_CSI2RX(0x500b), + PSIL_CSI2RX(0x500c), + PSIL_CSI2RX(0x500d), + PSIL_CSI2RX(0x500e), + PSIL_CSI2RX(0x500f), + PSIL_CSI2RX(0x5010), + PSIL_CSI2RX(0x5011), + PSIL_CSI2RX(0x5012), + PSIL_CSI2RX(0x5013), + PSIL_CSI2RX(0x5014), + PSIL_CSI2RX(0x5015), + PSIL_CSI2RX(0x5016), + PSIL_CSI2RX(0x5017), + PSIL_CSI2RX(0x5018), + PSIL_CSI2RX(0x5019), + PSIL_CSI2RX(0x501a), + PSIL_CSI2RX(0x501b), + PSIL_CSI2RX(0x501c), + PSIL_CSI2RX(0x501d), + PSIL_CSI2RX(0x501e), + PSIL_CSI2RX(0x501f), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am62a_dst_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), + PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), + /* PDMA_MAIN0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0xc302), + PSIL_PDMA_XY_PKT(0xc303), + PSIL_PDMA_XY_PKT(0xc304), + PSIL_PDMA_XY_PKT(0xc305), + PSIL_PDMA_XY_PKT(0xc306), + PSIL_PDMA_XY_PKT(0xc307), + PSIL_PDMA_XY_PKT(0xc308), + PSIL_PDMA_XY_PKT(0xc309), + PSIL_PDMA_XY_PKT(0xc30a), + PSIL_PDMA_XY_PKT(0xc30b), + PSIL_PDMA_XY_PKT(0xc30c), + PSIL_PDMA_XY_PKT(0xc30d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0xc400), + PSIL_PDMA_XY_PKT(0xc401), + PSIL_PDMA_XY_PKT(0xc402), + PSIL_PDMA_XY_PKT(0xc403), + PSIL_PDMA_XY_PKT(0xc404), + PSIL_PDMA_XY_PKT(0xc405), + PSIL_PDMA_XY_PKT(0xc406), + /* PDMA_MAIN2 - MCASP0-2 */ + PSIL_PDMA_MCASP(0xc500), + PSIL_PDMA_MCASP(0xc501), + PSIL_PDMA_MCASP(0xc502), + /* CPSW3G */ + PSIL_ETHERNET(0xc600, 19, 19, 8), + PSIL_ETHERNET(0xc601, 20, 27, 
8), + PSIL_ETHERNET(0xc602, 21, 35, 8), + PSIL_ETHERNET(0xc603, 22, 43, 8), + PSIL_ETHERNET(0xc604, 23, 51, 8), + PSIL_ETHERNET(0xc605, 24, 59, 8), + PSIL_ETHERNET(0xc606, 25, 67, 8), + PSIL_ETHERNET(0xc607, 26, 75, 8), +}; + +struct psil_ep_map am62a_ep_map = { + .name = "am62a", + .src = am62a_src_ep_map, + .src_count = ARRAY_SIZE(am62a_src_ep_map), + .dst = am62a_dst_ep_map, + .dst_count = ARRAY_SIZE(am62a_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-am62p.c b/drivers/dma/ti/k3-psil-am62p.c new file mode 100644 index 00000000000..8739bf41b5b --- /dev/null +++ b/drivers/dma/ti/k3-psil-am62p.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .mapped_channel_id = -1, \ + .default_flow_id = -1, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .mapped_channel_id = -1, \ + .default_flow_id = -1, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + .mapped_channel_id = ch, \ + .flow_start = flow_base, \ + .flow_num = flow_cnt, \ + .default_flow_id = flow_base, \ + }, \ + } + +#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .mapped_channel_id = ch, \ + .flow_start = flow_base, \ + .flow_num = flow_cnt, \ + .default_flow_id = default_flow, \ + .notdpkt = tx, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_CSI2RX(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am62p_src_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0x7504, 20, 35, 8, 35, 0), + PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), + PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), + PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), + /* PDMA_MAIN0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0x4302), + PSIL_PDMA_XY_PKT(0x4303), + PSIL_PDMA_XY_PKT(0x4304), + PSIL_PDMA_XY_PKT(0x4305), + PSIL_PDMA_XY_PKT(0x4306), + PSIL_PDMA_XY_PKT(0x4307), + PSIL_PDMA_XY_PKT(0x4308), + PSIL_PDMA_XY_PKT(0x4309), + PSIL_PDMA_XY_PKT(0x430a), + PSIL_PDMA_XY_PKT(0x430b), + PSIL_PDMA_XY_PKT(0x430c), + PSIL_PDMA_XY_PKT(0x430d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0x4400), + PSIL_PDMA_XY_PKT(0x4401), + PSIL_PDMA_XY_PKT(0x4402), + PSIL_PDMA_XY_PKT(0x4403), + PSIL_PDMA_XY_PKT(0x4404), + PSIL_PDMA_XY_PKT(0x4405), + PSIL_PDMA_XY_PKT(0x4406), + /* PDMA_MAIN2 - MCASP0-2 */ + PSIL_PDMA_MCASP(0x4500), + PSIL_PDMA_MCASP(0x4501), + PSIL_PDMA_MCASP(0x4502), + /* CPSW3G */ + PSIL_ETHERNET(0x4600, 19, 19, 16), + /* CSI2RX */ + PSIL_CSI2RX(0x5000), + PSIL_CSI2RX(0x5001), + PSIL_CSI2RX(0x5002), + PSIL_CSI2RX(0x5003), + PSIL_CSI2RX(0x5004), + PSIL_CSI2RX(0x5005), + PSIL_CSI2RX(0x5006), + PSIL_CSI2RX(0x5007), + PSIL_CSI2RX(0x5008), + PSIL_CSI2RX(0x5009), + PSIL_CSI2RX(0x500a), + PSIL_CSI2RX(0x500b), + PSIL_CSI2RX(0x500c), + PSIL_CSI2RX(0x500d), + PSIL_CSI2RX(0x500e), + PSIL_CSI2RX(0x500f), + 
PSIL_CSI2RX(0x5010), + PSIL_CSI2RX(0x5011), + PSIL_CSI2RX(0x5012), + PSIL_CSI2RX(0x5013), + PSIL_CSI2RX(0x5014), + PSIL_CSI2RX(0x5015), + PSIL_CSI2RX(0x5016), + PSIL_CSI2RX(0x5017), + PSIL_CSI2RX(0x5018), + PSIL_CSI2RX(0x5019), + PSIL_CSI2RX(0x501a), + PSIL_CSI2RX(0x501b), + PSIL_CSI2RX(0x501c), + PSIL_CSI2RX(0x501d), + PSIL_CSI2RX(0x501e), + PSIL_CSI2RX(0x501f), + PSIL_CSI2RX(0x5000), + PSIL_CSI2RX(0x5001), + PSIL_CSI2RX(0x5002), + PSIL_CSI2RX(0x5003), + PSIL_CSI2RX(0x5004), + PSIL_CSI2RX(0x5005), + PSIL_CSI2RX(0x5006), + PSIL_CSI2RX(0x5007), + PSIL_CSI2RX(0x5008), + PSIL_CSI2RX(0x5009), + PSIL_CSI2RX(0x500a), + PSIL_CSI2RX(0x500b), + PSIL_CSI2RX(0x500c), + PSIL_CSI2RX(0x500d), + PSIL_CSI2RX(0x500e), + PSIL_CSI2RX(0x500f), + PSIL_CSI2RX(0x5010), + PSIL_CSI2RX(0x5011), + PSIL_CSI2RX(0x5012), + PSIL_CSI2RX(0x5013), + PSIL_CSI2RX(0x5014), + PSIL_CSI2RX(0x5015), + PSIL_CSI2RX(0x5016), + PSIL_CSI2RX(0x5017), + PSIL_CSI2RX(0x5018), + PSIL_CSI2RX(0x5019), + PSIL_CSI2RX(0x501a), + PSIL_CSI2RX(0x501b), + PSIL_CSI2RX(0x501c), + PSIL_CSI2RX(0x501d), + PSIL_CSI2RX(0x501e), + PSIL_CSI2RX(0x501f), + /* CSIRX 1-3 (only for J722S) */ + PSIL_CSI2RX(0x5100), + PSIL_CSI2RX(0x5101), + PSIL_CSI2RX(0x5102), + PSIL_CSI2RX(0x5103), + PSIL_CSI2RX(0x5104), + PSIL_CSI2RX(0x5105), + PSIL_CSI2RX(0x5106), + PSIL_CSI2RX(0x5107), + PSIL_CSI2RX(0x5108), + PSIL_CSI2RX(0x5109), + PSIL_CSI2RX(0x510a), + PSIL_CSI2RX(0x510b), + PSIL_CSI2RX(0x510c), + PSIL_CSI2RX(0x510d), + PSIL_CSI2RX(0x510e), + PSIL_CSI2RX(0x510f), + PSIL_CSI2RX(0x5110), + PSIL_CSI2RX(0x5111), + PSIL_CSI2RX(0x5112), + PSIL_CSI2RX(0x5113), + PSIL_CSI2RX(0x5114), + PSIL_CSI2RX(0x5115), + PSIL_CSI2RX(0x5116), + PSIL_CSI2RX(0x5117), + PSIL_CSI2RX(0x5118), + PSIL_CSI2RX(0x5119), + PSIL_CSI2RX(0x511a), + PSIL_CSI2RX(0x511b), + PSIL_CSI2RX(0x511c), + PSIL_CSI2RX(0x511d), + PSIL_CSI2RX(0x511e), + PSIL_CSI2RX(0x511f), + PSIL_CSI2RX(0x5200), + PSIL_CSI2RX(0x5201), + PSIL_CSI2RX(0x5202), + PSIL_CSI2RX(0x5203), + PSIL_CSI2RX(0x5204), + PSIL_CSI2RX(0x5205), + PSIL_CSI2RX(0x5206), + PSIL_CSI2RX(0x5207), + PSIL_CSI2RX(0x5208), + PSIL_CSI2RX(0x5209), + PSIL_CSI2RX(0x520a), + PSIL_CSI2RX(0x520b), + PSIL_CSI2RX(0x520c), + PSIL_CSI2RX(0x520d), + PSIL_CSI2RX(0x520e), + PSIL_CSI2RX(0x520f), + PSIL_CSI2RX(0x5210), + PSIL_CSI2RX(0x5211), + PSIL_CSI2RX(0x5212), + PSIL_CSI2RX(0x5213), + PSIL_CSI2RX(0x5214), + PSIL_CSI2RX(0x5215), + PSIL_CSI2RX(0x5216), + PSIL_CSI2RX(0x5217), + PSIL_CSI2RX(0x5218), + PSIL_CSI2RX(0x5219), + PSIL_CSI2RX(0x521a), + PSIL_CSI2RX(0x521b), + PSIL_CSI2RX(0x521c), + PSIL_CSI2RX(0x521d), + PSIL_CSI2RX(0x521e), + PSIL_CSI2RX(0x521f), + PSIL_CSI2RX(0x5300), + PSIL_CSI2RX(0x5301), + PSIL_CSI2RX(0x5302), + PSIL_CSI2RX(0x5303), + PSIL_CSI2RX(0x5304), + PSIL_CSI2RX(0x5305), + PSIL_CSI2RX(0x5306), + PSIL_CSI2RX(0x5307), + PSIL_CSI2RX(0x5308), + PSIL_CSI2RX(0x5309), + PSIL_CSI2RX(0x530a), + PSIL_CSI2RX(0x530b), + PSIL_CSI2RX(0x530c), + PSIL_CSI2RX(0x530d), + PSIL_CSI2RX(0x530e), + PSIL_CSI2RX(0x530f), + PSIL_CSI2RX(0x5310), + PSIL_CSI2RX(0x5311), + PSIL_CSI2RX(0x5312), + PSIL_CSI2RX(0x5313), + PSIL_CSI2RX(0x5314), + PSIL_CSI2RX(0x5315), + PSIL_CSI2RX(0x5316), + PSIL_CSI2RX(0x5317), + PSIL_CSI2RX(0x5318), + PSIL_CSI2RX(0x5319), + PSIL_CSI2RX(0x531a), + PSIL_CSI2RX(0x531b), + PSIL_CSI2RX(0x531c), + PSIL_CSI2RX(0x531d), + PSIL_CSI2RX(0x531e), + PSIL_CSI2RX(0x531f), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am62p_dst_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), + PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), 
+ /* PDMA_MAIN0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0xc302), + PSIL_PDMA_XY_PKT(0xc303), + PSIL_PDMA_XY_PKT(0xc304), + PSIL_PDMA_XY_PKT(0xc305), + PSIL_PDMA_XY_PKT(0xc306), + PSIL_PDMA_XY_PKT(0xc307), + PSIL_PDMA_XY_PKT(0xc308), + PSIL_PDMA_XY_PKT(0xc309), + PSIL_PDMA_XY_PKT(0xc30a), + PSIL_PDMA_XY_PKT(0xc30b), + PSIL_PDMA_XY_PKT(0xc30c), + PSIL_PDMA_XY_PKT(0xc30d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0xc400), + PSIL_PDMA_XY_PKT(0xc401), + PSIL_PDMA_XY_PKT(0xc402), + PSIL_PDMA_XY_PKT(0xc403), + PSIL_PDMA_XY_PKT(0xc404), + PSIL_PDMA_XY_PKT(0xc405), + PSIL_PDMA_XY_PKT(0xc406), + /* PDMA_MAIN2 - MCASP0-2 */ + PSIL_PDMA_MCASP(0xc500), + PSIL_PDMA_MCASP(0xc501), + PSIL_PDMA_MCASP(0xc502), + /* CPSW3G */ + PSIL_ETHERNET(0xc600, 19, 19, 8), + PSIL_ETHERNET(0xc601, 20, 27, 8), + PSIL_ETHERNET(0xc602, 21, 35, 8), + PSIL_ETHERNET(0xc603, 22, 43, 8), + PSIL_ETHERNET(0xc604, 23, 51, 8), + PSIL_ETHERNET(0xc605, 24, 59, 8), + PSIL_ETHERNET(0xc606, 25, 67, 8), + PSIL_ETHERNET(0xc607, 26, 75, 8), +}; + +struct psil_ep_map am62p_ep_map = { + .name = "am62p", + .src = am62p_src_ep_map, + .src_count = ARRAY_SIZE(am62p_src_ep_map), + .dst = am62p_dst_ep_map, + .dst_count = ARRAY_SIZE(am62p_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-am64.c b/drivers/dma/ti/k3-psil-am64.c new file mode 100644 index 00000000000..6180e2a1996 --- /dev/null +++ b/drivers/dma/ti/k3-psil-am64.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .mapped_channel_id = -1, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .mapped_channel_id = -1, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + .mapped_channel_id = ch, \ + .flow_start = flow_base, \ + .flow_num = flow_cnt, \ + .default_flow_id = flow_base, \ + }, \ + } + +#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .mapped_channel_id = ch, \ + .flow_start = flow_base, \ + .flow_num = flow_cnt, \ + .default_flow_id = default_flow, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am64_src_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0x4000, 17, 32, 8, 32, 0), + PSIL_SAUL(0x4001, 18, 32, 8, 33, 0), + PSIL_SAUL(0x4002, 19, 40, 8, 40, 0), + PSIL_SAUL(0x4003, 20, 40, 8, 41, 0), + /* ICSS_G0 */ + PSIL_ETHERNET(0x4100, 21, 48, 16), + PSIL_ETHERNET(0x4101, 22, 64, 16), + PSIL_ETHERNET(0x4102, 23, 80, 16), + PSIL_ETHERNET(0x4103, 24, 96, 16), + /* ICSS_G1 */ + PSIL_ETHERNET(0x4200, 25, 112, 16), + PSIL_ETHERNET(0x4201, 26, 128, 16), + PSIL_ETHERNET(0x4202, 27, 144, 16), + PSIL_ETHERNET(0x4203, 28, 160, 16), + /* PDMA_MAIN0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0x4300), + PSIL_PDMA_XY_PKT(0x4301), + PSIL_PDMA_XY_PKT(0x4302), + PSIL_PDMA_XY_PKT(0x4303), + PSIL_PDMA_XY_PKT(0x4304), + PSIL_PDMA_XY_PKT(0x4305), + PSIL_PDMA_XY_PKT(0x4306), + PSIL_PDMA_XY_PKT(0x4307), + PSIL_PDMA_XY_PKT(0x4308), + PSIL_PDMA_XY_PKT(0x4309), + 
PSIL_PDMA_XY_PKT(0x430a), + PSIL_PDMA_XY_PKT(0x430b), + PSIL_PDMA_XY_PKT(0x430c), + PSIL_PDMA_XY_PKT(0x430d), + PSIL_PDMA_XY_PKT(0x430e), + PSIL_PDMA_XY_PKT(0x430f), + /* PDMA_MAIN0 - USART0-1 */ + PSIL_PDMA_XY_PKT(0x4310), + PSIL_PDMA_XY_PKT(0x4311), + /* PDMA_MAIN1 - SPI4 */ + PSIL_PDMA_XY_PKT(0x4400), + PSIL_PDMA_XY_PKT(0x4401), + PSIL_PDMA_XY_PKT(0x4402), + PSIL_PDMA_XY_PKT(0x4403), + /* PDMA_MAIN1 - USART2-6 */ + PSIL_PDMA_XY_PKT(0x4404), + PSIL_PDMA_XY_PKT(0x4405), + PSIL_PDMA_XY_PKT(0x4406), + PSIL_PDMA_XY_PKT(0x4407), + PSIL_PDMA_XY_PKT(0x4408), + /* PDMA_MAIN1 - ADCs */ + PSIL_PDMA_XY_TR(0x440f), + PSIL_PDMA_XY_TR(0x4410), + /* CPSW2 */ + PSIL_ETHERNET(0x4500, 16, 16, 16), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am64_dst_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0xc000, 24, 80, 8, 80, 1), + PSIL_SAUL(0xc001, 25, 88, 8, 88, 1), + /* ICSS_G0 */ + PSIL_ETHERNET(0xc100, 26, 96, 1), + PSIL_ETHERNET(0xc101, 27, 97, 1), + PSIL_ETHERNET(0xc102, 28, 98, 1), + PSIL_ETHERNET(0xc103, 29, 99, 1), + PSIL_ETHERNET(0xc104, 30, 100, 1), + PSIL_ETHERNET(0xc105, 31, 101, 1), + PSIL_ETHERNET(0xc106, 32, 102, 1), + PSIL_ETHERNET(0xc107, 33, 103, 1), + /* ICSS_G1 */ + PSIL_ETHERNET(0xc200, 34, 104, 1), + PSIL_ETHERNET(0xc201, 35, 105, 1), + PSIL_ETHERNET(0xc202, 36, 106, 1), + PSIL_ETHERNET(0xc203, 37, 107, 1), + PSIL_ETHERNET(0xc204, 38, 108, 1), + PSIL_ETHERNET(0xc205, 39, 109, 1), + PSIL_ETHERNET(0xc206, 40, 110, 1), + PSIL_ETHERNET(0xc207, 41, 111, 1), + /* CPSW2 */ + PSIL_ETHERNET(0xc500, 16, 16, 8), + PSIL_ETHERNET(0xc501, 17, 24, 8), + PSIL_ETHERNET(0xc502, 18, 32, 8), + PSIL_ETHERNET(0xc503, 19, 40, 8), + PSIL_ETHERNET(0xc504, 20, 48, 8), + PSIL_ETHERNET(0xc505, 21, 56, 8), + PSIL_ETHERNET(0xc506, 22, 64, 8), + PSIL_ETHERNET(0xc507, 23, 72, 8), +}; + +struct psil_ep_map am64_ep_map = { + .name = "am64", + .src = am64_src_ep_map, + .src_count = ARRAY_SIZE(am64_src_ep_map), + .dst = am64_dst_ep_map, + .dst_count = ARRAY_SIZE(am64_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c new file mode 100644 index 00000000000..ce86600e556 --- /dev/null +++ b/drivers/dma/ti/k3-psil-am654.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am654_src_ep_map[] = { + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0x4300), + PSIL_ETHERNET(0x4301), + PSIL_ETHERNET(0x4302), + PSIL_ETHERNET(0x4303), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am654_dst_ep_map[] = { + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + /* PSIL: 0xc101 - 0xc103 unused */ + PSIL_ETHERNET(0xc104), + /* PSIL: 0xc105 - 0xc107 unused */ + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + /* PSIL: 0xc201 - 0xc203 unused */ + PSIL_ETHERNET(0xc204), + /* PSIL: 0xc205 - 0xc207 unused */ + /* PRU_ICSSG2 
*/ + PSIL_ETHERNET(0xc300), + /* PSIL: 0xc301 - 0xc303 unused */ + PSIL_ETHERNET(0xc304), + /* PSIL: 0xc305 - 0xc307 unused */ + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + /* PSIL: 0xf001 - 0xf007 unused */ +}; + +struct psil_ep_map am654_ep_map = { + .name = "am654", + .src = am654_src_ep_map, + .src_count = ARRAY_SIZE(am654_src_ep_map), + .dst = am654_dst_ep_map, + .dst_count = ARRAY_SIZE(am654_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c new file mode 100644 index 00000000000..8e57e860f25 --- /dev/null +++ b/drivers/dma/ti/k3-psil-j721e.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019-2023 Texas Instruments Incorporated - https://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep j721e_src_ep_map[] = { + /* MCU_CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MAIN_CPSW0 */ + PSIL_ETHERNET(0x4a00), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep j721e_dst_ep_map[] = { + /* MCU_CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), + /* MAIN_CPSW0 */ + PSIL_ETHERNET(0xca00), + PSIL_ETHERNET(0xca01), + PSIL_ETHERNET(0xca02), + PSIL_ETHERNET(0xca03), + PSIL_ETHERNET(0xca04), + PSIL_ETHERNET(0xca05), + PSIL_ETHERNET(0xca06), + PSIL_ETHERNET(0xca07), +}; + +struct psil_ep_map j721e_ep_map = { + .name = "j721e", + .src = j721e_src_ep_map, + .src_count = ARRAY_SIZE(j721e_src_ep_map), + .dst = j721e_dst_ep_map, + .dst_count = ARRAY_SIZE(j721e_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c new file mode 100644 index 00000000000..4c4172a4d27 --- /dev/null +++ b/drivers/dma/ti/k3-psil-j721s2.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep j721s2_src_ep_map[] = { + /* PDMA_MCASP - McASP0-4 */ + PSIL_PDMA_MCASP(0x4400), + PSIL_PDMA_MCASP(0x4401), + PSIL_PDMA_MCASP(0x4402), + PSIL_PDMA_MCASP(0x4403), + PSIL_PDMA_MCASP(0x4404), + /* PDMA_SPI_G0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0x4600), + PSIL_PDMA_XY_PKT(0x4601), + PSIL_PDMA_XY_PKT(0x4602), + 
PSIL_PDMA_XY_PKT(0x4603), + PSIL_PDMA_XY_PKT(0x4604), + PSIL_PDMA_XY_PKT(0x4605), + PSIL_PDMA_XY_PKT(0x4606), + PSIL_PDMA_XY_PKT(0x4607), + PSIL_PDMA_XY_PKT(0x4608), + PSIL_PDMA_XY_PKT(0x4609), + PSIL_PDMA_XY_PKT(0x460a), + PSIL_PDMA_XY_PKT(0x460b), + PSIL_PDMA_XY_PKT(0x460c), + PSIL_PDMA_XY_PKT(0x460d), + PSIL_PDMA_XY_PKT(0x460e), + PSIL_PDMA_XY_PKT(0x460f), + /* PDMA_SPI_G1 - SPI4-7 */ + PSIL_PDMA_XY_PKT(0x4610), + PSIL_PDMA_XY_PKT(0x4611), + PSIL_PDMA_XY_PKT(0x4612), + PSIL_PDMA_XY_PKT(0x4613), + PSIL_PDMA_XY_PKT(0x4614), + PSIL_PDMA_XY_PKT(0x4615), + PSIL_PDMA_XY_PKT(0x4616), + PSIL_PDMA_XY_PKT(0x4617), + PSIL_PDMA_XY_PKT(0x4618), + PSIL_PDMA_XY_PKT(0x4619), + PSIL_PDMA_XY_PKT(0x461a), + PSIL_PDMA_XY_PKT(0x461b), + PSIL_PDMA_XY_PKT(0x461c), + PSIL_PDMA_XY_PKT(0x461d), + PSIL_PDMA_XY_PKT(0x461e), + PSIL_PDMA_XY_PKT(0x461f), + /* PDMA_USART_G0 - UART0-1 */ + PSIL_PDMA_XY_PKT(0x4700), + PSIL_PDMA_XY_PKT(0x4701), + /* PDMA_USART_G1 - UART2-3 */ + PSIL_PDMA_XY_PKT(0x4702), + PSIL_PDMA_XY_PKT(0x4703), + /* PDMA_USART_G2 - UART4-9 */ + PSIL_PDMA_XY_PKT(0x4704), + PSIL_PDMA_XY_PKT(0x4705), + PSIL_PDMA_XY_PKT(0x4706), + PSIL_PDMA_XY_PKT(0x4707), + PSIL_PDMA_XY_PKT(0x4708), + PSIL_PDMA_XY_PKT(0x4709), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ + PSIL_PDMA_XY_PKT(0x7100), + PSIL_PDMA_XY_PKT(0x7101), + PSIL_PDMA_XY_PKT(0x7102), + PSIL_PDMA_XY_PKT(0x7103), + /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ + PSIL_PDMA_XY_PKT(0x7300), + /* MCU_PDMA_ADC - ADC0-1 */ + PSIL_PDMA_XY_TR(0x7400), + PSIL_PDMA_XY_TR(0x7401), + PSIL_PDMA_XY_TR(0x7402), + PSIL_PDMA_XY_TR(0x7403), + /* SA2UL */ + PSIL_SA2UL(0x7500, 0), + PSIL_SA2UL(0x7501, 0), + PSIL_SA2UL(0x7502, 0), + PSIL_SA2UL(0x7503, 0), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep j721s2_dst_ep_map[] = { + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), + /* SA2UL */ + PSIL_SA2UL(0xf500, 1), + PSIL_SA2UL(0xf501, 1), +}; + +struct psil_ep_map j721s2_ep_map = { + .name = "j721s2", + .src = j721s2_src_ep_map, + .src_count = ARRAY_SIZE(j721s2_src_ep_map), + .dst = j721s2_dst_ep_map, + .dst_count = ARRAY_SIZE(j721s2_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-j784s4.c b/drivers/dma/ti/k3-psil-j784s4.c new file mode 100644 index 00000000000..7f06a1f307c --- /dev/null +++ b/drivers/dma/ti/k3-psil-j784s4.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com + */ +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + 
.needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep j784s4_src_ep_map[] = { + /* PDMA_MCASP - McASP0-4 */ + PSIL_PDMA_MCASP(0x4400), + PSIL_PDMA_MCASP(0x4401), + PSIL_PDMA_MCASP(0x4402), + PSIL_PDMA_MCASP(0x4403), + PSIL_PDMA_MCASP(0x4404), + /* PDMA_SPI_G0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0x4600), + PSIL_PDMA_XY_PKT(0x4601), + PSIL_PDMA_XY_PKT(0x4602), + PSIL_PDMA_XY_PKT(0x4603), + PSIL_PDMA_XY_PKT(0x4604), + PSIL_PDMA_XY_PKT(0x4605), + PSIL_PDMA_XY_PKT(0x4606), + PSIL_PDMA_XY_PKT(0x4607), + PSIL_PDMA_XY_PKT(0x4608), + PSIL_PDMA_XY_PKT(0x4609), + PSIL_PDMA_XY_PKT(0x460a), + PSIL_PDMA_XY_PKT(0x460b), + PSIL_PDMA_XY_PKT(0x460c), + PSIL_PDMA_XY_PKT(0x460d), + PSIL_PDMA_XY_PKT(0x460e), + PSIL_PDMA_XY_PKT(0x460f), + /* PDMA_SPI_G1 - SPI4-7 */ + PSIL_PDMA_XY_PKT(0x4610), + PSIL_PDMA_XY_PKT(0x4611), + PSIL_PDMA_XY_PKT(0x4612), + PSIL_PDMA_XY_PKT(0x4613), + PSIL_PDMA_XY_PKT(0x4614), + PSIL_PDMA_XY_PKT(0x4615), + PSIL_PDMA_XY_PKT(0x4616), + PSIL_PDMA_XY_PKT(0x4617), + PSIL_PDMA_XY_PKT(0x4618), + PSIL_PDMA_XY_PKT(0x4619), + PSIL_PDMA_XY_PKT(0x461a), + PSIL_PDMA_XY_PKT(0x461b), + PSIL_PDMA_XY_PKT(0x461c), + PSIL_PDMA_XY_PKT(0x461d), + PSIL_PDMA_XY_PKT(0x461e), + PSIL_PDMA_XY_PKT(0x461f), + /* PDMA_USART_G0 - UART0-1 */ + PSIL_PDMA_XY_PKT(0x4700), + PSIL_PDMA_XY_PKT(0x4701), + /* PDMA_USART_G1 - UART2-3 */ + PSIL_PDMA_XY_PKT(0x4702), + PSIL_PDMA_XY_PKT(0x4703), + /* PDMA_USART_G2 - UART4-9 */ + PSIL_PDMA_XY_PKT(0x4704), + PSIL_PDMA_XY_PKT(0x4705), + PSIL_PDMA_XY_PKT(0x4706), + PSIL_PDMA_XY_PKT(0x4707), + PSIL_PDMA_XY_PKT(0x4708), + PSIL_PDMA_XY_PKT(0x4709), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ + PSIL_PDMA_XY_PKT(0x7100), + PSIL_PDMA_XY_PKT(0x7101), + PSIL_PDMA_XY_PKT(0x7102), + PSIL_PDMA_XY_PKT(0x7103), + /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ + PSIL_PDMA_XY_PKT(0x7300), + /* MCU_PDMA_ADC - ADC0-1 */ + PSIL_PDMA_XY_TR(0x7400), + PSIL_PDMA_XY_TR(0x7401), + PSIL_PDMA_XY_TR(0x7402), + PSIL_PDMA_XY_TR(0x7403), + /* SA2UL */ + PSIL_SA2UL(0x7500, 0), + PSIL_SA2UL(0x7501, 0), + PSIL_SA2UL(0x7502, 0), + PSIL_SA2UL(0x7503, 0), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep j784s4_dst_ep_map[] = { + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), + /* SA2UL */ + PSIL_SA2UL(0xf500, 1), + PSIL_SA2UL(0xf501, 1), +}; + +struct psil_ep_map j784s4_ep_map = { + .name = "j784s4", + .src = j784s4_src_ep_map, + .src_count = ARRAY_SIZE(j784s4_src_ep_map), + .dst = j784s4_dst_ep_map, + .dst_count = ARRAY_SIZE(j784s4_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h new file mode 100644 index 00000000000..b80916a7ff8 --- /dev/null +++ b/drivers/dma/ti/k3-psil-priv.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - 
https://www.ti.com
+ */
+
+#ifndef K3_PSIL_PRIV_H_
+#define K3_PSIL_PRIV_H_
+
+#include "k3-psil.h"
+
+struct psil_ep {
+	u32 thread_id;
+	struct psil_endpoint_config ep_config;
+};
+
+/**
+ * struct psil_ep_map - PSI-L thread ID configuration maps
+ * @name:	Name of the map, set it to the name of the SoC
+ * @src:	Array of source PSI-L thread configurations
+ * @src_count:	Number of entries in the src array
+ * @dst:	Array of destination PSI-L thread configurations
+ * @dst_count:	Number of entries in the dst array
+ *
+ * In case of a symmetric configuration for a matching src/dst thread pair (for
+ * example 0x4400 and 0xc400), only the src configuration needs to be present.
+ * If no dst configuration is found, the code looks up (dst_thread_id & ~0x8000)
+ * to find the symmetric match.
+ */
+struct psil_ep_map {
+	char *name;
+	struct psil_ep *src;
+	int src_count;
+	struct psil_ep *dst;
+	int dst_count;
+};
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+extern struct psil_ep_map j721s2_ep_map;
+extern struct psil_ep_map am64_ep_map;
+extern struct psil_ep_map am62_ep_map;
+extern struct psil_ep_map am62a_ep_map;
+extern struct psil_ep_map j784s4_ep_map;
+extern struct psil_ep_map am62p_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
new file mode 100644
index 00000000000..39798844a8a
--- /dev/null
+++ b/drivers/dma/ti/k3-psil.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "k3-psil-priv.h"
+
+static const struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+	int i;
+
+	if (!soc_ep_map) {
+		if (IS_ENABLED(CONFIG_SOC_K3_AM654))
+			soc_ep_map = &am654_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_J721E))
+			soc_ep_map = &j721e_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_J7200))
+			soc_ep_map = &j721e_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_J721S2))
+			soc_ep_map = &j721s2_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_AM642))
+			soc_ep_map = &am64_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_AM625))
+			soc_ep_map = &am62_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_AM62A7))
+			soc_ep_map = &am62a_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_J784S4))
+			soc_ep_map = &j784s4_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_AM62P5))
+			soc_ep_map = &am62p_ep_map;
+		else if (IS_ENABLED(CONFIG_SOC_K3_J722S))
+			soc_ep_map = &am62p_ep_map;
+	}
+
+	/* No supported SoC matched above: avoid a NULL dereference below */
+	if (!soc_ep_map)
+		return ERR_PTR(-EINVAL);
+
+	if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+		/* check in destination thread map */
+		for (i = 0; i < soc_ep_map->dst_count; i++) {
+			if (soc_ep_map->dst[i].thread_id == thread_id)
+				return &soc_ep_map->dst[i].ep_config;
+		}
+	}
+
+	thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+	if (soc_ep_map->src) {
+		for (i = 0; i < soc_ep_map->src_count; i++) {
+			if (soc_ep_map->src[i].thread_id == thread_id)
+				return &soc_ep_map->src[i].ep_config;
+		}
+	}
+
+	return ERR_PTR(-ENOENT);
+}
diff --git a/drivers/dma/ti/k3-psil.h b/drivers/dma/ti/k3-psil.h
new file mode 100644
index 00000000000..af60a9924e2
--- /dev/null
+++ b/drivers/dma/ti/k3-psil.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_PSIL_H_
+#define K3_PSIL_H_
+
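+/*
+ * PSI-L destination (TX) thread IDs carry bit 15 (K3_PSIL_DST_THREAD_ID_OFFSET
+ * below): 0xc400 is the destination pair of source thread 0x4400. k3-psil.c
+ * clears this bit when falling back to the symmetric source entry of a map.
+ */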
+#include <linux/types.h> + +#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000 + +struct device; + +/** + * enum udma_tp_level - Channel Throughput Levels + * @UDMA_TP_NORMAL: Normal channel + * @UDMA_TP_HIGH: High Throughput channel + * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel + */ +enum udma_tp_level { + UDMA_TP_NORMAL = 0, + UDMA_TP_HIGH, + UDMA_TP_ULTRAHIGH, + UDMA_TP_LAST, +}; + +/** + * enum psil_endpoint_type - PSI-L Endpoint type + * @PSIL_EP_NATIVE: Normal channel + * @PSIL_EP_PDMA_XY: XY mode PDMA + * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA + * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA + */ +enum psil_endpoint_type { + PSIL_EP_NATIVE = 0, + PSIL_EP_PDMA_XY, + PSIL_EP_PDMA_MCAN, + PSIL_EP_PDMA_AASRC, +}; + +/** + * struct psil_endpoint_config - PSI-L Endpoint configuration + * @ep_type: PSI-L endpoint type + * @pkt_mode: If set, the channel must be in Packet mode, otherwise in + * TR mode + * @notdpkt: TDCM must be suppressed on the TX channel + * @needs_epib: Endpoint needs EPIB + * @psd_size: If set, PSdata is used by the endpoint + * @channel_tpl: Desired throughput level for the channel + * @pdma_acc32: ACC32 must be enabled on the PDMA side + * @pdma_burst: BURST must be enabled on the PDMA side + * @mapped_channel_id: PKTDMA thread to channel mapping for mapped + * channels. The thread must be serviced by the specified + * channel if mapped_channel_id is >= 0 in case of PKTDMA + * @flow_start: PKTDMA flow range start of mapped channel. Unmapped + * channels use flow_id == chan_id + * @flow_num: PKTDMA flow count of mapped channel. Unmapped + * channels use flow_id == chan_id + * @default_flow_id: PKTDMA default (r)flow index of mapped channel. + * Must be within the flow range of the mapped channel. + */ +struct psil_endpoint_config { + enum psil_endpoint_type ep_type; + + unsigned pkt_mode:1; + unsigned notdpkt:1; + unsigned needs_epib:1; + u32 psd_size; + enum udma_tp_level channel_tpl; + + /* PDMA properties, valid for PSIL_EP_PDMA_* */ + unsigned pdma_acc32:1; + unsigned pdma_burst:1; + + /* PKTDMA mapped channel */ + int mapped_channel_id; + /* PKTDMA tflow and rflow ranges for mapped channel */ + u16 flow_start; + u16 flow_num; + u16 default_flow_id; +}; +#endif /* K3_PSIL_H_ */ diff --git a/drivers/dma/ti/k3-udma-hwdef.h b/drivers/dma/ti/k3-udma-hwdef.h new file mode 100644 index 00000000000..3d6b4d10fff --- /dev/null +++ b/drivers/dma/ti/k3-udma-hwdef.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef K3_NAVSS_UDMA_HWDEF_H_ +#define K3_NAVSS_UDMA_HWDEF_H_ + +#include <linux/bitops.h> +#define UDMA_PSIL_DST_THREAD_ID_OFFSET 0x8000 + +/* Global registers */ +#define UDMA_REV_REG 0x0 +#define UDMA_PERF_CTL_REG 0x4 +#define UDMA_EMU_CTL_REG 0x8 +#define UDMA_PSIL_TO_REG 0x10 +#define UDMA_UTC_CTL_REG 0x1c +#define UDMA_CAP_REG(i) (0x20 + (i * 4)) +#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80 +#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88 + +/* RX Flow regs */ +#define UDMA_RFLOW_RFA_REG 0x0 +#define UDMA_RFLOW_RFB_REG 0x4 +#define UDMA_RFLOW_RFC_REG 0x8 +#define UDMA_RFLOW_RFD_REG 0xc +#define UDMA_RFLOW_RFE_REG 0x10 +#define UDMA_RFLOW_RFF_REG 0x14 +#define UDMA_RFLOW_RFG_REG 0x18 +#define UDMA_RFLOW_RFH_REG 0x1c + +#define UDMA_RFLOW_REG(x) (UDMA_RFLOW_RF##x##_REG) + +/* TX chan regs */ +#define UDMA_TCHAN_TCFG_REG 0x0 +#define UDMA_TCHAN_TCREDIT_REG 0x4 +#define UDMA_TCHAN_TCQ_REG 0x14 +#define UDMA_TCHAN_TOES_REG(i) (0x20 + (i) * 4) +#define UDMA_TCHAN_TEOES_REG 0x60 +#define UDMA_TCHAN_TPRI_CTRL_REG 0x64 +#define UDMA_TCHAN_THREAD_ID_REG 0x68 +#define UDMA_TCHAN_TFIFO_DEPTH_REG 0x70 +#define UDMA_TCHAN_TST_SCHED_REG 0x80 + +/* RX chan regs */ +#define UDMA_RCHAN_RCFG_REG 0x0 +#define UDMA_RCHAN_RCQ_REG 0x14 +#define UDMA_RCHAN_ROES_REG(i) (0x20 + (i) * 4) +#define UDMA_RCHAN_REOES_REG 0x60 +#define UDMA_RCHAN_RPRI_CTRL_REG 0x64 +#define UDMA_RCHAN_THREAD_ID_REG 0x68 +#define UDMA_RCHAN_RST_SCHED_REG 0x80 +#define UDMA_RCHAN_RFLOW_RNG_REG 0xf0 + +/* TX chan RT regs */ +#define UDMA_TCHAN_RT_CTL_REG 0x0 +#define UDMA_TCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_TCHAN_RT_STDATA_REG 0x80 + +#define UDMA_TCHAN_RT_PEERn_REG(i) (0x200 + (i * 0x4)) +#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_TCHAN_RT_PEERn_REG(0) /* PSI-L: 0x400 */ +#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_TCHAN_RT_PEERn_REG(1) /* PSI-L: 0x401 */ +#define UDMA_TCHAN_RT_PEER_BCNT_REG \ + UDMA_TCHAN_RT_PEERn_REG(4) /* PSI-L: 0x404 */ +#define UDMA_TCHAN_RT_PEER_RT_EN_REG \ + UDMA_TCHAN_RT_PEERn_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_TCHAN_RT_PCNT_REG 0x400 +#define UDMA_TCHAN_RT_BCNT_REG 0x408 +#define UDMA_TCHAN_RT_SBCNT_REG 0x410 + +/* RX chan RT regs */ +#define UDMA_RCHAN_RT_CTL_REG 0x0 +#define UDMA_RCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_RCHAN_RT_STDATA_REG 0x80 + +#define UDMA_RCHAN_RT_PEERn_REG(i) (0x200 + (i * 0x4)) +#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_RCHAN_RT_PEERn_REG(0) /* PSI-L: 0x400 */ +#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_RCHAN_RT_PEERn_REG(1) /* PSI-L: 0x401 */ +#define UDMA_RCHAN_RT_PEER_BCNT_REG \ + UDMA_RCHAN_RT_PEERn_REG(4) /* PSI-L: 0x404 */ +#define UDMA_RCHAN_RT_PEER_RT_EN_REG \ + UDMA_RCHAN_RT_PEERn_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_RCHAN_RT_PCNT_REG 0x400 +#define UDMA_RCHAN_RT_BCNT_REG 0x408 +#define UDMA_RCHAN_RT_SBCNT_REG 0x410 + +/* UDMA_TCHAN_TCFG_REG/UDMA_RCHAN_RCFG_REG */ +#define UDMA_CHAN_CFG_PAUSE_ON_ERR BIT(31) +#define UDMA_TCHAN_CFG_FILT_EINFO BIT(30) +#define UDMA_TCHAN_CFG_FILT_PSWORDS BIT(29) +#define UDMA_CHAN_CFG_ATYPE_MASK GENMASK(25, 24) +#define UDMA_CHAN_CFG_ATYPE_SHIFT 24 +#define UDMA_CHAN_CFG_CHAN_TYPE_MASK GENMASK(19, 16) +#define UDMA_CHAN_CFG_CHAN_TYPE_SHIFT 16 +/* + * PBVR - using pass by value rings + * PBRR - using pass by reference rings + * 3RDP - Third Party DMA + * BC - Block Copy + * SB - single buffer packet mode enabled + */ +#define UDMA_CHAN_CFG_CHAN_TYPE_PACKET_PBRR \ + (2 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define UDMA_CHAN_CFG_CHAN_TYPE_PACKET_SB_PBRR \ + (3 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) 
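+/* Third-party (PDMA-serviced) channel types; BC = block copy */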
+#define UDMA_CHAN_CFG_CHAN_TYPE_3RDP_PBRR \ + (10 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define UDMA_CHAN_CFG_CHAN_TYPE_3RDP_PBVR \ + (11 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define UDMA_CHAN_CFG_CHAN_TYPE_3RDP_BC_PBRR \ + (12 << UDMA_CHAN_CFG_CHAN_TYPE_SHIFT) +#define UDMA_RCHAN_CFG_IGNORE_SHORT BIT(15) +#define UDMA_RCHAN_CFG_IGNORE_LONG BIT(14) +#define UDMA_TCHAN_CFG_SUPR_TDPKT BIT(8) +#define UDMA_CHAN_CFG_FETCH_SIZE_MASK GENMASK(6, 0) +#define UDMA_CHAN_CFG_FETCH_SIZE_SHIFT 0 + +/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */ +#define UDMA_CHAN_RT_CTL_EN BIT(31) +#define UDMA_CHAN_RT_CTL_TDOWN BIT(30) +#define UDMA_CHAN_RT_CTL_PAUSE BIT(29) +#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28) +#define UDMA_CHAN_RT_CTL_ERROR BIT(0) + +/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */ +#define UDMA_PEER_RT_EN_ENABLE BIT(31) +#define UDMA_PEER_RT_EN_TEARDOWN BIT(30) +#define UDMA_PEER_RT_EN_PAUSE BIT(29) +#define UDMA_PEER_RT_EN_FLUSH BIT(28) +#define UDMA_PEER_RT_EN_IDLE BIT(1) + +/* RX Flow reg RFA */ +#define UDMA_RFLOW_RFA_EINFO BIT(30) +#define UDMA_RFLOW_RFA_PSINFO BIT(29) +#define UDMA_RFLOW_RFA_ERR_HANDLING BIT(28) +#define UDMA_RFLOW_RFA_DESC_TYPE_MASK GENMASK(27, 26) +#define UDMA_RFLOW_RFA_DESC_TYPE_SHIFT 26 +#define UDMA_RFLOW_RFA_PS_LOC BIT(25) +#define UDMA_RFLOW_RFA_SOP_OFF_MASK GENMASK(24, 16) +#define UDMA_RFLOW_RFA_SOP_OFF_SHIFT 16 +#define UDMA_RFLOW_RFA_DEST_QNUM_MASK GENMASK(15, 0) +#define UDMA_RFLOW_RFA_DEST_QNUM_SHIFT 0 + +/* RX Flow reg RFC */ +#define UDMA_RFLOW_RFC_SRC_TAG_HI_SEL_SHIFT 28 +#define UDMA_RFLOW_RFC_SRC_TAG_LO_SEL_SHIFT 24 +#define UDMA_RFLOW_RFC_DST_TAG_HI_SEL_SHIFT 20 +#define UDMA_RFLOW_RFC_DST_TAG_LO_SE_SHIFT 16 + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG + */ +#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24) +#define PDMA_STATIC_TR_X_SHIFT (24) +#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0) +#define PDMA_STATIC_TR_Y_SHIFT (0) + +#define PDMA_STATIC_TR_Y(x) \ + (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK) +#define PDMA_STATIC_TR_X(x) \ + (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG + */ +#define PDMA_STATIC_TR_Z_MASK GENMASK(11, 0) +#define PDMA_STATIC_TR_Z_SHIFT (0) +#define PDMA_STATIC_TR_Z(x) \ + (((x) << PDMA_STATIC_TR_Z_SHIFT) & PDMA_STATIC_TR_Z_MASK) + +#endif /* K3_NAVSS_UDMA_HWDEF_H_ */ diff --git a/drivers/dma/ti/k3-udma-u-boot.c b/drivers/dma/ti/k3-udma-u-boot.c new file mode 100644 index 00000000000..3e04f551e29 --- /dev/null +++ b/drivers/dma/ti/k3-udma-u-boot.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com + */ + +#define UDMA_RCHAN_RFLOW_RNG_FLOWID_CNT_SHIFT (16) + +/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ +#define UDMA_RFLOW_SRCTAG_NONE 0 +#define UDMA_RFLOW_SRCTAG_CFG_TAG 1 +#define UDMA_RFLOW_SRCTAG_FLOW_ID 2 +#define UDMA_RFLOW_SRCTAG_SRC_TAG 4 + +#define UDMA_RFLOW_DSTTAG_NONE 0 +#define UDMA_RFLOW_DSTTAG_CFG_TAG 1 +#define UDMA_RFLOW_DSTTAG_FLOW_ID 2 +#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 +#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 + +#define UDMA_RFLOW_RFC_DEFAULT \ + ((UDMA_RFLOW_SRCTAG_NONE << UDMA_RFLOW_RFC_SRC_TAG_HI_SEL_SHIFT) | \ + (UDMA_RFLOW_SRCTAG_SRC_TAG << UDMA_RFLOW_RFC_SRC_TAG_LO_SEL_SHIFT) | \ + (UDMA_RFLOW_DSTTAG_DST_TAG_HI << UDMA_RFLOW_RFC_DST_TAG_HI_SEL_SHIFT) | \ + (UDMA_RFLOW_DSTTAG_DST_TAG_LO << 
UDMA_RFLOW_RFC_DST_TAG_LO_SE_SHIFT)) + +#define UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT (16) + +/* TCHAN */ +static inline u32 udma_tchan_read(struct udma_tchan *tchan, int reg) +{ + if (!tchan) + return 0; + return udma_read(tchan->reg_chan, reg); +} + +static inline void udma_tchan_write(struct udma_tchan *tchan, int reg, u32 val) +{ + if (!tchan) + return; + udma_write(tchan->reg_chan, reg, val); +} + +static inline void udma_tchan_update_bits(struct udma_tchan *tchan, int reg, + u32 mask, u32 val) +{ + if (!tchan) + return; + udma_update_bits(tchan->reg_chan, reg, mask, val); +} + +/* RCHAN */ +static inline u32 udma_rchan_read(struct udma_rchan *rchan, int reg) +{ + if (!rchan) + return 0; + return udma_read(rchan->reg_chan, reg); +} + +static inline void udma_rchan_write(struct udma_rchan *rchan, int reg, u32 val) +{ + if (!rchan) + return; + udma_write(rchan->reg_chan, reg, val); +} + +static inline void udma_rchan_update_bits(struct udma_rchan *rchan, int reg, + u32 mask, u32 val) +{ + if (!rchan) + return; + udma_update_bits(rchan->reg_chan, reg, mask, val); +} + +/* RFLOW */ +static inline u32 udma_rflow_read(struct udma_rflow *rflow, int reg) +{ + if (!rflow) + return 0; + return udma_read(rflow->reg_rflow, reg); +} + +static inline void udma_rflow_write(struct udma_rflow *rflow, int reg, u32 val) +{ + if (!rflow) + return; + udma_write(rflow->reg_rflow, reg, val); +} + +static inline void udma_rflow_update_bits(struct udma_rflow *rflow, int reg, + u32 mask, u32 val) +{ + if (!rflow) + return; + udma_update_bits(rflow->reg_rflow, reg, mask, val); +} + +static void udma_alloc_tchan_raw(struct udma_chan *uc) +{ + u32 mode, fetch_size; + + if (uc->config.pkt_mode) + mode = UDMA_CHAN_CFG_CHAN_TYPE_PACKET_PBRR; + else + mode = UDMA_CHAN_CFG_CHAN_TYPE_3RDP_BC_PBRR; + + udma_tchan_update_bits(uc->tchan, UDMA_TCHAN_TCFG_REG, + UDMA_CHAN_CFG_CHAN_TYPE_MASK, mode); + + if (uc->config.dir == DMA_MEM_TO_MEM) + fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + else + fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, 0) >> 2; + + udma_tchan_update_bits(uc->tchan, UDMA_TCHAN_TCFG_REG, + UDMA_CHAN_CFG_FETCH_SIZE_MASK, fetch_size); + udma_tchan_write(uc->tchan, UDMA_TCHAN_TCQ_REG, + k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring)); +} + +static void udma_alloc_rchan_raw(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring); + int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring); + int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring); + u32 rx_einfo_present = 0, rx_psinfo_present = 0; + u32 mode, fetch_size, rxcq_num; + + if (uc->config.pkt_mode) + mode = UDMA_CHAN_CFG_CHAN_TYPE_PACKET_PBRR; + else + mode = UDMA_CHAN_CFG_CHAN_TYPE_3RDP_BC_PBRR; + + udma_rchan_update_bits(uc->rchan, UDMA_RCHAN_RCFG_REG, + UDMA_CHAN_CFG_CHAN_TYPE_MASK, mode); + + if (uc->config.dir == DMA_MEM_TO_MEM) { + fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + rxcq_num = tc_ring; + } else { + fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, 0) >> 2; + rxcq_num = rx_ring; + } + + udma_rchan_update_bits(uc->rchan, UDMA_RCHAN_RCFG_REG, + UDMA_CHAN_CFG_FETCH_SIZE_MASK, fetch_size); + udma_rchan_write(uc->rchan, UDMA_RCHAN_RCQ_REG, rxcq_num); + + if (uc->config.dir == DMA_MEM_TO_MEM) + return; + + if (ud->match_data->type == DMA_TYPE_UDMA && + uc->rflow->id != uc->rchan->id && + uc->config.dir != DMA_MEM_TO_MEM) + udma_rchan_write(uc->rchan, UDMA_RCHAN_RFLOW_RNG_REG, uc->rflow->id | + 1 
<< UDMA_RCHAN_RFLOW_RNG_FLOWID_CNT_SHIFT);
+
+	if (uc->config.needs_epib)
+		rx_einfo_present = UDMA_RFLOW_RFA_EINFO;
+
+	if (uc->config.psd_size)
+		rx_psinfo_present = UDMA_RFLOW_RFA_PSINFO;
+
+	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(A),
+			 rx_einfo_present | rx_psinfo_present | rxcq_num);
+
+	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(C), UDMA_RFLOW_RFC_DEFAULT);
+	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(D),
+			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
+	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(E),
+			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
+	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(G), fd_ring);
+	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(H),
+			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
+}
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
new file mode 100644
index 00000000000..723265ab2e5
--- /dev/null
+++ b/drivers/dma/ti/k3-udma.c
@@ -0,0 +1,2878 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+#define pr_fmt(fmt) "udma: " fmt
+
+#include <cpu_func.h>
+#include <log.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <malloc.h>
+#include <net.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/sizes.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <dm/read.h>
+#include <dm/of_access.h>
+#include <dma.h>
+#include <dma-uclass.h>
+#include <linux/delay.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/printk.h>
+#include <linux/soc/ti/k3-navss-ringacc.h>
+#include <linux/soc/ti/cppi5.h>
+#include <linux/soc/ti/ti-udma.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+#include "k3-udma-hwdef.h"
+#include "k3-psil-priv.h"
+
+#define K3_UDMA_MAX_RFLOWS 1024
+#define K3_UDMA_MAX_TR 2
+
+struct udma_chan;
+
+enum k3_dma_type {
+	DMA_TYPE_UDMA = 0,
+	DMA_TYPE_BCDMA,
+	DMA_TYPE_PKTDMA,
+};
+
+enum udma_mmr {
+	MMR_GCFG = 0,
+	MMR_BCHANRT,
+	MMR_RCHANRT,
+	MMR_TCHANRT,
+	MMR_RCHAN,
+	MMR_TCHAN,
+	MMR_RFLOW,
+	MMR_LAST,
+};
+
+static const char * const mmr_names[] = {
+	[MMR_GCFG] = "gcfg",
+	[MMR_BCHANRT] = "bchanrt",
+	[MMR_RCHANRT] = "rchanrt",
+	[MMR_TCHANRT] = "tchanrt",
+	[MMR_RCHAN] = "rchan",
+	[MMR_TCHAN] = "tchan",
+	[MMR_RFLOW] = "rflow",
+};
+
+struct udma_tchan {
+	void __iomem *reg_chan;
+	void __iomem *reg_rt;
+
+	int id;
+	struct k3_nav_ring *t_ring; /* Transmit ring */
+	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
+	int tflow_id; /* applicable only for PKTDMA */
+};
+
+#define udma_bchan udma_tchan
+
+struct udma_rflow {
+	void __iomem *reg_rflow;
+	int id;
+	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
+	struct k3_nav_ring *r_ring; /* Receive ring */
+};
+
+struct udma_rchan {
+	void __iomem *reg_chan;
+	void __iomem *reg_rt;
+
+	int id;
+};
+
+struct udma_oes_offsets {
+	/* K3 UDMA Output Event Offset */
+	u32 udma_rchan;
+
+	/* BCDMA Output Event Offsets */
+	u32 bcdma_bchan_data;
+	u32 bcdma_bchan_ring;
+	u32 bcdma_tchan_data;
+	u32 bcdma_tchan_ring;
+	u32 bcdma_rchan_data;
+	u32 bcdma_rchan_ring;
+
+	/* PKTDMA Output Event Offsets */
+	u32 pktdma_tchan_flow;
+	u32 pktdma_rchan_flow;
+};
+
+#define UDMA_FLAG_PDMA_ACC32 BIT(0)
+#define UDMA_FLAG_PDMA_BURST BIT(1)
+#define UDMA_FLAG_TDTYPE BIT(2)
+
+struct udma_match_data {
+	enum k3_dma_type type;
+	u32 psil_base;
+	bool enable_memcpy_support;
+	u32 flags;
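+	/* Mask for the Z count field of the PDMA static TR (see PDMA_STATIC_TR_Z_MASK) */
+	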
u32 statictr_z_mask; + struct udma_oes_offsets oes; + + u8 tpl_levels; + u32 level_start_idx[]; +}; + +enum udma_rm_range { + RM_RANGE_BCHAN = 0, + RM_RANGE_TCHAN, + RM_RANGE_RCHAN, + RM_RANGE_RFLOW, + RM_RANGE_TFLOW, + RM_RANGE_LAST, +}; + +struct udma_tisci_rm { + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_udmap_ops *tisci_udmap_ops; + u32 tisci_dev_id; + + /* tisci information for PSI-L thread pairing/unpairing */ + const struct ti_sci_rm_psil_ops *tisci_psil_ops; + u32 tisci_navss_dev_id; + + struct ti_sci_resource *rm_ranges[RM_RANGE_LAST]; +}; + +struct udma_dev { + struct udevice *dev; + void __iomem *mmrs[MMR_LAST]; + + struct udma_tisci_rm tisci_rm; + struct k3_nav_ringacc *ringacc; + + u32 features; + + int bchan_cnt; + int tchan_cnt; + int echan_cnt; + int rchan_cnt; + int rflow_cnt; + int tflow_cnt; + unsigned long *bchan_map; + unsigned long *tchan_map; + unsigned long *rchan_map; + unsigned long *rflow_map; + unsigned long *rflow_map_reserved; + unsigned long *tflow_map; + + struct udma_bchan *bchans; + struct udma_tchan *tchans; + struct udma_rchan *rchans; + struct udma_rflow *rflows; + + struct udma_match_data *match_data; + void *bc_desc; + + struct udma_chan *channels; + u32 psil_base; + + u32 ch_count; +}; + +struct udma_chan_config { + u32 psd_size; /* size of Protocol Specific Data */ + u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */ + u32 hdesc_size; /* Size of a packet descriptor in packet mode */ + int remote_thread_id; + u32 atype; + u32 src_thread; + u32 dst_thread; + enum psil_endpoint_type ep_type; + enum udma_tp_level channel_tpl; /* Channel Throughput Level */ + + /* PKTDMA mapped channel */ + int mapped_channel_id; + /* PKTDMA default tflow or rflow for mapped channel */ + int default_flow_id; + + enum dma_direction dir; + + unsigned int pkt_mode:1; /* TR or packet */ + unsigned int needs_epib:1; /* EPIB is needed for the communication or not */ + unsigned int enable_acc32:1; + unsigned int enable_burst:1; + unsigned int notdpkt:1; /* Suppress sending TDC packet */ +}; + +struct udma_chan { + struct udma_dev *ud; + char name[20]; + + struct udma_bchan *bchan; + struct udma_tchan *tchan; + struct udma_rchan *rchan; + struct udma_rflow *rflow; + + struct ti_udma_drv_chan_cfg_data cfg_data; + + u32 bcnt; /* number of bytes completed since the start of the channel */ + + struct udma_chan_config config; + + u32 id; + + struct cppi5_host_desc_t *desc_tx; + bool in_use; + void *desc_rx; + u32 num_rx_bufs; + u32 desc_rx_cur; + +}; + +#define UDMA_CH_1000(ch) (ch * 0x1000) +#define UDMA_CH_100(ch) (ch * 0x100) +#define UDMA_CH_40(ch) (ch * 0x40) + +#ifdef PKTBUFSRX +#define UDMA_RX_DESC_NUM PKTBUFSRX +#else +#define UDMA_RX_DESC_NUM 4 +#endif + +/* Generic register access functions */ +static inline u32 udma_read(void __iomem *base, int reg) +{ + u32 v; + + v = __raw_readl(base + reg); + pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg); + return v; +} + +static inline void udma_write(void __iomem *base, int reg, u32 val) +{ + pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg); + __raw_writel(val, base + reg); +} + +static inline void udma_update_bits(void __iomem *base, int reg, + u32 mask, u32 val) +{ + u32 tmp, orig; + + orig = udma_read(base, reg); + tmp = orig & ~mask; + tmp |= (val & mask); + + if (tmp != orig) + udma_write(base, reg, tmp); +} + +/* TCHANRT */ +static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg) +{ + if (!tchan) + return 0; + return udma_read(tchan->reg_rt, reg); +} + +static inline 
void udma_tchanrt_write(struct udma_tchan *tchan, + int reg, u32 val) +{ + if (!tchan) + return; + udma_write(tchan->reg_rt, reg, val); +} + +/* RCHANRT */ +static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg) +{ + if (!rchan) + return 0; + return udma_read(rchan->reg_rt, reg); +} + +static inline void udma_rchanrt_write(struct udma_rchan *rchan, + int reg, u32 val) +{ + if (!rchan) + return; + udma_write(rchan->reg_rt, reg, val); +} + +static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET; + + return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET; + + return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static inline char *udma_get_dir_text(enum dma_direction dir) +{ + switch (dir) { + case DMA_DEV_TO_MEM: + return "DEV_TO_MEM"; + case DMA_MEM_TO_DEV: + return "MEM_TO_DEV"; + case DMA_MEM_TO_MEM: + return "MEM_TO_MEM"; + case DMA_DEV_TO_DEV: + return "DEV_TO_DEV"; + default: + break; + } + + return "invalid"; +} + +#include "k3-udma-u-boot.c" + +static void udma_reset_uchan(struct udma_chan *uc) +{ + memset(&uc->config, 0, sizeof(uc->config)); + uc->config.remote_thread_id = -1; + uc->config.mapped_channel_id = -1; + uc->config.default_flow_id = -1; +} + +static inline bool udma_is_chan_running(struct udma_chan *uc) +{ + u32 trt_ctl = 0; + u32 rrt_ctl = 0; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n", + __func__, rrt_ctl, + udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG)); + break; + case DMA_MEM_TO_DEV: + trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n", + __func__, trt_ctl, + udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG)); + break; + case DMA_MEM_TO_MEM: + trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + break; + default: + break; + } + + if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) + return true; + + return false; +} + +static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) +{ + struct k3_nav_ring *ring = NULL; + int ret = -ENOENT; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->r_ring; + break; + case DMA_MEM_TO_DEV: + ring = uc->tchan->tc_ring; + break; + case DMA_MEM_TO_MEM: + ring = uc->tchan->tc_ring; + break; + default: + break; + } + + if (ring && k3_nav_ringacc_ring_get_occ(ring)) + ret = k3_nav_ringacc_ring_pop(ring, addr); + + return ret; +} + +static void udma_reset_rings(struct udma_chan *uc) +{ + struct k3_nav_ring *ring1 = NULL; + struct k3_nav_ring *ring2 = NULL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring1 = uc->rflow->fd_ring; + ring2 = uc->rflow->r_ring; + break; + case DMA_MEM_TO_DEV: + ring1 = uc->tchan->t_ring; + ring2 = uc->tchan->tc_ring; + break; + case DMA_MEM_TO_MEM: + ring1 = uc->tchan->t_ring; + ring2 = uc->tchan->tc_ring; + break; + default: + break; + } + + if (ring1) + 
k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1)); + if (ring2) + k3_nav_ringacc_ring_reset(ring2); +} + +static void udma_reset_counters(struct udma_chan *uc) +{ + u32 val; + + if (uc->tchan) { + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val); + + if (!uc->bchan) { + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val); + } + } + + if (uc->rchan) { + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val); + } + + uc->bcnt = 0; +} + +static inline int udma_stop_hard(struct udma_chan *uc) +{ + pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id); + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_start(struct udma_chan *uc) +{ + /* Channel is already running, no need to proceed further */ + if (udma_is_chan_running(uc)) + goto out; + + pr_debug("%s: chan:%d dir:%s\n", + __func__, uc->id, udma_get_dir_text(uc->config.dir)); + + /* Make sure that we clear the teardown bit, if it is set */ + udma_stop_hard(uc); + + /* Reset all counters */ + udma_reset_counters(uc); + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + /* Enable remote */ + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n", + __func__, + udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_CTL_REG), + udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG)); + break; + case DMA_MEM_TO_DEV: + /* Enable remote */ + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n", + __func__, + udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_CTL_REG), + udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG)); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + default: + return -EINVAL; + } + + pr_debug("%s: DONE chan:%d\n", __func__, uc->id); +out: + return 0; +} + +static 
inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
+{
+	int i = 0;
+	u32 val;
+
+	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+			   UDMA_CHAN_RT_CTL_EN |
+			   UDMA_CHAN_RT_CTL_TDOWN);
+
+	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+
+	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+		udelay(1);
+		if (i > 1000) {
+			printf("%s: TIMEOUT!\n", __func__);
+			break;
+		}
+		i++;
+	}
+
+	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
+	if (val & UDMA_PEER_RT_EN_ENABLE)
+		printf("%s: peer not stopped, TIMEOUT!\n", __func__);
+}
+
+static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
+{
+	int i = 0;
+	u32 val;
+
+	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+			   UDMA_PEER_RT_EN_ENABLE |
+			   UDMA_PEER_RT_EN_TEARDOWN);
+
+	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
+
+	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
+		udelay(1);
+		if (i > 1000) {
+			printf("%s: TIMEOUT!\n", __func__);
+			break;
+		}
+		i++;
+	}
+
+	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
+	if (val & UDMA_PEER_RT_EN_ENABLE)
+		printf("%s: peer not stopped, TIMEOUT!\n", __func__);
+}
+
+static inline int udma_stop(struct udma_chan *uc)
+{
+	pr_debug("%s: chan:%d dir:%s\n",
+		 __func__, uc->id, udma_get_dir_text(uc->config.dir));
+
+	udma_reset_counters(uc);
+	switch (uc->config.dir) {
+	case DMA_DEV_TO_MEM:
+		udma_stop_dev2mem(uc, true);
+		break;
+	case DMA_MEM_TO_DEV:
+		udma_stop_mem2dev(uc, true);
+		break;
+	case DMA_MEM_TO_MEM:
+		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
+{
+	int i = 1;
+
+	while (udma_pop_from_ring(uc, paddr)) {
+		udelay(1);
+		if (!(i % 1000000))
+			printf(".");
+		i++;
+	}
+}
+
+static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
+{
+	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+	if (id >= 0) {
+		if (test_bit(id, ud->rflow_map)) {
+			dev_err(ud->dev, "rflow%d is in use\n", id);
+			return ERR_PTR(-ENOENT);
+		}
+	} else {
+		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
+			  ud->rflow_cnt);
+
+		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
+		if (id >= ud->rflow_cnt)
+			return ERR_PTR(-ENOENT);
+	}
+
+	__set_bit(id, ud->rflow_map);
+	return &ud->rflows[id];
+}
+
+#define UDMA_RESERVE_RESOURCE(res)					\
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
+					       int id)			\
+{									\
+	if (id >= 0) {							\
+		if (test_bit(id, ud->res##_map)) {			\
+			dev_err(ud->dev, #res "%d is in use\n", id);	\
+			return ERR_PTR(-ENOENT);			\
+		}							\
+	} else {							\
+		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
+		if (id == ud->res##_cnt) {				\
+			return ERR_PTR(-ENOENT);			\
+		}							\
+	}								\
+									\
+	__set_bit(id, ud->res##_map);					\
+	return &ud->res##s[id];						\
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (uc->tchan) {
+		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+			uc->id, uc->tchan->id);
+		return 0;
+	}
+
+	uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
+	if (IS_ERR(uc->tchan))
+		return PTR_ERR(uc->tchan);
+
+	if (ud->tflow_cnt) {
+		int tflow_id;
+
+		/* Only PKTDMA has support for tx flows */
+		if (uc->config.default_flow_id >=
0) + tflow_id = uc->config.default_flow_id; + else + tflow_id = uc->tchan->id; + + if (test_bit(tflow_id, ud->tflow_map)) { + dev_err(ud->dev, "tflow%d is in use\n", tflow_id); + __clear_bit(uc->tchan->id, ud->tchan_map); + uc->tchan = NULL; + return -ENOENT; + } + + uc->tchan->tflow_id = tflow_id; + __set_bit(tflow_id, ud->tflow_map); + } else { + uc->tchan->tflow_id = -1; + } + + pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id); + + return 0; +} + +static int udma_get_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", + uc->id, uc->rchan->id); + return 0; + } + + uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id); + if (IS_ERR(uc->rchan)) + return PTR_ERR(uc->rchan); + + pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id); + + return 0; +} + +static int udma_get_chan_pair(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + int chan_id, end; + + if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { + dev_info(ud->dev, "chan%d: already have %d pair allocated\n", + uc->id, uc->tchan->id); + return 0; + } + + if (uc->tchan) { + dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", + uc->id, uc->tchan->id); + return -EBUSY; + } else if (uc->rchan) { + dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", + uc->id, uc->rchan->id); + return -EBUSY; + } + + /* Can be optimized, but let's have it like this for now */ + end = min(ud->tchan_cnt, ud->rchan_cnt); + for (chan_id = 0; chan_id < end; chan_id++) { + if (!test_bit(chan_id, ud->tchan_map) && + !test_bit(chan_id, ud->rchan_map)) + break; + } + + if (chan_id == end) + return -ENOENT; + + __set_bit(chan_id, ud->tchan_map); + __set_bit(chan_id, ud->rchan_map); + uc->tchan = &ud->tchans[chan_id]; + uc->rchan = &ud->rchans[chan_id]; + + pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id); + + return 0; +} + +static int udma_get_rflow(struct udma_chan *uc, int flow_id) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", + uc->id, uc->rflow->id); + return 0; + } + + if (!uc->rchan) + dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id); + + uc->rflow = __udma_reserve_rflow(ud, flow_id); + if (IS_ERR(uc->rflow)) + return PTR_ERR(uc->rflow); + + pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id); + return 0; +} + +static void udma_put_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, + uc->rchan->id); + __clear_bit(uc->rchan->id, ud->rchan_map); + uc->rchan = NULL; + } +} + +static void udma_put_tchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, + uc->tchan->id); + __clear_bit(uc->tchan->id, ud->tchan_map); + if (uc->tchan->tflow_id >= 0) + __clear_bit(uc->tchan->tflow_id, ud->tflow_map); + uc->tchan = NULL; + } +} + +static void udma_put_rflow(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, + uc->rflow->id); + __clear_bit(uc->rflow->id, ud->rflow_map); + uc->rflow = NULL; + } +} + +static void udma_free_tx_resources(struct udma_chan *uc) +{ + if (!uc->tchan) + return; + + k3_nav_ringacc_ring_free(uc->tchan->t_ring); + k3_nav_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->t_ring = NULL; + uc->tchan->tc_ring = NULL; + + udma_put_tchan(uc); +} + +static int 
udma_alloc_tx_resources(struct udma_chan *uc)
+{
+	struct k3_nav_ring_cfg ring_cfg;
+	struct udma_dev *ud = uc->ud;
+	struct udma_tchan *tchan;
+	int ring_idx, ret;
+
+	ret = udma_get_tchan(uc);
+	if (ret)
+		return ret;
+
+	tchan = uc->tchan;
+	/* tflow_id is -1 when the channel has no tx flow (non-PKTDMA) */
+	if (tchan->tflow_id >= 0)
+		ring_idx = tchan->tflow_id;
+	else
+		ring_idx = tchan->id;
+
+	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
+						&uc->tchan->t_ring,
+						&uc->tchan->tc_ring);
+	if (ret) {
+		ret = -EBUSY;
+		goto err_tx_ring;
+	}
+
+	memset(&ring_cfg, 0, sizeof(ring_cfg));
+	ring_cfg.size = 16;
+	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
+	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
+
+	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
+	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+
+	if (ret)
+		goto err_ringcfg;
+
+	return 0;
+
+err_ringcfg:
+	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
+	uc->tchan->tc_ring = NULL;
+	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
+	uc->tchan->t_ring = NULL;
+err_tx_ring:
+	udma_put_tchan(uc);
+
+	return ret;
+}
+
+static void udma_free_rx_resources(struct udma_chan *uc)
+{
+	if (!uc->rchan)
+		return;
+
+	if (uc->rflow) {
+		k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
+		k3_nav_ringacc_ring_free(uc->rflow->r_ring);
+		uc->rflow->fd_ring = NULL;
+		uc->rflow->r_ring = NULL;
+
+		udma_put_rflow(uc);
+	}
+
+	udma_put_rchan(uc);
+}
+
+static int udma_alloc_rx_resources(struct udma_chan *uc)
+{
+	struct k3_nav_ring_cfg ring_cfg;
+	struct udma_dev *ud = uc->ud;
+	struct udma_rflow *rflow;
+	int fd_ring_id;
+	int ret;
+
+	ret = udma_get_rchan(uc);
+	if (ret)
+		return ret;
+
+	/* For MEM_TO_MEM we don't need rflow or rings */
+	if (uc->config.dir == DMA_MEM_TO_MEM)
+		return 0;
+
+	if (uc->config.default_flow_id >= 0)
+		ret = udma_get_rflow(uc, uc->config.default_flow_id);
+	else
+		ret = udma_get_rflow(uc, uc->rchan->id);
+
+	if (ret) {
+		ret = -EBUSY;
+		goto err_rflow;
+	}
+
+	rflow = uc->rflow;
+	if (ud->tflow_cnt) {
+		fd_ring_id = ud->tflow_cnt + rflow->id;
+	} else {
+		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
+			     uc->rchan->id;
+	}
+
+	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
+						&rflow->fd_ring, &rflow->r_ring);
+	if (ret) {
+		ret = -EBUSY;
+		goto err_rx_ring;
+	}
+
+	memset(&ring_cfg, 0, sizeof(ring_cfg));
+	ring_cfg.size = 16;
+	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
+	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
+
+	ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+	ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
+	if (ret)
+		goto err_ringcfg;
+
+	return 0;
+
+err_ringcfg:
+	k3_nav_ringacc_ring_free(rflow->r_ring);
+	rflow->r_ring = NULL;
+	k3_nav_ringacc_ring_free(rflow->fd_ring);
+	rflow->fd_ring = NULL;
+err_rx_ring:
+	udma_put_rflow(uc);
+err_rflow:
+	udma_put_rchan(uc);
+
+	return ret;
+}
+
+static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
+	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
+	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+	u32 mode;
+	int ret;
+
+	if (uc->config.pkt_mode)
+		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+	else
+		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+
+	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+	req.nav_id = tisci_rm->tisci_dev_id;
+	req.index = uc->tchan->id;
+	req.tx_chan_type = mode;
+	if (uc->config.dir ==
DMA_MEM_TO_MEM) + req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + else + req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, + 0) >> 2; + req.txcq_qnum = tc_ring; + + ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(ud->dev, "tisci tx alloc failed %d\n", ret); + return ret; + } + + /* + * Above TI SCI call handles firewall configuration, cfg + * register configuration still has to be done locally in + * absence of RM services. + */ + if (IS_ENABLED(CONFIG_K3_DM_FW)) + udma_alloc_tchan_raw(uc); + + return 0; +} + +static int udma_alloc_rchan_sci_req(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring); + int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring); + int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring); + struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 }; + struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + u32 mode; + int ret; + + if (uc->config.pkt_mode) + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + else + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.index = uc->rchan->id; + req.rx_chan_type = mode; + if (uc->config.dir == DMA_MEM_TO_MEM) { + req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req.rxcq_qnum = tc_ring; + } else { + req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, + 0) >> 2; + req.rxcq_qnum = rx_ring; + } + if (ud->match_data->type == DMA_TYPE_UDMA && + uc->rflow->id != uc->rchan->id && + uc->config.dir != DMA_MEM_TO_MEM) { + req.flowid_start = uc->rflow->id; + req.flowid_cnt = 1; + req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID; + } + + ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(ud->dev, "tisci rx %u cfg failed %d\n", + uc->rchan->id, ret); + return ret; + } + if (uc->config.dir == DMA_MEM_TO_MEM) + return ret; + + flow_req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID; + + flow_req.nav_id = tisci_rm->tisci_dev_id; + flow_req.flow_index = uc->rflow->id; + + if (uc->config.needs_epib) + flow_req.rx_einfo_present = 1; + else + flow_req.rx_einfo_present = 0; + + if (uc->config.psd_size) + flow_req.rx_psinfo_present = 1; + else + flow_req.rx_psinfo_present = 0; + + flow_req.rx_error_handling = 0; + flow_req.rx_desc_type = 0; + flow_req.rx_dest_qnum = rx_ring; + flow_req.rx_src_tag_hi_sel = 2; + flow_req.rx_src_tag_lo_sel = 4; + flow_req.rx_dest_tag_hi_sel = 
5;
+	flow_req.rx_dest_tag_lo_sel = 4;
+	flow_req.rx_fdq0_sz0_qnum = fd_ring;
+	flow_req.rx_fdq1_qnum = fd_ring;
+	flow_req.rx_fdq2_qnum = fd_ring;
+	flow_req.rx_fdq3_qnum = fd_ring;
+	flow_req.rx_ps_location = 0;
+
+	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
+						     &flow_req);
+	if (ret) {
+		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
+			uc->rchan->id, uc->rflow->id, ret);
+		return ret;
+	}
+
+	/*
+	 * Above TI SCI call handles firewall configuration, cfg
+	 * register configuration still has to be done locally in
+	 * absence of RM services.
+	 */
+	if (IS_ENABLED(CONFIG_K3_DM_FW))
+		udma_alloc_rchan_raw(uc);
+
+	return 0;
+}
+
+static int udma_alloc_chan_resources(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+	int ret;
+
+	pr_debug("%s: chan:%d as %s\n",
+		 __func__, uc->id, udma_get_dir_text(uc->config.dir));
+
+	switch (uc->config.dir) {
+	case DMA_MEM_TO_MEM:
+		/* Non-synchronized - mem to mem type of transfer */
+		uc->config.pkt_mode = false;
+		ret = udma_get_chan_pair(uc);
+		if (ret)
+			return ret;
+
+		ret = udma_alloc_tx_resources(uc);
+		if (ret)
+			goto err_free_res;
+
+		ret = udma_alloc_rx_resources(uc);
+		if (ret)
+			goto err_free_res;
+
+		uc->config.src_thread = ud->psil_base + uc->tchan->id;
+		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
+		break;
+	case DMA_MEM_TO_DEV:
+		/* Slave transfer synchronized - mem to dev (TX) transfer */
+		ret = udma_alloc_tx_resources(uc);
+		if (ret)
+			goto err_free_res;
+
+		uc->config.src_thread = ud->psil_base + uc->tchan->id;
+		uc->config.dst_thread = uc->config.remote_thread_id;
+		uc->config.dst_thread |= 0x8000;
+
+		break;
+	case DMA_DEV_TO_MEM:
+		/* Slave transfer synchronized - dev to mem (RX) transfer */
+		ret = udma_alloc_rx_resources(uc);
+		if (ret)
+			goto err_free_res;
+
+		uc->config.src_thread = uc->config.remote_thread_id;
+		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
+
+		break;
+	default:
+		/* Cannot happen */
+		pr_debug("%s: chan:%d invalid direction (%u)\n",
+			 __func__, uc->id, uc->config.dir);
+		return -EINVAL;
+	}
+
+	/* We have channel indexes and rings */
+	if (uc->config.dir == DMA_MEM_TO_MEM) {
+		ret = udma_alloc_tchan_sci_req(uc);
+		if (ret)
+			goto err_free_res;
+
+		ret = udma_alloc_rchan_sci_req(uc);
+		if (ret)
+			goto err_free_res;
+	} else {
+		/* Slave transfer */
+		if (uc->config.dir == DMA_MEM_TO_DEV) {
+			ret = udma_alloc_tchan_sci_req(uc);
+			if (ret)
+				goto err_free_res;
+		} else {
+			ret = udma_alloc_rchan_sci_req(uc);
+			if (ret)
+				goto err_free_res;
+		}
+	}
+
+	if (udma_is_chan_running(uc)) {
+		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+		udma_stop(uc);
+		if (udma_is_chan_running(uc)) {
+			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+			ret = -EBUSY;
+			goto err_free_res;
+		}
+	}
+
+	/* PSI-L pairing */
+	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+	if (ret) {
+		dev_err(ud->dev, "PSI-L pairing failed: %d\n", ret);
+		goto err_free_res;
+	}
+
+	return 0;
+
+err_free_res:
+	udma_free_tx_resources(uc);
+	udma_free_rx_resources(uc);
+	uc->config.remote_thread_id = -1;
+	return ret;
+}
+
+static void udma_free_chan_resources(struct udma_chan *uc)
+{
+	/* Hard reset UDMA channel */
+	udma_stop_hard(uc);
+	udma_reset_counters(uc);
+
+	/* Release PSI-L pairing */
+	udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
+
+	/* Reset the rings for a new start */
+	udma_reset_rings(uc);
+	udma_free_tx_resources(uc);
+	udma_free_rx_resources(uc);
+
+	uc->config.remote_thread_id = -1;
+	uc->config.dir =
DMA_MEM_TO_MEM; +} + +static const char * const range_names[] = { + [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan", + [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan", + [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan", + [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow", + [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow", +}; + +static int udma_get_mmrs(struct udevice *dev) +{ + struct udma_dev *ud = dev_get_priv(dev); + u32 cap2, cap3, cap4; + int i; + + ud->mmrs[MMR_GCFG] = dev_read_addr_name_ptr(dev, mmr_names[MMR_GCFG]); + if (!ud->mmrs[MMR_GCFG]) + return -EINVAL; + + cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); + cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); + + switch (ud->match_data->type) { + case DMA_TYPE_UDMA: + ud->rflow_cnt = cap3 & 0x3fff; + ud->tchan_cnt = cap2 & 0x1ff; + ud->echan_cnt = (cap2 >> 9) & 0x1ff; + ud->rchan_cnt = (cap2 >> 18) & 0x1ff; + break; + case DMA_TYPE_BCDMA: + ud->bchan_cnt = cap2 & 0x1ff; + ud->tchan_cnt = (cap2 >> 9) & 0x1ff; + ud->rchan_cnt = (cap2 >> 18) & 0x1ff; + break; + case DMA_TYPE_PKTDMA: + cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); + ud->tchan_cnt = cap2 & 0x1ff; + ud->rchan_cnt = (cap2 >> 18) & 0x1ff; + ud->rflow_cnt = cap3 & 0x3fff; + ud->tflow_cnt = cap4 & 0x3fff; + break; + default: + return -EINVAL; + } + + for (i = 1; i < MMR_LAST; i++) { + if (i == MMR_BCHANRT && ud->bchan_cnt == 0) + continue; + if (i == MMR_TCHANRT && ud->tchan_cnt == 0) + continue; + if (i == MMR_RCHANRT && ud->rchan_cnt == 0) + continue; + if (i == MMR_RFLOW && ud->match_data->type == DMA_TYPE_BCDMA) + continue; + + ud->mmrs[i] = dev_read_addr_name_ptr(dev, mmr_names[i]); + if (!ud->mmrs[i]) + return -EINVAL; + } + + return 0; +} + +static int udma_setup_resources(struct udma_dev *ud) +{ + struct udevice *dev = ud->dev; + int i; + struct ti_sci_resource_desc *rm_desc; + struct ti_sci_resource *rm_res; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + size_t desc_size; + + ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), + GFP_KERNEL); + ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), + GFP_KERNEL); + ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), + GFP_KERNEL); + + desc_size = cppi5_trdesc_calc_size(K3_UDMA_MAX_TR, sizeof(struct cppi5_tr_type15_t)); + ud->bc_desc = devm_kzalloc(dev, ALIGN(desc_size, ARCH_DMA_MINALIGN), GFP_KERNEL); + if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map || + !ud->rflow_map_reserved || !ud->tchans || !ud->rchans || + !ud->rflows || !ud->bc_desc) + return -ENOMEM; + + /* + * RX flows with the same Ids as RX channels are reserved to be used + * as default flows if remote HW can't generate flow_ids. Those + * RX flows can be requested only explicitly by id. 
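+	 * The default flow of rchan N is therefore rflow N:
+	 * udma_alloc_rx_resources() falls back to uc->rchan->id when no
+	 * default_flow_id is given.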
+ */ + bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt); + + /* Get resource ranges from tisci */ + for (i = 0; i < RM_RANGE_LAST; i++) { + if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW) + continue; + + tisci_rm->rm_ranges[i] = + devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, + tisci_rm->tisci_dev_id, + (char *)range_names[i]); + } + + /* tchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->tchan_map, ud->tchan_cnt); + } else { + bitmap_fill(ud->tchan_map, ud->tchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->tchan_map, rm_desc->start, + rm_desc->num); + } + } + + /* rchan and matching default flow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->rchan_map, ud->rchan_cnt); + bitmap_zero(ud->rflow_map, ud->rchan_cnt); + } else { + bitmap_fill(ud->rchan_map, ud->rchan_cnt); + bitmap_fill(ud->rflow_map, ud->rchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rchan_map, rm_desc->start, + rm_desc->num); + bitmap_clear(ud->rflow_map, rm_desc->start, + rm_desc->num); + } + } + + /* GP rflow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; + if (IS_ERR(rm_res)) { + bitmap_clear(ud->rflow_map, ud->rchan_cnt, + ud->rflow_cnt - ud->rchan_cnt); + } else { + bitmap_set(ud->rflow_map, ud->rchan_cnt, + ud->rflow_cnt - ud->rchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rflow_map, rm_desc->start, + rm_desc->num); + } + } + + return 0; +} + +static int bcdma_setup_resources(struct udma_dev *ud) +{ + int i; + struct udevice *dev = ud->dev; + struct ti_sci_resource_desc *rm_desc; + struct ti_sci_resource *rm_res; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + size_t desc_size; + + ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans), + GFP_KERNEL); + ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), + GFP_KERNEL); + ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), + GFP_KERNEL); + ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows), + GFP_KERNEL); + + desc_size = cppi5_trdesc_calc_size(K3_UDMA_MAX_TR, sizeof(struct cppi5_tr_type15_t)); + ud->bc_desc = devm_kzalloc(dev, ALIGN(desc_size, ARCH_DMA_MINALIGN), GFP_KERNEL); + + if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map || + !ud->bchans || !ud->tchans || !ud->rchans || + !ud->rflows || !ud->bc_desc) + return -ENOMEM; + + /* Get resource ranges from tisci */ + for (i = 0; i < RM_RANGE_LAST; i++) { + if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW) + continue; + + tisci_rm->rm_ranges[i] = + devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, + tisci_rm->tisci_dev_id, + (char *)range_names[i]); + } + + /* bchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->bchan_map, ud->bchan_cnt); + } else { + bitmap_fill(ud->bchan_map, ud->bchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->bchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + 
} + + /* tchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->tchan_map, ud->tchan_cnt); + } else { + bitmap_fill(ud->tchan_map, ud->tchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->tchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + /* rchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->rchan_map, ud->rchan_cnt); + } else { + bitmap_fill(ud->rchan_map, ud->rchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + return 0; +} + +static int pktdma_setup_resources(struct udma_dev *ud) +{ + int i; + struct udevice *dev = ud->dev; + struct ti_sci_resource *rm_res; + struct ti_sci_resource_desc *rm_desc; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), + GFP_KERNEL); + ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), + GFP_KERNEL); + ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), + GFP_KERNEL); + ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt), + sizeof(unsigned long), GFP_KERNEL); + + if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || + !ud->rchans || !ud->rflows || !ud->rflow_map) + return -ENOMEM; + + /* Get resource ranges from tisci */ + for (i = 0; i < RM_RANGE_LAST; i++) { + if (i == RM_RANGE_BCHAN) + continue; + + tisci_rm->rm_ranges[i] = + devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, + tisci_rm->tisci_dev_id, + (char *)range_names[i]); + } + + /* tchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->tchan_map, ud->tchan_cnt); + } else { + bitmap_fill(ud->tchan_map, ud->tchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->tchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + /* rchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->rchan_map, ud->rchan_cnt); + } else { + bitmap_fill(ud->rchan_map, ud->rchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + /* rflow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; + if (IS_ERR(rm_res)) { + /* all rflows are assigned exclusively to Linux */ + bitmap_zero(ud->rflow_map, ud->rflow_cnt); + } else { + bitmap_fill(ud->rflow_map, ud->rflow_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rflow_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + /* tflow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; + if (IS_ERR(rm_res)) { 
+ /* all tflows are assigned exclusively to Linux */
+ bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+ } else {
+ bitmap_fill(ud->tflow_map, ud->tflow_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->tflow_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ return 0;
+}
+
+static int setup_resources(struct udma_dev *ud)
+{
+ struct udevice *dev = ud->dev;
+ int ch_count, ret;
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ret = udma_setup_resources(ud);
+ break;
+ case DMA_TYPE_BCDMA:
+ ret = bcdma_setup_resources(ud);
+ break;
+ case DMA_TYPE_PKTDMA:
+ ret = pktdma_setup_resources(ud);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
+ if (ud->bchan_cnt)
+ ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
+ ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
+ ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
+ if (!ch_count)
+ return -ENODEV;
+
+ ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
+ GFP_KERNEL);
+ if (!ud->channels)
+ return -ENOMEM;
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ dev_dbg(dev,
+ "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt),
+ ud->rflow_cnt - bitmap_weight(ud->rflow_map,
+ ud->rflow_cnt));
+ break;
+ case DMA_TYPE_BCDMA:
+ dev_dbg(dev,
+ "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
+ ch_count,
+ ud->bchan_cnt - bitmap_weight(ud->bchan_map,
+ ud->bchan_cnt),
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt));
+ break;
+ case DMA_TYPE_PKTDMA:
+ dev_dbg(dev,
+ "Channels: %d (tchan: %u, rchan: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt));
+ break;
+ default:
+ break;
+ }
+
+ return ch_count;
+}
+
+static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
+{
+ u64 addr = 0;
+
+ memcpy(&addr, &elem, sizeof(elem));
+ return k3_nav_ringacc_ring_push(ring, &addr);
+}
+
+static int udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
+ dma_addr_t src, size_t len)
+{
+ u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
+ struct cppi5_tr_type15_t *tr_req;
+ int num_tr;
+ size_t tr_size = sizeof(struct cppi5_tr_type15_t);
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+ void *tr_desc = uc->ud->bc_desc;
+ size_t desc_size;
+
+ if (len < SZ_64K) {
+ num_tr = 1;
+ tr0_cnt0 = len;
+ tr0_cnt1 = 1;
+ } else {
+ unsigned long align_to = __ffs(src | dest);
+
+ if (align_to > 3)
+ align_to = 3;
+ /*
+ * Keep simple: tr0: SZ_64K-alignment blocks,
+ * tr1: the remaining
+ */
+ num_tr = 2;
+ tr0_cnt0 = (SZ_64K - BIT(align_to));
+ if (len / tr0_cnt0 >= SZ_64K) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ len);
+ return -EINVAL;
+ }
+
+ tr0_cnt1 = len / tr0_cnt0;
+ tr1_cnt0 = len % tr0_cnt0;
+ }
+
+ desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
+ memset(tr_desc, 0, desc_size);
+
+ cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
+ cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
+ cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
+
+ tr_req = tr_desc + tr_size;
+
+ cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
+
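/*
 * The TR split above in concrete numbers (a worked sketch, not from the
 * patch): the TR counters are u16, so icnt0 tops out below SZ_64K. For a
 * 200000-byte copy where src | dest gives align_to = 3:
 *
 *   tr0_cnt0 = SZ_64K - BIT(3) = 65528;  // bytes per row, 8-byte aligned
 *   tr0_cnt1 = 200000 / 65528  = 3;      // full rows carried by TR0
 *   tr1_cnt0 = 200000 % 65528  = 3416;   // remainder carried by TR1
 *
 * TR0 moves 3 * 65528 = 196584 bytes as a 2-D transfer and TR1 mops up
 * the last 3416 bytes, which is why num_tr == 2 below.
 */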
CPPI5_TR_EVENT_SIZE_COMPLETION, 1); + cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[0].addr = src; + tr_req[0].icnt0 = tr0_cnt0; + tr_req[0].icnt1 = tr0_cnt1; + tr_req[0].icnt2 = 1; + tr_req[0].icnt3 = 1; + tr_req[0].dim1 = tr0_cnt0; + + tr_req[0].daddr = dest; + tr_req[0].dicnt0 = tr0_cnt0; + tr_req[0].dicnt1 = tr0_cnt1; + tr_req[0].dicnt2 = 1; + tr_req[0].dicnt3 = 1; + tr_req[0].ddim1 = tr0_cnt0; + + if (num_tr == 2) { + cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; + tr_req[1].icnt0 = tr1_cnt0; + tr_req[1].icnt1 = 1; + tr_req[1].icnt2 = 1; + tr_req[1].icnt3 = 1; + + tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; + tr_req[1].dicnt0 = tr1_cnt0; + tr_req[1].dicnt1 = 1; + tr_req[1].dicnt2 = 1; + tr_req[1].dicnt3 = 1; + } + + cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP); + + flush_dcache_range((unsigned long)tr_desc, + ALIGN((unsigned long)tr_desc + desc_size, + ARCH_DMA_MINALIGN)); + + udma_push_to_ring(uc->tchan->t_ring, tr_desc); + + return 0; +} + +#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID) + +#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID) + +#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID) + +#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) + +#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) + +static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + struct udma_bchan *bchan = uc->bchan; + int ret = 0; + + req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS; + req_tx.nav_id = tisci_rm->tisci_dev_id; + req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN; + req_tx.index = bchan->id; + + ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); + if (ret) + dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret); + + return ret; +} + +static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id) +{ + if (id >= 0) { + if (test_bit(id, ud->bchan_map)) { + dev_err(ud->dev, "bchan%d is in use\n", id); + return ERR_PTR(-ENOENT); + } + } else { + id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0); + if (id == ud->bchan_cnt) + 
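/*
 * __bcdma_reserve_bchan() above is a plain first-fit allocator:
 * find_next_zero_bit() scans bchan_map from bit 0 and returns bchan_cnt
 * when every channel is taken, hence the -ENOENT just below. A minimal
 * stand-alone equivalent (hypothetical, without the kernel bitmap API,
 * assuming 64-bit longs):
 *
 *   static int reserve_first_free(unsigned long *map, int cnt)
 *   {
 *       for (int id = 0; id < cnt; id++)
 *           if (!(map[id / 64] & (1UL << (id % 64)))) {
 *               map[id / 64] |= 1UL << (id % 64);  // __set_bit()
 *               return id;
 *           }
 *       return -1;                                 // all channels busy
 *   }
 */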
return ERR_PTR(-ENOENT); + } + __set_bit(id, ud->bchan_map); + return &ud->bchans[id]; +} + +static int bcdma_get_bchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->bchan) { + dev_err(ud->dev, "chan%d: already have bchan%d allocated\n", + uc->id, uc->bchan->id); + return 0; + } + + uc->bchan = __bcdma_reserve_bchan(ud, -1); + if (IS_ERR(uc->bchan)) + return PTR_ERR(uc->bchan); + + uc->tchan = uc->bchan; + + return 0; +} + +static void bcdma_put_bchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->bchan) { + dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id, + uc->bchan->id); + __clear_bit(uc->bchan->id, ud->bchan_map); + uc->bchan = NULL; + uc->tchan = NULL; + } +} + +static void bcdma_free_bchan_resources(struct udma_chan *uc) +{ + if (!uc->bchan) + return; + + k3_nav_ringacc_ring_free(uc->bchan->tc_ring); + k3_nav_ringacc_ring_free(uc->bchan->t_ring); + uc->bchan->tc_ring = NULL; + uc->bchan->t_ring = NULL; + + bcdma_put_bchan(uc); +} + +static int bcdma_alloc_bchan_resources(struct udma_chan *uc) +{ + struct k3_nav_ring_cfg ring_cfg; + struct udma_dev *ud = uc->ud; + int ret; + + ret = bcdma_get_bchan(uc); + if (ret) + return ret; + + ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1, + &uc->bchan->t_ring, + &uc->bchan->tc_ring); + if (ret) { + ret = -EBUSY; + goto err_ring; + } + + memset(&ring_cfg, 0, sizeof(ring_cfg)); + ring_cfg.size = 16; + ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING; + + ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg); + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_nav_ringacc_ring_free(uc->bchan->tc_ring); + uc->bchan->tc_ring = NULL; + k3_nav_ringacc_ring_free(uc->bchan->t_ring); + uc->bchan->t_ring = NULL; +err_ring: + bcdma_put_bchan(uc); + + return ret; +} + +static int bcdma_tisci_tx_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + int ret = 0; + + req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS; + req_tx.nav_id = tisci_rm->tisci_dev_id; + req_tx.index = tchan->id; + req_tx.tx_supr_tdpkt = uc->config.notdpkt; + if (uc->config.ep_type == PSIL_EP_PDMA_XY && + ud->match_data->flags & UDMA_FLAG_TDTYPE) { + /* wait for peer to complete the teardown for PDMAs */ + req_tx.valid_params |= + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; + req_tx.tx_tdtype = 1; + } + + ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); + if (ret) + dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); + + if (IS_ENABLED(CONFIG_K3_DM_FW)) + udma_alloc_tchan_raw(uc); + + return ret; +} + +#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config + +static int pktdma_tisci_rx_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; + struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; + int ret = 0; + + req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; + req_rx.nav_id = tisci_rm->tisci_dev_id; + req_rx.index = uc->rchan->id; + + ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); + if (ret) { + dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); + return ret; + 
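/*
 * On the error handling in bcdma_alloc_bchan_resources() above: it is
 * the usual layered-unwind shape, where each goto label releases exactly
 * what was acquired before the failure. Schematically (a sketch of the
 * pattern with hypothetical helper names, not new driver code):
 *
 *   ret = get_channel();        if (ret) return ret;
 *   ret = request_ring_pair();  if (ret) goto err_ring;     // put channel
 *   ret = configure_ring();     if (ret) goto err_ringcfg;  // + free rings
 *   return 0;
 *
 * Keeping the labels in reverse acquisition order means adding a new
 * resource only needs one new label above the existing ones.
 */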
}
+
+ flow_req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
+
+ flow_req.nav_id = tisci_rm->tisci_dev_id;
+ flow_req.flow_index = uc->rflow->id;
+
+ if (uc->config.needs_epib)
+ flow_req.rx_einfo_present = 1;
+ else
+ flow_req.rx_einfo_present = 0;
+ if (uc->config.psd_size)
+ flow_req.rx_psinfo_present = 1;
+ else
+ flow_req.rx_psinfo_present = 0;
+ flow_req.rx_error_handling = 0;
+
+ ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+ if (ret)
+ dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
+ ret);
+
+ if (IS_ENABLED(CONFIG_K3_DM_FW))
+ udma_alloc_rchan_raw(uc);
+
+ return ret;
+}
+
+static int bcdma_alloc_chan_resources(struct udma_chan *uc)
+{
+ int ret;
+
+ uc->config.pkt_mode = false;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non-synchronized - mem to mem type of transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = bcdma_alloc_bchan_resources(uc);
+ if (ret)
+ return ret;
+
+ ret = bcdma_tisci_m2m_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
+ udma_stop(uc);
+ if (udma_is_chan_running(uc)) {
+ dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
+ goto err_res_free;
+ }
+ }
+
+ udma_reset_rings(uc);
+
+ return 0;
+
+err_res_free:
+ bcdma_free_bchan_resources(uc);
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ return ret;
+}
+
+static int pktdma_alloc_chan_resources(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ ret = pktdma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ ret = pktdma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ /* PSI-L pairing */
+ ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_stop(uc);
+
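/*
 * The PSI-L pairing above in concrete (hypothetical) numbers, assuming
 * psil_base = 0x1000 and K3_PSIL_DST_THREAD_ID_OFFSET = 0x8000 (bit 15
 * marks a destination thread):
 *
 *   TX, tchan 3, peer thread 0x4400:
 *     src_thread = 0x1000 + 3      = 0x1003;
 *     dst_thread = 0x4400 | 0x8000 = 0xc400;
 *   RX, rchan 3, peer thread 0x4400:
 *     src_thread = 0x4400;
 *     dst_thread = (0x1000 + 3) | 0x8000 = 0x9003;
 *
 * udma_navss_psil_pair() then asks firmware to connect
 * src_thread -> dst_thread.
 */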
if (udma_is_chan_running(uc)) { + dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); + goto err_res_free; + } + } + + udma_reset_rings(uc); + + if (uc->tchan) + dev_dbg(ud->dev, + "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n", + uc->id, uc->tchan->id, uc->tchan->tflow_id, + uc->config.remote_thread_id); + else if (uc->rchan) + dev_dbg(ud->dev, + "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n", + uc->id, uc->rchan->id, uc->rflow->id, + uc->config.remote_thread_id); + return 0; + +err_res_free: + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + + udma_reset_uchan(uc); + + return ret; +} + +static int udma_transfer(struct udevice *dev, int direction, + dma_addr_t dst, dma_addr_t src, size_t len) +{ + struct udma_dev *ud = dev_get_priv(dev); + /* Channel0 is reserved for memcpy */ + struct udma_chan *uc = &ud->channels[0]; + dma_addr_t paddr = 0; + + udma_prep_dma_memcpy(uc, dst, src, len); + udma_start(uc); + udma_poll_completion(uc, &paddr); + udma_stop(uc); + + return 0; +} + +static int udma_request(struct dma *dma) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan_config *ucc; + struct udma_chan *uc; + unsigned long dummy; + int ret; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + + uc = &ud->channels[dma->id]; + ucc = &uc->config; + switch (ud->match_data->type) { + case DMA_TYPE_UDMA: + ret = udma_alloc_chan_resources(uc); + break; + case DMA_TYPE_BCDMA: + ret = bcdma_alloc_chan_resources(uc); + break; + case DMA_TYPE_PKTDMA: + ret = pktdma_alloc_chan_resources(uc); + break; + default: + return -EINVAL; + } + if (ret) { + dev_err(dma->dev, "alloc dma res failed %d\n", ret); + return -EINVAL; + } + + if (uc->config.dir == DMA_MEM_TO_DEV) { + uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy); + memset(uc->desc_tx, 0, ucc->hdesc_size); + } else { + uc->desc_rx = dma_alloc_coherent( + ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy); + memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM); + } + + uc->in_use = true; + uc->desc_rx_cur = 0; + uc->num_rx_bufs = 0; + + if (uc->config.dir == DMA_DEV_TO_MEM) { + uc->cfg_data.flow_id_base = uc->rflow->id; + uc->cfg_data.flow_id_cnt = 1; + } + + return 0; +} + +static int udma_rfree(struct dma *dma) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + if (udma_is_chan_running(uc)) + udma_stop(uc); + + udma_navss_psil_unpair(ud, uc->config.src_thread, + uc->config.dst_thread); + + bcdma_free_bchan_resources(uc); + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + udma_reset_uchan(uc); + + uc->in_use = false; + + return 0; +} + +static int udma_enable(struct dma *dma) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc; + int ret; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + ret = udma_start(uc); + + return ret; +} + +static int udma_disable(struct dma *dma) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc; + int ret = 0; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + uc = &ud->channels[dma->id]; + + if (udma_is_chan_running(uc)) + ret = udma_stop(uc); + else + 
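/*
 * For orientation: udma_transfer() above is reached through the DMA
 * uclass memcpy helpers. A sketch of the caller side, assuming the
 * dma_get_device()/dma_memcpy() helpers declared in include/dma.h
 * behave as named:
 *
 *   struct udevice *dev;
 *
 *   if (!dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev))
 *       dma_memcpy(dst_buf, src_buf, len);  // prep + start + poll + stop
 *
 * Channel 0 is reserved in udma_probe() precisely so this path always
 * has a bchan/tchan pair to run on.
 */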
dev_err(dma->dev, "%s not running\n", __func__);
+
+ return ret;
+}
+
+static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
+{
+ struct udma_dev *ud = dev_get_priv(dma->dev);
+ struct cppi5_host_desc_t *desc_tx;
+ dma_addr_t dma_src = (dma_addr_t)src;
+ struct ti_udma_drv_packet_data packet_data = { 0 };
+ dma_addr_t paddr;
+ struct udma_chan *uc;
+ u32 tc_ring_id;
+ int ret;
+
+ if (metadata)
+ packet_data = *((struct ti_udma_drv_packet_data *)metadata);
+
+ if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
+ dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
+ return -EINVAL;
+ }
+ uc = &ud->channels[dma->id];
+
+ if (uc->config.dir != DMA_MEM_TO_DEV)
+ return -EINVAL;
+
+ tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ desc_tx = uc->desc_tx;
+
+ cppi5_hdesc_reset_hbdesc(desc_tx);
+
+ cppi5_hdesc_init(desc_tx,
+ uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
+ uc->config.psd_size);
+ cppi5_hdesc_set_pktlen(desc_tx, len);
+ cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
+ cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
+ cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
+ /* pass on packet type and destination tag from the caller */
+ cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
+ cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
+
+ flush_dcache_range((unsigned long)dma_src,
+ ALIGN((unsigned long)dma_src + len,
+ ARCH_DMA_MINALIGN));
+ flush_dcache_range((unsigned long)desc_tx,
+ ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
+ ARCH_DMA_MINALIGN));
+
+ ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
+ if (ret) {
+ dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
+ dma->id, ret);
+ return ret;
+ }
+
+ udma_poll_completion(uc, &paddr);
+
+ return 0;
+}
+
+static int udma_receive(struct dma *dma, void **dst, void *metadata)
+{
+ struct udma_dev *ud = dev_get_priv(dma->dev);
+ struct udma_chan_config *ucc;
+ struct cppi5_host_desc_t *desc_rx;
+ dma_addr_t buf_dma;
+ struct udma_chan *uc;
+ u32 buf_dma_len, pkt_len;
+ u32 port_id = 0;
+ int ret;
+
+ if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
+ dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
+ return -EINVAL;
+ }
+ uc = &ud->channels[dma->id];
+ ucc = &uc->config;
+
+ if (uc->config.dir != DMA_DEV_TO_MEM)
+ return -EINVAL;
+ if (!uc->num_rx_bufs)
+ return -EINVAL;
+
+ ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
+ if (ret && ret != -ENODATA) {
+ dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
+ return ret;
+ } else if (ret == -ENODATA) {
+ return 0;
+ }
+
+ /* invalidate cache data */
+ invalidate_dcache_range((ulong)desc_rx,
+ (ulong)desc_rx + ucc->hdesc_size);
+
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+
+ /* invalidate cache data */
+ invalidate_dcache_range((ulong)buf_dma,
+ (ulong)(buf_dma + buf_dma_len));
+
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+
+ *dst = (void *)buf_dma;
+ uc->num_rx_bufs--;
+
+ return pkt_len;
+}
+
+static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
+{
+ struct udma_chan_config *ucc;
+ struct udma_dev *ud = dev_get_priv(dma->dev);
+ struct udma_chan *uc = &ud->channels[0];
+ struct psil_endpoint_config *ep_config;
+ u32 val;
+
+ for (val = 0; val < ud->ch_count; val++) {
+ uc = &ud->channels[val];
+ if (!uc->in_use)
+ break;
+ }
+
+ if (val == ud->ch_count)
+ return -EBUSY;
+
+ ucc = &uc->config;
+ ucc->remote_thread_id = args->args[0];
+ if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
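/*
 * The test above decodes the channel direction from the PSI-L thread ID
 * handed over in the device-tree cell: bit 15
 * (K3_PSIL_DST_THREAD_ID_OFFSET, assumed 0x8000 here) is set for
 * destination threads. For example:
 *
 *   bool to_dev = !!(0xc400 & 0x8000);  // true  -> DMA_MEM_TO_DEV
 *   bool to_mem = !(0x4400 & 0x8000);   // true  -> DMA_DEV_TO_MEM
 */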
+ ucc->dir = DMA_MEM_TO_DEV;
+ else
+ ucc->dir = DMA_DEV_TO_MEM;
+
+ ep_config = psil_get_ep_config(ucc->remote_thread_id);
+ if (IS_ERR(ep_config)) {
+ dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
+ uc->config.remote_thread_id);
+ ucc->dir = DMA_MEM_TO_MEM;
+ ucc->remote_thread_id = -1;
+ return -EINVAL;
+ }
+
+ ucc->pkt_mode = ep_config->pkt_mode;
+ ucc->channel_tpl = ep_config->channel_tpl;
+ ucc->notdpkt = ep_config->notdpkt;
+ ucc->ep_type = ep_config->ep_type;
+
+ if (ud->match_data->type == DMA_TYPE_PKTDMA &&
+ ep_config->mapped_channel_id >= 0) {
+ ucc->mapped_channel_id = ep_config->mapped_channel_id;
+ ucc->default_flow_id = ep_config->default_flow_id;
+ } else {
+ ucc->mapped_channel_id = -1;
+ ucc->default_flow_id = -1;
+ }
+
+ ucc->needs_epib = ep_config->needs_epib;
+ ucc->psd_size = ep_config->psd_size;
+ ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;
+
+ ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
+ ucc->psd_size, 0);
+ ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);
+
+ dma->id = uc->id;
+ pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
+ dma->id, ucc->needs_epib,
+ ucc->psd_size, ucc->metadata_size,
+ ucc->remote_thread_id);
+
+ return 0;
+}
+
+int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
+{
+ struct udma_dev *ud = dev_get_priv(dma->dev);
+ struct cppi5_host_desc_t *desc_rx;
+ dma_addr_t dma_dst;
+ struct udma_chan *uc;
+ u32 desc_num;
+
+ if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
+ dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
+ return -EINVAL;
+ }
+ uc = &ud->channels[dma->id];
+
+ if (uc->config.dir != DMA_DEV_TO_MEM)
+ return -EINVAL;
+
+ if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
+ return -EINVAL;
+
+ desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
+ desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
+ dma_dst = (dma_addr_t)dst;
+
+ cppi5_hdesc_reset_hbdesc(desc_rx);
+
+ cppi5_hdesc_init(desc_rx,
+ uc->config.needs_epib ?
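/*
 * The RX descriptors initialised here were sized back in udma_of_xlate():
 * hdesc_size = cppi5_hdesc_calc_size(epib, psd_size, 0), rounded up to
 * ARCH_DMA_MINALIGN. With hypothetical numbers - EPIB present (16 bytes)
 * and psd_size = 16:
 *
 *   size = sizeof(struct cppi5_host_desc_t) + 16 + 16;
 *   size = ALIGN(size, ARCH_DMA_MINALIGN);  // e.g. 64-byte cache lines
 *
 * so each of the UDMA_RX_DESC_NUM slots in desc_rx starts on its own
 * cache-line boundary, which the flush/invalidate calls below rely on.
 */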
CPPI5_INFO0_HDESC_EPIB_PRESENT : 0, + uc->config.psd_size); + cppi5_hdesc_set_pktlen(desc_rx, size); + cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size); + + invalidate_dcache_range((unsigned long)dma_dst, + (unsigned long)(dma_dst + size)); + + flush_dcache_range((unsigned long)desc_rx, + ALIGN((unsigned long)desc_rx + uc->config.hdesc_size, + ARCH_DMA_MINALIGN)); + + udma_push_to_ring(uc->rflow->fd_ring, desc_rx); + + uc->num_rx_bufs++; + uc->desc_rx_cur++; + + return 0; +} + +static int udma_get_cfg(struct dma *dma, u32 id, void **data) +{ + struct udma_dev *ud = dev_get_priv(dma->dev); + struct udma_chan *uc; + + if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) { + dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id); + return -EINVAL; + } + + switch (id) { + case TI_UDMA_CHAN_PRIV_INFO: + uc = &ud->channels[dma->id]; + *data = &uc->cfg_data; + return 0; + } + + return -EINVAL; +} + +static int udma_probe(struct udevice *dev) +{ + struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev); + struct udma_dev *ud = dev_get_priv(dev); + int i, ret; + struct udevice *tmp; + struct udevice *tisci_dev = NULL; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + struct udma_chan *uc; + ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev)); + + ud->match_data = (void *)dev_get_driver_data(dev); + ret = udma_get_mmrs(dev); + if (ret) + return ret; + + ud->psil_base = ud->match_data->psil_base; + + ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev, + "ti,sci", &tisci_dev); + if (ret) { + debug("Failed to get TISCI phandle (%d)\n", ret); + tisci_rm->tisci = NULL; + return -EINVAL; + } + tisci_rm->tisci = (struct ti_sci_handle *) + (ti_sci_get_handle_from_sysfw(tisci_dev)); + + tisci_rm->tisci_dev_id = -1; + ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); + return ret; + } + + tisci_rm->tisci_navss_dev_id = -1; + ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id", + &tisci_rm->tisci_navss_dev_id); + if (ret) { + dev_err(dev, "navss sci-dev-id read failure %d\n", ret); + return ret; + } + + tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops; + tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops; + + if (ud->match_data->type == DMA_TYPE_UDMA) { + ret = uclass_get_device_by_phandle(UCLASS_MISC, dev, + "ti,ringacc", &tmp); + ud->ringacc = dev_get_priv(tmp); + } else { + struct k3_ringacc_init_data ring_init_data; + + ring_init_data.tisci = ud->tisci_rm.tisci; + ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id; + if (ud->match_data->type == DMA_TYPE_BCDMA) { + ring_init_data.num_rings = ud->bchan_cnt + + ud->tchan_cnt + + ud->rchan_cnt; + } else { + ring_init_data.num_rings = ud->rflow_cnt + + ud->tflow_cnt; + } + + ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data); + } + if (IS_ERR(ud->ringacc)) + return PTR_ERR(ud->ringacc); + + ud->dev = dev; + ret = setup_resources(ud); + if (ret < 0) + return ret; + + ud->ch_count = ret; + + for (i = 0; i < ud->bchan_cnt; i++) { + struct udma_bchan *bchan = &ud->bchans[i]; + + bchan->id = i; + bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->tchan_cnt; i++) { + struct udma_tchan *tchan = &ud->tchans[i]; + + tchan->id = i; + tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i); + tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i); + } + + for (i = 0; i < ud->rchan_cnt; i++) { + struct udma_rchan *rchan = &ud->rchans[i]; + + rchan->id = i; + rchan->reg_chan = ud->mmrs[MMR_RCHAN] + 
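/*
 * UDMA_CH_100()/UDMA_CH_1000()/UDMA_CH_40(), defined earlier in this
 * file, are simple per-channel strides into the MMIO windows, on this
 * pattern (a sketch of the layout, values as used in this driver):
 *
 *   #define UDMA_CH_100(i)  ((i) * 0x100)   // 256 B of config regs/chan
 *   #define UDMA_CH_1000(i) ((i) * 0x1000)  // 4 KiB of realtime regs/chan
 *   #define UDMA_CH_40(i)   ((i) * 0x40)    // 64 B per rflow
 *
 * so, say, rchan3's realtime registers live at mmrs[MMR_RCHANRT] + 0x3000.
 */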
UDMA_CH_100(i);
+ rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
+ }
+
+ for (i = 0; i < ud->rflow_cnt; i++) {
+ struct udma_rflow *rflow = &ud->rflows[i];
+
+ rflow->id = i;
+ rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
+ }
+
+ for (i = 0; i < ud->ch_count; i++) {
+ struct udma_chan *uc = &ud->channels[i];
+
+ uc->ud = ud;
+ uc->id = i;
+ uc->config.remote_thread_id = -1;
+ uc->bchan = NULL;
+ uc->tchan = NULL;
+ uc->rchan = NULL;
+ uc->config.mapped_channel_id = -1;
+ uc->config.default_flow_id = -1;
+ uc->config.dir = DMA_MEM_TO_MEM;
+ sprintf(uc->name, "UDMA chan%d", i);
+ if (!i)
+ uc->in_use = true;
+ }
+
+ pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ dev->name,
+ udma_read(ud->mmrs[MMR_GCFG], 0),
+ udma_read(ud->mmrs[MMR_GCFG], 0x20),
+ udma_read(ud->mmrs[MMR_GCFG], 0x24),
+ udma_read(ud->mmrs[MMR_GCFG], 0x28),
+ udma_read(ud->mmrs[MMR_GCFG], 0x2c));
+
+ uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
+
+ uc = &ud->channels[0];
+ ret = 0;
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ret = udma_alloc_chan_resources(uc);
+ break;
+ case DMA_TYPE_BCDMA:
+ ret = bcdma_alloc_chan_resources(uc);
+ break;
+ default:
+ break; /* Do nothing in any other case */
+ }
+
+ if (ret)
+ dev_err(dev, "Channel 0 allocation failure %d\n", ret);
+
+ return ret;
+}
+
+static int udma_remove(struct udevice *dev)
+{
+ struct udma_dev *ud = dev_get_priv(dev);
+ struct udma_chan *uc = &ud->channels[0];
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ udma_free_chan_resources(uc);
+ break;
+ case DMA_TYPE_BCDMA:
+ bcdma_free_bchan_resources(uc);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct dma_ops udma_ops = {
+ .transfer = udma_transfer,
+ .of_xlate = udma_of_xlate,
+ .request = udma_request,
+ .rfree = udma_rfree,
+ .enable = udma_enable,
+ .disable = udma_disable,
+ .send = udma_send,
+ .receive = udma_receive,
+ .prepare_rcv_buf = udma_prepare_rcv_buf,
+ .get_cfg = udma_get_cfg,
+};
+
+static struct udma_match_data am654_main_data = {
+ .type = DMA_TYPE_UDMA,
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .statictr_z_mask = GENMASK(11, 0),
+ .oes = {
+ .udma_rchan = 0x200,
+ },
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 8, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data am654_mcu_data = {
+ .type = DMA_TYPE_UDMA,
+ .psil_base = 0x6000,
+ .enable_memcpy_support = true,
+ .statictr_z_mask = GENMASK(11, 0),
+ .oes = {
+ .udma_rchan = 0x200,
+ },
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_main_data = {
+ .type = DMA_TYPE_UDMA,
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .statictr_z_mask = GENMASK(23, 0),
+ .oes = {
+ .udma_rchan = 0x400,
+ },
+ .tpl_levels = 3,
+ .level_start_idx = {
+ [0] = 16, /* Normal channels */
+ [1] = 4, /* High Throughput channels */
+ [2] = 0, /* Ultra High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_mcu_data = {
+ .type = DMA_TYPE_UDMA,
+ .psil_base = 0x6000,
+ .enable_memcpy_support = true,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .statictr_z_mask = GENMASK(23, 0),
+ .oes = {
+ .udma_rchan = 0x400,
+ },
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput
channels */
+ },
+};
+
+static struct udma_match_data am64_bcdma_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
+ .enable_memcpy_support = true, /* Supported via bchan */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .statictr_z_mask = GENMASK(23, 0),
+ .oes = {
+ .bcdma_bchan_data = 0x2200,
+ .bcdma_bchan_ring = 0x2400,
+ .bcdma_tchan_data = 0x2800,
+ .bcdma_tchan_ring = 0x2a00,
+ .bcdma_rchan_data = 0x2e00,
+ .bcdma_rchan_ring = 0x3000,
+ },
+ /* No throughput levels */
+};
+
+static struct udma_match_data am64_pktdma_data = {
+ .type = DMA_TYPE_PKTDMA,
+ .psil_base = 0x1000,
+ .enable_memcpy_support = false,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .statictr_z_mask = GENMASK(23, 0),
+ .oes = {
+ .pktdma_tchan_flow = 0x1200,
+ .pktdma_rchan_flow = 0x1600,
+ },
+ /* No throughput levels */
+};
+
+static const struct udevice_id udma_ids[] = {
+ {
+ .compatible = "ti,am654-navss-main-udmap",
+ .data = (ulong)&am654_main_data,
+ },
+ {
+ .compatible = "ti,am654-navss-mcu-udmap",
+ .data = (ulong)&am654_mcu_data,
+ }, {
+ .compatible = "ti,j721e-navss-main-udmap",
+ .data = (ulong)&j721e_main_data,
+ }, {
+ .compatible = "ti,j721e-navss-mcu-udmap",
+ .data = (ulong)&j721e_mcu_data,
+ },
+ {
+ .compatible = "ti,am64-dmss-bcdma",
+ .data = (ulong)&am64_bcdma_data,
+ },
+ {
+ .compatible = "ti,am64-dmss-pktdma",
+ .data = (ulong)&am64_pktdma_data,
+ },
+ { /* Sentinel */ },
+};
+
+U_BOOT_DRIVER(ti_edma3) = {
+ .name = "ti-udma",
+ .id = UCLASS_DMA,
+ .of_match = udma_ids,
+ .ops = &udma_ops,
+ .probe = udma_probe,
+ .remove = udma_remove,
+ .priv_auto = sizeof(struct udma_dev),
+ .flags = DM_FLAG_OS_PREPARE,
+}; diff --git a/drivers/dma/xilinx_dpdma.c b/drivers/dma/xilinx_dpdma.c new file mode 100644 index 00000000000..1d615ec2838 --- /dev/null +++ b/drivers/dma/xilinx_dpdma.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Xilinx Inc.
+ */
+
+#include <cpu_func.h>
+#include <dm.h>
+#include <dma.h>
+#include <dma-uclass.h>
+#include <errno.h>
+#include <dm/device_compat.h>
+
+/**
+ * struct zynqmp_dpdma_priv - Private structure
+ * @dev: Device uclass for video_ops
+ */
+struct zynqmp_dpdma_priv {
+ struct udevice *dev;
+};
+
+static int zynqmp_dpdma_probe(struct udevice *dev)
+{
+ /* Only a placeholder for the power domain driver */
+ return 0;
+}
+
+static const struct dma_ops zynqmp_dpdma_ops = {
+};
+
+static const struct udevice_id zynqmp_dpdma_ids[] = {
+ { .compatible = "xlnx,zynqmp-dpdma" },
+ { }
+};
+
+U_BOOT_DRIVER(zynqmp_dpdma) = {
+ .name = "zynqmp_dpdma",
+ .id = UCLASS_DMA,
+ .of_match = zynqmp_dpdma_ids,
+ .ops = &zynqmp_dpdma_ops,
+ .probe = zynqmp_dpdma_probe,
+ .priv_auto = sizeof(struct zynqmp_dpdma_priv),
+};
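For orientation, this is roughly how a client driver (an ethernet driver,
for instance) consumes the channel-based API that ti-udma implements above.
The snippet is an illustrative sketch against the DMA uclass helpers
declared in include/dma.h; the channel names, buffer sizes, and the
example_dma_io() wrapper itself are hypothetical.

	#include <dma.h>

	static int example_dma_io(struct udevice *dev, void *rxbuf,
				  void *pkt, size_t len)
	{
		struct dma rx, tx;
		void *in;
		int ret;

		/* "rx"/"tx" must match dma-names in the client's DT node;
		 * this resolves through udma_of_xlate()/udma_request().
		 */
		ret = dma_get_by_name(dev, "rx", &rx);
		if (!ret)
			ret = dma_get_by_name(dev, "tx", &tx);
		if (ret)
			return ret;

		/* queue one receive buffer, then start both channels */
		dma_prepare_rcv_buf(&rx, rxbuf, 1536);
		dma_enable(&rx);
		dma_enable(&tx);

		dma_send(&tx, pkt, len, NULL);      /* blocking, see udma_send() */
		ret = dma_receive(&rx, &in, NULL);  /* packet length, or 0 if
						     * nothing arrived yet */

		dma_disable(&tx);
		dma_disable(&rx);
		return ret < 0 ? ret : 0;
	}

In a real driver the dma_receive() call would sit in a poll loop and each
consumed buffer would be handed back via dma_prepare_rcv_buf() before the
next receive, since ti-udma caps the number of queued RX descriptors at
UDMA_RX_DESC_NUM.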