Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/qcom/wcnss_ctrl.c        1
-rw-r--r--  drivers/soc/ti/Kconfig              42
-rw-r--r--  drivers/soc/ti/Makefile              2
-rw-r--r--  drivers/soc/ti/k3-ringacc.c        460
-rw-r--r--  drivers/soc/ti/k3-socinfo.c          5
-rw-r--r--  drivers/soc/ti/keystone_dsp_mem.c  401
-rw-r--r--  drivers/soc/ti/knav_dma.c            3
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c    65
-rw-r--r--  drivers/soc/ti/pm33xx.c              4
-rw-r--r--  drivers/soc/ti/pruss.c             359
-rw-r--r--  drivers/soc/ti/ti-pat.c            670
-rw-r--r--  drivers/soc/ti/ti_sci_inta_msi.c    12
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c       214
13 files changed, 2063 insertions, 175 deletions
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index e5c68051fb17..ad9f28dc13f1 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -276,6 +276,7 @@ struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rp
strscpy(chinfo.name, name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
+ chinfo.desc[0] = '\0';
return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
}
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index f5b82ffa637b..fe4a1bb292ea 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -1,22 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-# 64-bit ARM SoCs from TI
-if ARM64
-
-if ARCH_K3
-
-config ARCH_K3_AM6_SOC
- bool "K3 AM6 SoC"
- help
- Enable support for TI's AM6 SoC Family support
-
-config ARCH_K3_J721E_SOC
- bool "K3 J721E SoC"
- help
- Enable support for TI's J721E SoC Family support
-
-endif
-
-endif
#
# TI SOC drivers
@@ -26,6 +8,17 @@ menuconfig SOC_TI
if SOC_TI
+config KEYSTONE_DSP_MEM
+ tristate "TI Keystone DSP Memory Mapping Driver"
+ depends on ARCH_KEYSTONE
+ help
+ Userspace memory mapping interface driver for TI Keystone SoCs.
+ Provides access to MSM SRAM memory regions and dedicated DDR
+ carveout memory regions to user space to aid userspace loading
+ of the DSPs within the SoC.
+
+ If unsure, say N.
+
config KEYSTONE_NAVIGATOR_QMSS
tristate "Keystone Queue Manager Sub System"
depends on ARCH_KEYSTONE
@@ -81,7 +74,7 @@ config TI_SCI_PM_DOMAINS
rootfs may be available.
config TI_K3_RINGACC
- bool "K3 Ring accelerator Sub System"
+ tristate "K3 Ring accelerator Sub System"
depends on ARCH_K3 || COMPILE_TEST
depends on TI_SCI_INTA_IRQCHIP
help
@@ -112,6 +105,17 @@ config TI_PRUSS
processors on various TI SoCs. It's safe to say N here if you're
not interested in the PRU or if you are unsure.
+config TI_PAT
+ tristate "TI PAT DMA-BUF exporter"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on ARCH_DMA_ADDR_T_64BIT
+ select REGMAP
+ help
+ Driver for TI Page-based Address Translator (PAT). This driver
+ provides an API allowing the remapping of a non-contiguous
+ DMA-BUF into a contiguous one that is suitable for devices needing
+ contiguous memory.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index cc3c972fad2e..3cd6331328a4 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -2,6 +2,7 @@
#
# TI Keystone SOC drivers
#
+obj-$(CONFIG_KEYSTONE_DSP_MEM) += keystone_dsp_mem.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
@@ -14,3 +15,4 @@ obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
obj-$(CONFIG_TI_PRUSS) += pruss.o
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
+obj-$(CONFIG_TI_PAT) += ti-pat.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 1147dc4c1d59..a5bf90646337 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -7,10 +7,12 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
+#include <linux/dma/ti-cppi5.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
@@ -21,6 +23,7 @@ static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);
#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK GENMASK(15, 0)
/**
* struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
@@ -43,7 +46,13 @@ struct k3_ring_rt_regs {
u32 hwindx;
};
-#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_DMARING_RT_REGS_STEP 0x2000
+#define K3_DMARING_RT_REGS_REVERSE_OFS 0x1000
+#define K3_RINGACC_RT_OCC_MASK GENMASK(20, 0)
+#define K3_DMARING_RT_OCC_TDOWN_COMPLETE BIT(31)
+#define K3_DMARING_RT_DB_ENTRY_MASK GENMASK(7, 0)
+#define K3_DMARING_RT_DB_TDOWN_ACK BIT(31)
/**
* struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
@@ -122,6 +131,7 @@ struct k3_ring_state {
u32 occ;
u32 windex;
u32 rindex;
+ u32 tdown_complete:1;
};
/**
@@ -137,10 +147,13 @@ struct k3_ring_state {
* @elm_size: Size of the ring element
* @mode: Ring mode
* @flags: flags
+ * @state: Ring state
* @ring_id: Ring Id
* @parent: Pointer on struct @k3_ringacc
* @use_count: Use count for shared rings
* @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ * @dma_dev: device to be used for DMA API (allocation, mapping)
+ * @asel: Address Space Select value for physical addresses
*/
struct k3_ring {
struct k3_ring_rt_regs __iomem *rt;
@@ -155,11 +168,15 @@ struct k3_ring {
u32 flags;
#define K3_RING_FLAG_BUSY BIT(1)
#define K3_RING_FLAG_SHARED BIT(2)
+#define K3_RING_FLAG_REVERSE BIT(3)
struct k3_ring_state state;
u32 ring_id;
struct k3_ringacc *parent;
u32 use_count;
int proxy_id;
+ struct device *dma_dev;
+ u32 asel;
+#define K3_ADDRESS_ASEL_SHIFT 48
};
struct k3_ringacc_ops {
@@ -185,6 +202,7 @@ struct k3_ringacc_ops {
* @tisci_ring_ops: ti-sci rings ops
* @tisci_dev_id: ti-sci device id
* @ops: SoC specific ringacc operation
+ * @dma_rings: indicate DMA ring (dual ring within BCDMA/PKTDMA)
*/
struct k3_ringacc {
struct device *dev;
@@ -207,6 +225,7 @@ struct k3_ringacc {
u32 tisci_dev_id;
const struct k3_ringacc_ops *ops;
+ bool dma_rings;
};
/**
@@ -218,6 +237,21 @@ struct k3_ringacc_soc_data {
unsigned dma_ring_reset_quirk:1;
};
+static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
+{
+ return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
+}
+
+static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
+{
+ u32 val;
+
+ val = readl(&ring->rt->occ);
+
+ ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
+ ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
+}
+
static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
@@ -231,12 +265,24 @@ static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
static struct k3_ring_ops k3_ring_mode_ring_ops = {
.push_tail = k3_ringacc_ring_push_mem,
.pop_head = k3_ringacc_ring_pop_mem,
};
+static struct k3_ring_ops k3_dmaring_fwd_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_dmaring_fwd_pop,
+};
+
+static struct k3_ring_ops k3_dmaring_reverse_ops = {
+ /* Reverse side of the DMA ring can only be popped by SW */
+ .pop_head = k3_dmaring_reverse_pop,
+};
+
static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
@@ -290,6 +336,9 @@ struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
mutex_lock(&ringacc->req_lock);
+ if (!try_module_get(ringacc->dev->driver->owner))
+ goto err_module_get;
+
if (id == K3_RINGACC_RING_ID_ANY) {
/* Request for any general purpose ring */
struct ti_sci_resource_desc *gp_rings =
@@ -334,11 +383,48 @@ out:
return &ringacc->rings[id];
error:
+ module_put(ringacc->dev->driver->owner);
+
+err_module_get:
mutex_unlock(&ringacc->req_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ /*
+ * DMA rings must be requested by ID, completion ring is the reverse
+ * side of the forward ring
+ */
+ if (fwd_id < 0)
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (test_bit(fwd_id, ringacc->rings_inuse)) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ *fwd_ring = &ringacc->rings[fwd_id];
+ *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
+ set_bit(fwd_id, ringacc->rings_inuse);
+ ringacc->rings[fwd_id].use_count++;
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);
+
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return ret;
+}
+
int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
int fwd_id, int compl_id,
struct k3_ring **fwd_ring,
@@ -349,6 +435,10 @@ int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
if (!fwd_ring || !compl_ring)
return -EINVAL;
+ if (ringacc->dma_rings)
+ return k3_dmaring_request_dual_ring(ringacc, fwd_id,
+ fwd_ring, compl_ring);
+
*fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
if (!(*fwd_ring))
return -ENODEV;
@@ -365,20 +455,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- ring->size,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
+ ring_cfg.count = ring->size;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -398,20 +484,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
enum k3_ring_mode mode)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- mode,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
+ ring_cfg.mode = mode;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -426,7 +508,7 @@ void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
goto reset;
if (!occ)
- occ = readl(&ring->rt->occ);
+ occ = k3_ringacc_ring_read_occ(ring);
if (occ) {
u32 db_ring_cnt, db_ring_cnt_cur;
@@ -478,20 +560,15 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -506,6 +583,13 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
ringacc = ring->parent;
+ /*
+ * DMA rings: the rings share memory and configuration; only the forward
+ * ring is configured and the reverse ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
if (!test_bit(ring->ring_id, ringacc->rings_inuse))
@@ -521,11 +605,14 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
k3_ringacc_ring_free_sci(ring);
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt, ring->ring_mem_dma);
ring->flags = 0;
ring->ops = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+
if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
clear_bit(ring->proxy_id, ringacc->proxy_inuse);
ring->proxy = NULL;
@@ -535,6 +622,8 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
no_init:
clear_bit(ring->ring_id, ringacc->rings_inuse);
+ module_put(ringacc->dev->driver->owner);
+
out:
mutex_unlock(&ringacc->req_lock);
return 0;
@@ -575,29 +664,112 @@ EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
- u32 ring_idx;
int ret;
if (!ringacc->tisci)
return -EINVAL;
- ring_idx = ring->ring_id;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring_idx,
- lower_32_bits(ring->ring_mem_dma),
- upper_32_bits(ring->ring_mem_dma),
- ring->size,
- ring->mode,
- ring->elm_size,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+ ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
+ ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
+ ring_cfg.count = ring->size;
+ ring_cfg.mode = ring->mode;
+ ring_cfg.size = ring->elm_size;
+ ring_cfg.asel = ring->asel;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
- ret, ring_idx);
+ ret, ring->ring_id);
+
+ return ret;
+}
+
+static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ struct k3_ring *reverse_ring;
+ int ret = 0;
+
+ if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
+ cfg->mode != K3_RINGACC_RING_MODE_RING ||
+ cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ /*
+ * DMA rings: the rings share memory and configuration; only the forward
+ * ring is configured and the reverse ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->asel = cfg->asel;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev) {
+ dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
+ ring->ring_id);
+ ring->dma_dev = ringacc->dev;
+ }
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ ring->ops = &k3_dmaring_fwd_ops;
+
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+
+ k3_ringacc_ring_dump(ring);
+
+ /* DMA rings: configure reverse ring */
+ reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
+ reverse_ring->size = cfg->size;
+ reverse_ring->elm_size = cfg->elm_size;
+ reverse_ring->mode = cfg->mode;
+ reverse_ring->asel = cfg->asel;
+ memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
+ reverse_ring->ops = &k3_dmaring_reverse_ops;
+
+ reverse_ring->ring_mem_virt = ring->ring_mem_virt;
+ reverse_ring->ring_mem_dma = ring->ring_mem_dma;
+ reverse_ring->flags |= K3_RING_FLAG_BUSY;
+ k3_ringacc_ring_dump(reverse_ring);
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+ ring->proxy = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
return ret;
}
@@ -608,8 +780,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
if (!ring || !cfg)
return -EINVAL;
+
ringacc = ring->parent;
+ if (ringacc->dma_rings)
+ return k3_dmaring_cfg(ring, cfg);
+
if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
@@ -648,8 +824,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
switch (ring->mode) {
case K3_RINGACC_RING_MODE_RING:
ring->ops = &k3_ring_mode_ring_ops;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev)
+ ring->dma_dev = ringacc->dev;
break;
case K3_RINGACC_RING_MODE_MESSAGE:
+ ring->dma_dev = ringacc->dev;
if (ring->proxy)
ring->ops = &k3_ring_mode_proxy_ops;
else
@@ -661,9 +841,9 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
goto err_free_proxy;
}
- ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
- ring->size * (4 << ring->elm_size),
- &ring->ring_mem_dma, GFP_KERNEL);
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
if (!ring->ring_mem_virt) {
dev_err(ringacc->dev, "Failed to alloc ring mem\n");
ret = -ENOMEM;
@@ -684,12 +864,13 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
return 0;
err_free_mem:
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt,
ring->ring_mem_dma);
err_free_ops:
ring->ops = NULL;
+ ring->dma_dev = NULL;
err_free_proxy:
ring->proxy = NULL;
return ret;
@@ -711,7 +892,7 @@ u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
return -EINVAL;
if (!ring->state.free)
- ring->state.free = ring->size - readl(&ring->rt->occ);
+ ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);
return ring->state.free;
}
@@ -722,7 +903,7 @@ u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- return readl(&ring->rt->occ);
+ return k3_ringacc_ring_read_occ(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
@@ -898,6 +1079,72 @@ static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
K3_RINGACC_ACCESS_MODE_POP_HEAD);
}
+/*
+ * The element is 48 bits of address + ASEL bits in the ring.
+ * ASEL is used by the DMAs and should be removed for the kernel as it is not
+ * part of the physical memory address.
+ */
+static void k3_dmaring_remove_asel_from_elem(u64 *elem)
+{
+ *elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+ u32 elem_idx;
+
+ /*
+ * DMA rings: the forward ring is always tied to a DMA channel, and the HW
+ * does not maintain any state data required for the POP operation, so it is
+ * unknown how many elements were consumed by HW. To actually do a POP, the
+ * read pointer has to be recalculated every time.
+ */
+ ring->state.occ = k3_ringacc_ring_read_occ(ring);
+ if (ring->state.windex >= ring->state.occ)
+ elem_idx = ring->state.windex - ring->state.occ;
+ else
+ elem_idx = ring->size - (ring->state.occ - ring->state.windex);
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.windex, elem_idx,
+ elem_ptr);
+ return 0;
+}
+
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ if (ring->state.occ) {
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
+ } else if (ring->state.tdown_complete) {
+ dma_addr_t *value = elem;
+
+ *value = CPPI5_TDCM_MARKER;
+ writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
+ ring->state.tdown_complete = false;
+ }
+
+ dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
+
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
void *elem_ptr;
@@ -905,6 +1152,11 @@ static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
memcpy(elem_ptr, elem, (4 << ring->elm_size));
+ if (ring->parent->dma_rings) {
+ u64 *addr = elem_ptr;
+
+ *addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
+ }
ring->state.windex = (ring->state.windex + 1) % ring->size;
ring->state.free--;
@@ -981,12 +1233,12 @@ int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
return -EINVAL;
if (!ring->state.occ)
- ring->state.occ = k3_ringacc_ring_get_occ(ring);
+ k3_ringacc_ring_update_occ(ring);
dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
ring->state.rindex);
- if (!ring->state.occ)
+ if (!ring->state.occ && !ring->state.tdown_complete)
return -ENODATA;
if (ring->ops && ring->ops->pop_head)
@@ -1004,7 +1256,7 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
return -EINVAL;
if (!ring->state.occ)
- ring->state.occ = k3_ringacc_ring_get_occ(ring);
+ k3_ringacc_ring_update_occ(ring);
dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
ring->state.occ, ring->state.rindex);
@@ -1115,7 +1367,7 @@ static int k3_ringacc_init(struct platform_device *pdev,
dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi_domain) {
- dev_err(dev, "Failed to get MSI domain\n");
+ dev_dbg(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
@@ -1208,19 +1460,80 @@ static const struct of_device_id k3_ringacc_of_match[] = {
{ .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
{},
};
+MODULE_DEVICE_TABLE(of, k3_ringacc_of_match);
+
+struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
+ struct k3_ringacc_init_data *data)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ void __iomem *base_rt;
+ struct resource *res;
+ int i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return ERR_PTR(-ENOMEM);
+
+ ringacc->dev = dev;
+ ringacc->dma_rings = true;
+ ringacc->num_rings = data->num_rings;
+ ringacc->tisci = data->tisci;
+ ringacc->tisci_dev_id = data->tisci_dev_id;
+
+ mutex_init(&ringacc->req_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ringrt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return ERR_CAST(base_rt);
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings * 2,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ struct k3_ring *ring = &ringacc->rings[i];
+
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ ring = &ringacc->rings[ringacc->num_rings + i];
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
+ K3_DMARING_RT_REGS_REVERSE_OFS;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ ring->flags = K3_RING_FLAG_REVERSE;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
static int k3_ringacc_probe(struct platform_device *pdev)
{
const struct ringacc_match_data *match_data;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct k3_ringacc *ringacc;
int ret;
- match = of_match_node(k3_ringacc_of_match, dev->of_node);
- if (!match)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
- match_data = match->data;
ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
if (!ringacc)
@@ -1243,12 +1556,27 @@ static int k3_ringacc_probe(struct platform_device *pdev)
return 0;
}
+static int k3_ringacc_remove(struct platform_device *pdev)
+{
+ struct k3_ringacc *ringacc = dev_get_drvdata(&pdev->dev);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_del(&ringacc->list);
+ mutex_unlock(&k3_ringacc_list_lock);
+ return 0;
+}
+
static struct platform_driver k3_ringacc_driver = {
.probe = k3_ringacc_probe,
+ .remove = k3_ringacc_remove,
.driver = {
.name = "k3-ringacc",
.of_match_table = k3_ringacc_of_match,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver(k3_ringacc_driver);
+module_platform_driver(k3_ringacc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI Ringacc driver for K3 SOCs");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index bbbc2d2b7091..ad97e08a25f6 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -40,6 +40,11 @@ static const struct k3_soc_id {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
{ 0xBB6D, "J7200" },
+ { 0xBB38, "AM64X" },
+ { 0xBB75, "J721S2"},
+ { 0xBB7E, "AM62X" },
+ { 0xBB80, "J784S4" },
+ { 0xBB8D, "AM62AX" },
};
static int
diff --git a/drivers/soc/ti/keystone_dsp_mem.c b/drivers/soc/ti/keystone_dsp_mem.c
new file mode 100644
index 000000000000..b102411bf660
--- /dev/null
+++ b/drivers/soc/ti/keystone_dsp_mem.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI Keystone DSP Memory Mapping Driver
+ *
+ * Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <uapi/linux/keystone_dsp_mem.h>
+
+#define KEYSTONE_ALIAS_PHYS_START 0x80000000ULL
+#define KEYSTONE_ALIAS_PHYS_SIZE 0x80000000ULL /* 2G */
+
+#define KEYSTONE_HIGH_PHYS_START 0x800000000ULL
+#define KEYSTONE_HIGH_PHYS_LIMIT (KEYSTONE_HIGH_PHYS_START + \
+ KEYSTONE_ALIAS_PHYS_SIZE)
+
+#define to_alias_addr(addr) (((addr) - KEYSTONE_HIGH_PHYS_START) + \
+ KEYSTONE_ALIAS_PHYS_START)
+
+/**
+ * struct keystone_dsp_mem - internal memory structure
+ * @addr: physical address on the bus to access the memory region
+ * @size: size of the memory region
+ * @kobj: kobject for the sysfs directory file
+ */
+struct keystone_dsp_mem {
+ phys_addr_t addr;
+ resource_size_t size;
+ struct kobject kobj;
+};
+
+#define to_dsp_mem(obj) container_of(obj, struct keystone_dsp_mem, kobj)
+
+/**
+ * struct keystone_dsp_mem_info - Keystone DSP Memory device structure
+ * @misc: child miscdevice structure
+ * @mem: memory region array pointer
+ * @num_maps: number of memory regions
+ */
+struct keystone_dsp_mem_info {
+ struct miscdevice misc;
+ struct keystone_dsp_mem *mem;
+ int num_maps;
+};
+
+static struct keystone_dsp_mem_info *dsp_mem;
+
+#define to_dsp_mem_info(m) container_of(m, struct keystone_dsp_mem_info, misc)
+
+static ssize_t mem_addr_show(struct keystone_dsp_mem *mem, char *buf)
+{
+ return sprintf(buf, "%pa\n", &mem->addr);
+}
+
+static ssize_t mem_size_show(struct keystone_dsp_mem *mem, char *buf)
+{
+ return sprintf(buf, "%pa\n", &mem->size);
+}
+
+struct mem_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct keystone_dsp_mem *mem, char *buf);
+ ssize_t (*store)(struct keystone_dsp_mem *mem, const char *buf,
+ size_t len);
+};
+
+static struct mem_sysfs_entry addr_attribute =
+ __ATTR(addr, 0444, mem_addr_show, NULL);
+static struct mem_sysfs_entry size_attribute =
+ __ATTR(size, 0444, mem_size_show, NULL);
+
+static struct attribute *attrs[] = {
+ &addr_attribute.attr,
+ &size_attribute.attr,
+ NULL, /* sentinel */
+};
+
+static ssize_t mem_type_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct keystone_dsp_mem *mem = to_dsp_mem(kobj);
+ struct mem_sysfs_entry *entry;
+
+ entry = container_of(attr, struct mem_sysfs_entry, attr);
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(mem, buf);
+}
+
+static const struct sysfs_ops mem_sysfs_ops = {
+ .show = mem_type_show,
+};
+
+static struct kobj_type mem_attr_type = {
+ .sysfs_ops = &mem_sysfs_ops,
+ .default_attrs = attrs,
+};
+
+static int keystone_dsp_mem_add_attrs(struct keystone_dsp_mem_info *dsp_mem)
+{
+ int i, ret;
+ struct keystone_dsp_mem *mem;
+ struct kobject *kobj_parent = &dsp_mem->misc.this_device->kobj;
+
+ for (i = 0; i < dsp_mem->num_maps; i++) {
+ mem = &dsp_mem->mem[i];
+ kobject_init(&mem->kobj, &mem_attr_type);
+ ret = kobject_add(&mem->kobj, kobj_parent, "memory%d", i);
+ if (ret)
+ goto err_kobj;
+ ret = kobject_uevent(&mem->kobj, KOBJ_ADD);
+ if (ret)
+ goto err_kobj;
+ }
+
+ return 0;
+
+err_kobj:
+ for (; i >= 0; i--) {
+ mem = &dsp_mem->mem[i];
+ kobject_put(&mem->kobj);
+ }
+ return ret;
+}
+
+static void keystone_dsp_mem_del_attrs(struct keystone_dsp_mem_info *dsp_mem)
+{
+ int i;
+ struct keystone_dsp_mem *mem;
+
+ for (i = 0; i < dsp_mem->num_maps; i++) {
+ mem = &dsp_mem->mem[i];
+ kobject_put(&mem->kobj);
+ }
+}
+
+static int keystone_dsp_mem_check_addr(struct keystone_dsp_mem_info *dsp_mem,
+ int mask, size_t size)
+{
+ size_t req_offset;
+ u32 index;
+
+ index = mask & KEYSTONE_DSP_MEM_MAP_INDEX_MASK;
+ if (index >= dsp_mem->num_maps) {
+ pr_err("%s: invalid mmap region index %d\n", __func__, index);
+ return -EINVAL;
+ }
+
+ req_offset = (mask - index) << PAGE_SHIFT;
+ if (req_offset + size < req_offset) {
+ pr_err("%s: invalid request - overflow, mmap offset = 0x%zx size 0x%zx region %d\n",
+ __func__, req_offset, size, index);
+ return -EINVAL;
+ }
+
+ if ((req_offset + size) > dsp_mem->mem[index].size) {
+ pr_err("%s: invalid request - out of range, mmap offset 0x%zx size 0x%zx region %d\n",
+ __func__, req_offset, size, index);
+ return -EINVAL;
+ }
+
+ return index;
+}
+
+/*
+ * This is a custom mmap function following semantics based on the UIO
+ * mmap implementation. The vm_pgoff passed in the vma structure is a
+ * combination of the memory region index and the actual page offset in
+ * that region. This checks if user request is in valid range before
+ * providing mmap access.
+ *
+ * XXX: Evaluate this approach, as the MSMC memory can be mapped in whole
+ * into userspace as it is not super-large, and the allowable kernel
+ * unmapped DDR memory can be mmaped using traditional mmap semantics.
+ */
+static int keystone_dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+ struct miscdevice *misc = file->private_data;
+ struct keystone_dsp_mem_info *dsp_mem = to_dsp_mem_info(misc);
+ int index;
+
+ index = keystone_dsp_mem_check_addr(dsp_mem, vma->vm_pgoff, size);
+ if (index < 0)
+ return index;
+
+ vma->vm_page_prot =
+ phys_mem_access_prot(file,
+ (dsp_mem->mem[index].addr >> PAGE_SHIFT) +
+ (vma->vm_pgoff - index), size,
+ vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (dsp_mem->mem[index].addr >> PAGE_SHIFT) +
+ (vma->vm_pgoff - index), size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static const struct file_operations keystone_dsp_mem_fops = {
+ .owner = THIS_MODULE,
+ .mmap = keystone_dsp_mem_mmap,
+};
+
+static int keystone_dsp_mem_parse(struct device_node *np, int index)
+{
+ phys_addr_t start, end, addr, size;
+ struct resource res;
+ resource_size_t rsize;
+ int ret, j;
+
+ if (!of_find_property(np, "no-map", NULL)) {
+ pr_err("dsp reserved memory regions without no-map are not supported\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
+ /* make sure only aliased addresses are covered */
+ rsize = resource_size(&res);
+ start = res.start;
+ end = res.start + rsize;
+ if (start < KEYSTONE_HIGH_PHYS_START ||
+ start >= KEYSTONE_HIGH_PHYS_LIMIT ||
+ end > KEYSTONE_HIGH_PHYS_LIMIT) {
+ pr_err("invalid address/size for keystone dsp memory carveout: %pa of size %pa\n",
+ &start, &rsize);
+ return -EINVAL;
+ }
+
+ /* check for overlaps */
+ start = to_alias_addr(start);
+ end = to_alias_addr(end);
+ for (j = 0; j < index; j++) {
+ addr = dsp_mem->mem[j].addr;
+ size = dsp_mem->mem[j].size;
+ if ((end > addr && end <= addr + size) ||
+ (start >= addr && start < addr + size) ||
+ (start < addr && end > addr + size)) {
+ pr_err("dsp memory carveout (%pa of size %pa) overlaps with (%pa of size %pa)\n",
+ &start, &rsize, &addr, &size);
+ return -EINVAL;
+ }
+ }
+
+ dsp_mem->mem[index].addr = to_alias_addr(res.start);
+ dsp_mem->mem[index].size = resource_size(&res);
+
+ return 0;
+}
+
+static int keystone_dsp_mem_init(void)
+{
+ struct miscdevice *misc;
+ struct resource res;
+ struct device_node *rmem_np, *sram_np, *np;
+ int ret, i = 0;
+ int num_maps = 0, num_sram = 0;
+
+ if (!of_have_populated_dt())
+ return -EOPNOTSUPP;
+
+ /* module is supported only on TI Keystone SoCs */
+ if (!of_machine_is_compatible("ti,keystone"))
+ return -EOPNOTSUPP;
+
+ /* count the number of DDR regions */
+ rmem_np = of_find_node_by_path("/reserved-memory");
+ if (rmem_np) {
+ for_each_available_child_of_node(rmem_np, np) {
+ if (of_device_is_compatible(np,
+ "ti,keystone-dsp-mem-pool"))
+ num_maps++;
+ }
+ }
+
+ for_each_compatible_node(sram_np, NULL, "ti,keystone-dsp-msm-ram") {
+ if (!of_device_is_available(sram_np))
+ continue;
+ num_sram++;
+ }
+
+ if ((!num_maps && !num_sram) ||
+ (num_maps + num_sram > KEYSTONE_DSP_MEM_MAP_INDEX_MASK)) {
+ ret = -EINVAL;
+ goto put_rmem;
+ }
+
+ dsp_mem = kzalloc(sizeof(*dsp_mem), GFP_KERNEL);
+ if (!dsp_mem) {
+ ret = -ENOMEM;
+ goto put_rmem;
+ }
+
+ dsp_mem->mem = kcalloc(num_maps + num_sram, sizeof(*dsp_mem->mem),
+ GFP_KERNEL);
+ if (!dsp_mem->mem) {
+ ret = -ENOMEM;
+ goto free_dsp;
+ }
+
+ /* handle reserved-memory carveouts */
+ if (num_maps) {
+ for_each_available_child_of_node(rmem_np, np) {
+ if (!of_device_is_compatible(np, "ti,keystone-dsp-mem-pool"))
+ continue;
+
+ ret = keystone_dsp_mem_parse(np, i);
+ if (ret) {
+ of_node_put(np);
+ goto free_mem;
+ }
+ i++;
+ dsp_mem->num_maps++;
+ }
+ }
+
+ /* handle on-chip SRAM reserved regions */
+ if (num_sram) {
+ for_each_compatible_node(sram_np, NULL,
+ "ti,keystone-dsp-msm-ram") {
+ if (!of_device_is_available(sram_np))
+ continue;
+
+ ret = of_address_to_resource(sram_np, 0, &res);
+ if (ret) {
+ ret = -EINVAL;
+ of_node_put(sram_np);
+ goto free_mem;
+ }
+ dsp_mem->mem[i].addr = res.start;
+ dsp_mem->mem[i].size = resource_size(&res);
+ i++;
+ dsp_mem->num_maps++;
+ }
+ }
+
+ misc = &dsp_mem->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = "dspmem";
+ misc->fops = &keystone_dsp_mem_fops;
+ misc->parent = NULL;
+ ret = misc_register(misc);
+ if (ret) {
+ pr_err("%s: could not register dspmem misc device\n", __func__);
+ goto free_mem;
+ }
+
+ ret = keystone_dsp_mem_add_attrs(dsp_mem);
+ if (ret) {
+ pr_err("%s: error creating sysfs files (%d)\n", __func__, ret);
+ goto unregister_misc;
+ }
+ of_node_put(rmem_np);
+
+ pr_info("registered dspmem misc device\n");
+
+ return 0;
+
+unregister_misc:
+ misc_deregister(&dsp_mem->misc);
+free_mem:
+ kfree(dsp_mem->mem);
+free_dsp:
+ kfree(dsp_mem);
+ dsp_mem = NULL;
+put_rmem:
+ of_node_put(rmem_np);
+ return ret;
+}
+
+static void keystone_dsp_mem_exit(void)
+{
+ keystone_dsp_mem_del_attrs(dsp_mem);
+
+ misc_deregister(&dsp_mem->misc);
+
+ kfree(dsp_mem->mem);
+ kfree(dsp_mem);
+ dsp_mem = NULL;
+}
+
+module_init(keystone_dsp_mem_init);
+module_exit(keystone_dsp_mem_exit);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Keystone DSP Memory Mapping Driver");
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 56597f6ea666..591d14ebcb11 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -500,7 +500,7 @@ EXPORT_SYMBOL_GPL(knav_dma_open_channel);
/**
* knav_dma_close_channel() - Destroy a dma channel
*
- * channel: dma channel handle
+ * @channel: dma channel handle
*
*/
void knav_dma_close_channel(void *channel)
@@ -758,6 +758,7 @@ static int knav_dma_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
ret = dma_init(node, child);
if (ret) {
+ of_node_put(child);
dev_err(&pdev->dev, "init failed with %d\n", ret);
break;
}
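
The of_node_put() added above (and the similar additions in knav_qmss_queue.c
further down) follow the usual device-tree iterator rule: for_each_child_of_node()
holds a reference on the current child that is only dropped when the loop advances,
so any early break must drop it explicitly. A generic sketch of the pattern, where
do_child_init() is a hypothetical per-child helper:

static int init_children(struct device *dev, struct device_node *parent)
{
	struct device_node *child;
	int ret = 0;

	for_each_child_of_node(parent, child) {
		ret = do_child_init(dev, child);	/* hypothetical per-child init */
		if (ret) {
			/* balance the reference held by the iterator */
			of_node_put(child);
			break;
		}
	}

	return ret;
}
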
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 20c84741639e..52389859395c 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
/**
* knav_queue_notify: qmss queue notfier call
*
- * @inst: qmss queue instance like accumulator
+ * @inst: - qmss queue instance like accumulator
*/
void knav_queue_notify(struct knav_queue_inst *inst)
{
@@ -511,10 +511,10 @@ static int knav_queue_flush(struct knav_queue *qh)
/**
* knav_queue_open() - open a hardware queue
- * @name - name to give the queue handle
- * @id - desired queue number if any or specifes the type
+ * @name: - name to give the queue handle
+ * @id: - desired queue number if any or specifes the type
* of queue
- * @flags - the following flags are applicable to queues:
+ * @flags: - the following flags are applicable to queues:
* KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
* exclusive by default.
* Subsequent attempts to open a shared queue should
@@ -545,7 +545,7 @@ EXPORT_SYMBOL_GPL(knav_queue_open);
/**
* knav_queue_close() - close a hardware queue handle
- * @qh - handle to close
+ * @qhandle: - handle to close
*/
void knav_queue_close(void *qhandle)
{
@@ -572,9 +572,9 @@ EXPORT_SYMBOL_GPL(knav_queue_close);
/**
* knav_queue_device_control() - Perform control operations on a queue
- * @qh - queue handle
- * @cmd - control commands
- * @arg - command argument
+ * @qhandle: - queue handle
+ * @cmd: - control commands
+ * @arg: - command argument
*
* Returns 0 on success, errno otherwise.
*/
@@ -623,10 +623,10 @@ EXPORT_SYMBOL_GPL(knav_queue_device_control);
/**
* knav_queue_push() - push data (or descriptor) to the tail of a queue
- * @qh - hardware queue handle
- * @data - data to push
- * @size - size of data to push
- * @flags - can be used to pass additional information
+ * @qhandle: - hardware queue handle
+ * @dma: - DMA data to push
+ * @size: - size of data to push
+ * @flags: - can be used to pass additional information
*
* Returns 0 on success, errno otherwise.
*/
@@ -646,8 +646,8 @@ EXPORT_SYMBOL_GPL(knav_queue_push);
/**
* knav_queue_pop() - pop data (or descriptor) from the head of a queue
- * @qh - hardware queue handle
- * @size - (optional) size of the data pop'ed.
+ * @qhandle: - hardware queue handle
+ * @size: - (optional) size of the data pop'ed.
*
* Returns a DMA address on success, 0 on failure.
*/
@@ -746,9 +746,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
/**
* knav_pool_create() - Create a pool of descriptors
- * @name - name to give the pool handle
- * @num_desc - numbers of descriptors in the pool
- * @region_id - QMSS region id from which the descriptors are to be
+ * @name: - name to give the pool handle
+ * @num_desc: - numbers of descriptors in the pool
+ * @region_id: - QMSS region id from which the descriptors are to be
* allocated.
*
* Returns a pool handle on success.
@@ -856,7 +856,7 @@ EXPORT_SYMBOL_GPL(knav_pool_create);
/**
* knav_pool_destroy() - Free a pool of descriptors
- * @pool - pool handle
+ * @ph: - pool handle
*/
void knav_pool_destroy(void *ph)
{
@@ -884,7 +884,7 @@ EXPORT_SYMBOL_GPL(knav_pool_destroy);
/**
* knav_pool_desc_get() - Get a descriptor from the pool
- * @pool - pool handle
+ * @ph: - pool handle
*
* Returns descriptor from the pool.
*/
@@ -905,7 +905,8 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_get);
/**
* knav_pool_desc_put() - return a descriptor to the pool
- * @pool - pool handle
+ * @ph: - pool handle
+ * @desc: - virtual address
*/
void knav_pool_desc_put(void *ph, void *desc)
{
@@ -918,11 +919,11 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_put);
/**
* knav_pool_desc_map() - Map descriptor for DMA transfer
- * @pool - pool handle
- * @desc - address of descriptor to map
- * @size - size of descriptor to map
- * @dma - DMA address return pointer
- * @dma_sz - adjusted return pointer
+ * @ph: - pool handle
+ * @desc: - address of descriptor to map
+ * @size: - size of descriptor to map
+ * @dma: - DMA address return pointer
+ * @dma_sz: - adjusted return pointer
*
* Returns 0 on success, errno otherwise.
*/
@@ -945,9 +946,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_map);
/**
* knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
- * @pool - pool handle
- * @dma - DMA address of descriptor to unmap
- * @dma_sz - size of descriptor to unmap
+ * @ph: - pool handle
+ * @dma: - DMA address of descriptor to unmap
+ * @dma_sz: - size of descriptor to unmap
*
* Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
* error values on return.
@@ -968,7 +969,7 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
/**
* knav_pool_count() - Get the number of descriptors in pool.
- * @pool - pool handle
+ * @ph: - pool handle
* Returns number of elements in the pool.
*/
int knav_pool_count(void *ph)
@@ -1086,6 +1087,7 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
for_each_child_of_node(regions, child) {
region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating region\n");
return -ENOMEM;
}
@@ -1307,12 +1309,11 @@ static int knav_setup_queue_pools(struct knav_device *kdev,
struct device_node *queue_pools)
{
struct device_node *type, *range;
- int ret;
for_each_child_of_node(queue_pools, type) {
for_each_child_of_node(type, range) {
- ret = knav_setup_queue_range(kdev, range);
/* return value ignored, we init the rest... */
+ knav_setup_queue_range(kdev, range);
}
}
@@ -1399,6 +1400,7 @@ static int knav_queue_init_qmgrs(struct knav_device *kdev,
for_each_child_of_node(qmgrs, child) {
qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
if (!qmgr) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating qmgr\n");
return -ENOMEM;
}
@@ -1498,6 +1500,7 @@ static int knav_queue_init_pdsps(struct knav_device *kdev,
for_each_child_of_node(pdsps, child) {
pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
if (!pdsp) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating pdsp\n");
return -ENOMEM;
}
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index dc21aa855a45..73be958f6aa5 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -135,13 +135,11 @@ static int am33xx_push_sram_idle(void)
static int am33xx_do_sram_idle(u32 wfi_flags)
{
- int ret = 0;
-
if (!m3_ipc || !pm_ops)
return 0;
if (wfi_flags & WFI_FLAG_WAKE_M3)
- ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+ m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
}
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index 30695172a508..434ebd5c4d9f 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -2,10 +2,11 @@
/*
* PRU-ICSS platform driver for various TI SoCs
*
- * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014-2021 Texas Instruments Incorporated - https://www.ti.com/
* Author(s):
* Suman Anna <s-anna@ti.com>
* Andrew F. Davis <afd@ti.com>
+ * Tero Kristo <t-kristo@ti.com>
*/
#include <linux/clk-provider.h>
@@ -18,18 +19,264 @@
#include <linux/pm_runtime.h>
#include <linux/pruss_driver.h>
#include <linux/regmap.h>
+#include <linux/remoteproc.h>
#include <linux/slab.h>
+#define SYSCFG_STANDBY_INIT BIT(4)
+#define SYSCFG_SUB_MWAIT_READY BIT(5)
+
/**
* struct pruss_private_data - PRUSS driver private data
* @has_no_sharedram: flag to indicate the absence of PRUSS Shared Data RAM
* @has_core_mux_clock: flag to indicate the presence of PRUSS core clock
+ * @has_ocp_syscfg: flag to indicate if OCP SYSCFG is present
*/
struct pruss_private_data {
bool has_no_sharedram;
bool has_core_mux_clock;
+ bool has_ocp_syscfg;
};
+/**
+ * pruss_get() - get the pruss for a given PRU remoteproc
+ * @rproc: remoteproc handle of a PRU instance
+ *
+ * Finds the parent pruss device for a PRU given the @rproc handle of the
+ * PRU remote processor. This function increments the pruss device's refcount,
+ * so always use pruss_put() to decrement it back once pruss isn't needed
+ * anymore.
+ *
+ * Return: pruss handle on success, and an ERR_PTR on failure using one
+ * of the following error values
+ * -EINVAL if invalid parameter
+ * -ENODEV if PRU device or PRUSS device is not found
+ */
+struct pruss *pruss_get(struct rproc *rproc)
+{
+ struct pruss *pruss;
+ struct device *dev;
+ struct platform_device *ppdev;
+
+ if (IS_ERR_OR_NULL(rproc))
+ return ERR_PTR(-EINVAL);
+
+ dev = &rproc->dev;
+
+ /* make sure it is PRU rproc */
+ if (!dev->parent || !is_pru_rproc(dev->parent))
+ return ERR_PTR(-ENODEV);
+
+ ppdev = to_platform_device(dev->parent->parent);
+ pruss = platform_get_drvdata(ppdev);
+ if (!pruss)
+ return ERR_PTR(-ENODEV);
+
+ get_device(pruss->dev);
+
+ return pruss;
+}
+EXPORT_SYMBOL_GPL(pruss_get);
+
+/**
+ * pruss_put() - decrement pruss device's usecount
+ * @pruss: pruss handle
+ *
+ * Complementary function for pruss_get(). Needs to be called
+ * after the PRUSS is used, and only if the pruss_get() succeeds.
+ */
+void pruss_put(struct pruss *pruss)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return;
+
+ put_device(pruss->dev);
+}
+EXPORT_SYMBOL_GPL(pruss_put);
+
+/**
+ * pruss_request_mem_region() - request a memory resource
+ * @pruss: the pruss instance
+ * @mem_id: the memory resource id
+ * @region: pointer to memory region structure to be filled in
+ *
+ * This function allows a client driver to request a memory resource,
+ * and if successful, will let the client driver own the particular
+ * memory region until released using the pruss_release_mem_region()
+ * API.
+ *
+ * Return: 0 if requested memory region is available with the memory region
+ * values returned in memory pointed by @region, an error otherwise
+ */
+int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
+ struct pruss_mem_region *region)
+{
+ if (!pruss || !region || mem_id >= PRUSS_MEM_MAX)
+ return -EINVAL;
+
+ mutex_lock(&pruss->lock);
+
+ if (pruss->mem_in_use[mem_id]) {
+ mutex_unlock(&pruss->lock);
+ return -EBUSY;
+ }
+
+ *region = pruss->mem_regions[mem_id];
+ pruss->mem_in_use[mem_id] = region;
+
+ mutex_unlock(&pruss->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pruss_request_mem_region);
+
+/**
+ * pruss_release_mem_region() - release a memory resource
+ * @pruss: the pruss instance
+ * @region: the memory region to release
+ *
+ * This function is the complimentary function to
+ * pruss_request_mem_region(), and allows the client drivers to
+ * release back a memory resource.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region)
+{
+ int id;
+
+ if (!pruss || !region)
+ return -EINVAL;
+
+ mutex_lock(&pruss->lock);
+
+ /* find out the memory region being released */
+ for (id = 0; id < PRUSS_MEM_MAX; id++) {
+ if (pruss->mem_in_use[id] == region)
+ break;
+ }
+
+ if (id == PRUSS_MEM_MAX) {
+ mutex_unlock(&pruss->lock);
+ return -EINVAL;
+ }
+
+ pruss->mem_in_use[id] = NULL;
+ memset(region, 0, sizeof(*region));
+
+ mutex_unlock(&pruss->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pruss_release_mem_region);
+
+/**
+ * pruss_cfg_read() - read a PRUSS CFG sub-module register
+ * @pruss: the pruss instance handle
+ * @reg: register offset within the CFG sub-module
+ * @val: pointer to return the value in
+ *
+ * Reads a given register within the PRUSS CFG sub-module and
+ * returns it through the passed-in @val pointer
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_read(struct pruss *pruss, unsigned int reg, unsigned int *val)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return -EINVAL;
+
+ return regmap_read(pruss->cfg_regmap, reg, val);
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_read);
+
+/**
+ * pruss_cfg_update() - configure a PRUSS CFG sub-module register
+ * @pruss: the pruss instance handle
+ * @reg: register offset within the CFG sub-module
+ * @mask: bit mask to use for programming the @val
+ * @val: value to write
+ *
+ * Programs a given register within the PRUSS CFG sub-module
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_update(struct pruss *pruss, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return -EINVAL;
+
+ return regmap_update_bits(pruss->cfg_regmap, reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_update);
+
+/**
+ * pruss_cfg_ocp_master_ports() - configure PRUSS OCP master ports
+ * @pruss: the pruss instance handle
+ * @enable: set to true for enabling or false for disabling the OCP master ports
+ *
+ * This function programs the PRUSS_SYSCFG.STANDBY_INIT bit either to enable or
+ * disable the OCP master ports (applicable only on SoCs using OCP interconnect
+ * like the OMAP family). Clearing the bit achieves dual functionalities - one
+ * is to deassert the MStandby signal to the device PRCM, and the other is to
+ * enable OCP master ports to allow accesses outside of the PRU-ICSS. The
+ * function has to wait for the PRCM to acknowledge through the monitoring of
+ * the PRUSS_SYSCFG.SUB_MWAIT bit when enabling master ports. Setting the bit
+ * disables the master access, and also signals the PRCM that the PRUSS is ready
+ * for Standby.
+ *
+ * Return: 0 on success, or an error code otherwise. ETIMEDOUT is returned
+ * when the ready-state fails.
+ */
+int pruss_cfg_ocp_master_ports(struct pruss *pruss, bool enable)
+{
+ int ret;
+ u32 syscfg_val, i;
+ const struct pruss_private_data *data;
+
+ if (IS_ERR_OR_NULL(pruss))
+ return -EINVAL;
+
+ data = of_device_get_match_data(pruss->dev);
+
+ /* nothing to do on non OMAP-SoCs */
+ if (!data || !data->has_ocp_syscfg)
+ return 0;
+
+ /* assert the MStandby signal during disable path */
+ if (!enable)
+ return pruss_cfg_update(pruss, PRUSS_CFG_SYSCFG,
+ SYSCFG_STANDBY_INIT,
+ SYSCFG_STANDBY_INIT);
+
+ /* enable the OCP master ports and disable MStandby */
+ ret = pruss_cfg_update(pruss, PRUSS_CFG_SYSCFG, SYSCFG_STANDBY_INIT, 0);
+ if (ret)
+ return ret;
+
+ /* wait till we are ready for transactions - delay is arbitrary */
+ for (i = 0; i < 10; i++) {
+ ret = pruss_cfg_read(pruss, PRUSS_CFG_SYSCFG, &syscfg_val);
+ if (ret)
+ goto disable;
+
+ if (!(syscfg_val & SYSCFG_SUB_MWAIT_READY))
+ return 0;
+
+ udelay(5);
+ }
+
+ dev_err(pruss->dev, "timeout waiting for SUB_MWAIT_READY\n");
+ ret = -ETIMEDOUT;
+
+disable:
+ pruss_cfg_update(pruss, PRUSS_CFG_SYSCFG, SYSCFG_STANDBY_INIT,
+ SYSCFG_STANDBY_INIT);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_ocp_master_ports);
+
static void pruss_of_free_clk_provider(void *data)
{
struct device_node *clk_mux_np = data;
@@ -126,8 +373,6 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
int ret = 0;
data = of_device_get_match_data(dev);
- if (IS_ERR(data))
- return -ENODEV;
clks_np = of_get_child_by_name(cfg_node, "clocks");
if (!clks_np) {
@@ -163,6 +408,53 @@ static struct regmap_config regmap_conf = {
.reg_stride = 4,
};
+static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
+{
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct resource res;
+ int ret;
+
+ child = of_get_child_by_name(np, "cfg");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(child, 0, &res)) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!pruss->cfg_base) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
+ (u64)res.start);
+ regmap_conf.max_register = resource_size(&res) - 4;
+
+ pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
+ &regmap_conf);
+ kfree(regmap_conf.name);
+ if (IS_ERR(pruss->cfg_regmap)) {
+ dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
+ PTR_ERR(pruss->cfg_regmap));
+ ret = PTR_ERR(pruss->cfg_regmap);
+ goto node_put;
+ }
+
+ ret = pruss_clk_init(pruss, child);
+ if (ret)
+ dev_err(dev, "pruss_clk_init failed, ret = %d\n", ret);
+
+node_put:
+ of_node_put(child);
+ return ret;
+}
+
static int pruss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -175,10 +467,6 @@ static int pruss_probe(struct platform_device *pdev)
const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
data = of_device_get_match_data(&pdev->dev);
- if (IS_ERR(data)) {
- dev_err(dev, "missing private data\n");
- return -ENODEV;
- }
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -191,6 +479,7 @@ static int pruss_probe(struct platform_device *pdev)
return -ENOMEM;
pruss->dev = dev;
+ mutex_init(&pruss->lock);
child = of_get_child_by_name(np, "memories");
if (!child) {
@@ -245,56 +534,18 @@ static int pruss_probe(struct platform_device *pdev)
goto rpm_disable;
}
- child = of_get_child_by_name(np, "cfg");
- if (!child) {
- dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
- ret = -ENODEV;
+ ret = pruss_cfg_of_init(dev, pruss);
+ if (ret < 0)
goto rpm_put;
- }
-
- if (of_address_to_resource(child, 0, &res)) {
- ret = -ENOMEM;
- goto node_put;
- }
-
- pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
- if (!pruss->cfg_base) {
- ret = -ENOMEM;
- goto node_put;
- }
-
- regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
- (u64)res.start);
- regmap_conf.max_register = resource_size(&res) - 4;
-
- pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
- &regmap_conf);
- kfree(regmap_conf.name);
- if (IS_ERR(pruss->cfg_regmap)) {
- dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
- PTR_ERR(pruss->cfg_regmap));
- ret = PTR_ERR(pruss->cfg_regmap);
- goto node_put;
- }
-
- ret = pruss_clk_init(pruss, child);
- if (ret) {
- dev_err(dev, "failed to setup coreclk-mux\n");
- goto node_put;
- }
ret = devm_of_platform_populate(dev);
if (ret) {
dev_err(dev, "failed to register child devices\n");
- goto node_put;
+ goto rpm_put;
}
- of_node_put(child);
-
return 0;
-node_put:
- of_node_put(child);
rpm_put:
pm_runtime_put_sync(dev);
rpm_disable:
@@ -317,10 +568,16 @@ static int pruss_remove(struct platform_device *pdev)
/* instance-specific driver private data */
static const struct pruss_private_data am437x_pruss1_data = {
.has_no_sharedram = false,
+ .has_ocp_syscfg = true,
};
static const struct pruss_private_data am437x_pruss0_data = {
.has_no_sharedram = true,
+ .has_ocp_syscfg = false,
+};
+
+static const struct pruss_private_data am33xx_am57xx_data = {
+ .has_ocp_syscfg = true,
};
static const struct pruss_private_data am65x_j721e_pruss_data = {
@@ -328,13 +585,15 @@ static const struct pruss_private_data am65x_j721e_pruss_data = {
};
static const struct of_device_id pruss_of_match[] = {
- { .compatible = "ti,am3356-pruss" },
+ { .compatible = "ti,am3356-pruss", .data = &am33xx_am57xx_data },
{ .compatible = "ti,am4376-pruss0", .data = &am437x_pruss0_data, },
{ .compatible = "ti,am4376-pruss1", .data = &am437x_pruss1_data, },
- { .compatible = "ti,am5728-pruss" },
+ { .compatible = "ti,am5728-pruss", .data = &am33xx_am57xx_data },
{ .compatible = "ti,k2g-pruss" },
{ .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
{ .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am625-pruss", .data = &am65x_j721e_pruss_data, },
{},
};
MODULE_DEVICE_TABLE(of, pruss_of_match);
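
A minimal, hypothetical sketch of a PRU client using the helpers exported above
(pruss_get(), pruss_request_mem_region(), pruss_cfg_ocp_master_ports() and
pruss_put()). The rproc handle is assumed to be obtained from the PRU remoteproc
driver, and PRUSS_MEM_DRAM0 is assumed to be provided by <linux/pruss_driver.h>;
neither is part of this diff:

static int my_pru_client_setup(struct rproc *pru_rproc)
{
	struct pruss_mem_region dram0;
	struct pruss *pruss;
	int ret;

	pruss = pruss_get(pru_rproc);
	if (IS_ERR(pruss))
		return PTR_ERR(pruss);

	/* claim PRU data RAM0 until pruss_release_mem_region() is called */
	ret = pruss_request_mem_region(pruss, PRUSS_MEM_DRAM0, &dram0);
	if (ret)
		goto err_put;

	/* enable OCP master ports on OMAP-class SoCs (no-op elsewhere) */
	ret = pruss_cfg_ocp_master_ports(pruss, true);
	if (ret)
		goto err_release;

	return 0;

err_release:
	pruss_release_mem_region(pruss, &dram0);
err_put:
	pruss_put(pruss);
	return ret;
}
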
diff --git a/drivers/soc/ti/ti-pat.c b/drivers/soc/ti/ti-pat.c
new file mode 100644
index 000000000000..1671b32faf38
--- /dev/null
+++ b/drivers/soc/ti/ti-pat.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI PAT mapped DMA-BUF memory re-exporter
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
+ * Andrew Davis <afd@ti.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <linux/ti-pat.h>
+
+/* TI PAT MMRS registers */
+#define TI_PAT_MMRS_PID 0x0 /* Revision Register */
+#define TI_PAT_MMRS_CONFIG 0x4 /* Config Register */
+#define TI_PAT_MMRS_CONTROL 0x10 /* Control Register */
+
+/* TI PAT CONTROL register field values */
+#define TI_PAT_CONTROL_ARB_MODE_UF 0x0 /* Updates first */
+#define TI_PAT_CONTROL_ARB_MODE_RR 0x2 /* Round-robin */
+
+#define TI_PAT_CONTROL_PAGE_SIZE_4KB 0x0
+#define TI_PAT_CONTROL_PAGE_SIZE_16KB 0x1
+#define TI_PAT_CONTROL_PAGE_SIZE_64KB 0x2
+#define TI_PAT_CONTROL_PAGE_SIZE_1MB 0x3
+
+/* TI PAT TABLE registers */
+#define TI_PAT_TABLE_ADDRL 0x0 /* Low address Register */
+#define TI_PAT_TABLE_ADDRH 0x4 /* High address and enable Register */
+
+static unsigned int ti_pat_page_sizes[] = {
+ [TI_PAT_CONTROL_PAGE_SIZE_4KB] = 4 * 1024,
+ [TI_PAT_CONTROL_PAGE_SIZE_16KB] = 16 * 1024,
+ [TI_PAT_CONTROL_PAGE_SIZE_64KB] = 64 * 1024,
+ [TI_PAT_CONTROL_PAGE_SIZE_1MB] = 1024 * 1024,
+};
+
+enum ti_pat_fields {
+ /* Revision */
+ F_PID_MAJOR,
+ F_PID_MINOR,
+
+ /* Controls */
+ F_CONTROL_ARB_MODE,
+ F_CONTROL_PAGE_SIZE,
+ F_CONTROL_REPLACE_OID_EN,
+ F_CONTROL_EN,
+
+ /* sentinel */
+ F_MMRS_FIELDS,
+
+ /* Table */
+ F_TABLE_ADDRL,
+ F_TABLE_ADDRH,
+ F_TABLE_ENABLE,
+
+ /* sentinel */
+ F_MAX_FIELDS
+};
+
+static struct reg_field ti_pat_reg_fields[] = {
+ /* Revision */
+ [F_PID_MAJOR] = REG_FIELD(TI_PAT_MMRS_PID, 8, 10),
+ [F_PID_MINOR] = REG_FIELD(TI_PAT_MMRS_PID, 0, 5),
+ /* Controls */
+ [F_CONTROL_ARB_MODE] = REG_FIELD(TI_PAT_MMRS_CONTROL, 6, 7),
+ [F_CONTROL_PAGE_SIZE] = REG_FIELD(TI_PAT_MMRS_CONTROL, 4, 5),
+ [F_CONTROL_REPLACE_OID_EN] = REG_FIELD(TI_PAT_MMRS_CONTROL, 1, 1),
+ [F_CONTROL_EN] = REG_FIELD(TI_PAT_MMRS_CONTROL, 0, 0),
+ /* Table */
+ [F_TABLE_ADDRL] = REG_FIELD(TI_PAT_TABLE_ADDRL, 0, 31),
+ [F_TABLE_ADDRH] = REG_FIELD(TI_PAT_TABLE_ADDRH, 0, 3),
+ [F_TABLE_ENABLE] = REG_FIELD(TI_PAT_TABLE_ADDRH, 31, 31),
+};
+
+/**
+ * struct ti_pat_data - PAT device instance data
+ * @dev: PAT device structure
+ * @mdev: misc device
+ * @fields: Register fields for both the MMRS and TABLE regions
+ * @page_count: Total number of pages in this PAT
+ * @page_size: Size of region mapped by each page in bytes
+ * @window_base: Base address of WINDOW region
+ * @pool: Pool for managing translation space
+ */
+struct ti_pat_data {
+ struct device *dev;
+ struct miscdevice mdev;
+ struct regmap_field *fields[F_MAX_FIELDS];
+ unsigned int page_count;
+ unsigned int page_size;
+ phys_addr_t window_base;
+ struct gen_pool *pool;
+};
+
+struct ti_pat_dma_buf_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct ti_pat_buffer *buffer;
+ struct list_head list;
+};
+
+/**
+ * struct ti_pat_buffer - Single buffer instance data
+ * @pat: PAT instance to which this buffer belongs
+ * @i_dma_buf: Imported DMA-BUF buffer
+ * @size: Total allocated size of this buffer
+ * @offset: Allocated offset into the PAT window
+ * @e_dma_buf: Exported DMA-BUF buffer
+ * @attachment: Our attachment to the imported buffer
+ * @sgt: DMA map of our imported buffer
+ * @attachments: Attachments to this buffer
+ * @map_count: Reference count of mappings to this buffer
+ * @lock: Protect the attach list and map count
+ */
+struct ti_pat_buffer {
+ struct ti_pat_data *pat;
+ struct dma_buf *i_dma_buf;
+ size_t size;
+ unsigned long offset;
+ struct dma_buf *e_dma_buf;
+
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+
+ struct list_head attachments;
+ int map_count;
+
+ struct mutex lock;
+};
+
+static const struct regmap_config ti_pat_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int ti_pat_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct ti_pat_dma_buf_attachment *a;
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ a->dev = attachment->dev;
+ a->buffer = buffer;
+ INIT_LIST_HEAD(&a->list);
+
+ a->table = kzalloc(sizeof(*a->table), GFP_KERNEL);
+ if (!a->table) {
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ if (sg_alloc_table(a->table, 1, GFP_KERNEL)) {
+ kfree(a->table);
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ sg_set_page(a->table->sgl, pfn_to_page(PFN_DOWN(buffer->offset)), buffer->size, 0);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+	/* On the first attachment, attach to the parent DMA-BUF */
+ if (list_empty(&buffer->attachments)) {
+ buffer->attachment = dma_buf_attach(buffer->i_dma_buf, buffer->pat->dev);
+ if (IS_ERR(buffer->attachment)) {
+ dev_err(buffer->pat->dev, "Unable to attach to parent DMA-BUF\n");
+ mutex_unlock(&buffer->lock);
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+ return PTR_ERR(buffer->attachment);
+ }
+ }
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void ti_pat_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct ti_pat_dma_buf_attachment *a = attachment->priv;
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+	/* On the last detach, detach from the parent DMA-BUF */
+ if (list_empty(&buffer->attachments)) {
+ dma_buf_detach(buffer->i_dma_buf, buffer->attachment);
+ buffer->attachment = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
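+/*
+ * Example: page 300 falls in 4 KB block 300 / 256 = 1 at offset 300 % 256 = 44,
+ * so its table index is 1 * 512 + 44 = 556, i.e. byte offset 556 * 8 = 0x1160
+ * into the TABLE region.
+ */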
+static unsigned int ti_pat_table_index_from_page(size_t page)
+{
+ /* Every 256 pages start on a 4k boundary */
+	unsigned int boundary = page / 256;
+	unsigned int offset = page % 256;
+	/* Each page occupies 8 bytes in the table */
+	return (boundary * (4096 / 8)) + offset;
+}
+
+static void ti_pat_set_page(struct ti_pat_data *pat, size_t page_id, dma_addr_t dma_address)
+{
+ unsigned int index = ti_pat_table_index_from_page(page_id);
+
+ /*
+ * Addresses will always be at least 4K aligned, so both high and low
+ * addresses are shifted by an additional 12 bits before being written
+ * to the PAT.
+ */
+ u32 base_l = dma_address >> 12;
+ u32 base_h = dma_address >> 44;
+
+ dev_dbg(pat->dev, "Enabling PAT index: %zu pointing to %pad\n", page_id, &dma_address);
+
+ regmap_fields_write(pat->fields[F_TABLE_ADDRL], index, base_l);
+ regmap_fields_write(pat->fields[F_TABLE_ADDRH], index, base_h);
+ regmap_fields_write(pat->fields[F_TABLE_ENABLE], index, 1);
+}
+
+static void ti_pat_unset_page(struct ti_pat_data *pat, size_t page_id)
+{
+ unsigned int index = ti_pat_table_index_from_page(page_id);
+
+ dev_dbg(pat->dev, "Disabling PAT index: %zu\n", page_id);
+
+ regmap_fields_write(pat->fields[F_TABLE_ENABLE], index, 0);
+}
+
+static struct sg_table *ti_pat_dma_buf_map(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct ti_pat_dma_buf_attachment *a = attachment->priv;
+ struct ti_pat_buffer *buffer = a->buffer;
+ struct ti_pat_data *pat = buffer->pat;
+ struct sg_table *table = a->table;
+ struct scatterlist *s;
+ unsigned int i, s_len;
+ size_t page_id;
+ int ret;
+
+ mutex_lock(&buffer->lock);
+	/* On the first mapping, map the parent DMA-BUF */
+ if (!buffer->map_count) {
+ buffer->sgt = dma_buf_map_attachment(buffer->attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR(buffer->sgt)) {
+ dev_err(pat->dev, "Unable to map parent DMA-BUF\n");
+			mutex_unlock(&buffer->lock);
+			return buffer->sgt;
+ }
+
+ /* And program PAT area for this set of pages */
+ page_id = (size_t)(buffer->offset - pat->window_base) / pat->page_size;
+ for_each_sg(buffer->sgt->sgl, s, buffer->sgt->nents, i) {
+ if (s->offset) {
+ dev_err(pat->dev, "Cannot use offset buffers\n");
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ if (s->length % pat->page_size) {
+			dev_err(pat->dev, "Buffer length must be a multiple of the PAT page size\n");
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ for (s_len = 0; s_len < s->length; s_len += pat->page_size)
+ ti_pat_set_page(pat, page_id++, s->dma_address + s_len);
+ }
+ }
+ buffer->map_count++;
+ mutex_unlock(&buffer->lock);
+
+ /* Map the attached device's table to get DMA addresses */
+ if (!dma_map_sg_attrs(attachment->dev, table->sgl, table->nents, direction, DMA_ATTR_SKIP_CPU_SYNC))
+ return ERR_PTR(-ENOMEM);
+
+ return table;
+
+unmap:
+ dma_buf_unmap_attachment(buffer->attachment, buffer->sgt, DMA_BIDIRECTIONAL);
+ mutex_unlock(&buffer->lock);
+ return ERR_PTR(ret);
+}
+
+static void ti_pat_dma_buf_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct ti_pat_dma_buf_attachment *a = attachment->priv;
+ struct ti_pat_buffer *buffer = a->buffer;
+ struct ti_pat_data *pat = buffer->pat;
+
+ /* Unmap the attached device's table */
+ dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents, direction, DMA_ATTR_SKIP_CPU_SYNC);
+
+ mutex_lock(&buffer->lock);
+ buffer->map_count--;
+	/* On the last unmapping, unmap from the parent DMA-BUF */
+ if (!buffer->map_count) {
+ /* Disable PAT pages for this area */
+ size_t page_start = (size_t)(buffer->offset - pat->window_base) / pat->page_size;
+ size_t page_end = page_start + (buffer->size / pat->page_size);
+
+ for (; page_start < page_end; page_start++)
+ ti_pat_unset_page(pat, page_start);
+
+ dma_buf_unmap_attachment(buffer->attachment, buffer->sgt, DMA_BIDIRECTIONAL);
+ buffer->sgt = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static void ti_pat_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ if (buffer->attachment && buffer->sgt)
+ dma_buf_unmap_attachment(buffer->attachment, buffer->sgt, DMA_BIDIRECTIONAL);
+ if (buffer->i_dma_buf && !IS_ERR_OR_NULL(buffer->attachment))
+ dma_buf_detach(buffer->i_dma_buf, buffer->attachment);
+ if (buffer->i_dma_buf)
+ dma_buf_put(buffer->i_dma_buf);
+
+ if (buffer->offset)
+ gen_pool_free(buffer->pat->pool, buffer->offset, buffer->size);
+
+ kfree(buffer);
+}
+
+static int ti_pat_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_begin_cpu_access(buffer->i_dma_buf, direction);
+}
+
+static int ti_pat_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_end_cpu_access(buffer->i_dma_buf, direction);
+}
+
+static int ti_pat_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_mmap(buffer->i_dma_buf, vma, vma->vm_pgoff);
+}
+
+static void *ti_pat_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_vmap(buffer->i_dma_buf);
+}
+
+static void ti_pat_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_vunmap(buffer->i_dma_buf, vaddr);
+}
+
+static const struct dma_buf_ops dma_buf_ops = {
+ .attach = ti_pat_dma_buf_attach,
+ .detach = ti_pat_dma_buf_detach,
+
+ .map_dma_buf = ti_pat_dma_buf_map,
+ .unmap_dma_buf = ti_pat_dma_buf_unmap,
+
+ .release = ti_pat_dma_buf_release,
+
+ .begin_cpu_access = ti_pat_dma_buf_begin_cpu_access,
+ .end_cpu_access = ti_pat_dma_buf_end_cpu_access,
+ .mmap = ti_pat_dma_buf_mmap,
+ .vmap = ti_pat_dma_buf_vmap,
+ .vunmap = ti_pat_dma_buf_vunmap,
+};
+
+int ti_pat_export(struct ti_pat_data *pat,
+ struct dma_buf *i_dma_buf,
+ struct dma_buf **e_dma_buf)
+{
+ struct ti_pat_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer->pat = pat;
+ buffer->i_dma_buf = i_dma_buf;
+ buffer->size = buffer->i_dma_buf->size;
+ mutex_init(&buffer->lock);
+ INIT_LIST_HEAD(&buffer->attachments);
+ buffer->map_count = 0;
+
+ /* Reserve PAT space */
+ buffer->offset = gen_pool_alloc(buffer->pat->pool, buffer->size);
+ if (!buffer->offset) {
+ ret = -ENOMEM;
+ goto free_buffer;
+ }
+
+ exp_info.ops = &dma_buf_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = buffer;
+
+ *e_dma_buf = dma_buf_export(&exp_info);
+ if (IS_ERR(*e_dma_buf)) {
+ ret = PTR_ERR(*e_dma_buf);
+ goto free_pool;
+ }
+
+ return 0;
+
+free_pool:
+ gen_pool_free(buffer->pat->pool, buffer->offset, buffer->size);
+free_buffer:
+ kfree(buffer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ti_pat_export);
+
+static long ti_pat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ti_pat_data *pat = container_of(file->private_data, struct ti_pat_data, mdev);
+
+ switch (cmd) {
+ case TI_PAT_IOC_EXPORT:
+ {
+ struct ti_pat_export_data export;
+ struct dma_buf *i_dma_buf;
+ struct dma_buf *e_dma_buf;
+ int ret;
+
+ if (_IOC_SIZE(cmd) > sizeof(export))
+ return -EINVAL;
+
+ if (copy_from_user(&export, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ i_dma_buf = dma_buf_get(export.fd);
+ if (IS_ERR(i_dma_buf))
+ return PTR_ERR(i_dma_buf);
+
+ ret = ti_pat_export(pat, i_dma_buf, &e_dma_buf);
+ if (ret) {
+ dma_buf_put(i_dma_buf);
+ return ret;
+ }
+
+ export.fd = dma_buf_fd(e_dma_buf, O_CLOEXEC);
+ if (export.fd < 0) {
+ dma_buf_put(e_dma_buf);
+ dma_buf_put(i_dma_buf);
+ return export.fd;
+ }
+
+ if (copy_to_user((void __user *)arg, &export, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static const struct file_operations ti_pat_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ti_pat_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ti_pat_ioctl,
+#endif
+};
+
+static int ti_pat_probe(struct platform_device *pdev)
+{
+ struct ti_pat_data *pat;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *mmrs_map;
+ struct regmap *table_map;
+ unsigned int revision_major;
+ unsigned int revision_minor;
+ resource_size_t size;
+ size_t page_size;
+ int i, ret;
+
+ pat = devm_kzalloc(&pdev->dev, sizeof(*pat), GFP_KERNEL);
+ if (!pat)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, pat);
+ pat->dev = &pdev->dev;
+
+ /* Set DMA mask to 64 bits */
+ ret = dma_set_mask_and_coherent(pat->dev, DMA_BIT_MASK(64));
+ if (ret) {
+		dev_err(pat->dev, "Unable to set 64-bit DMA mask\n");
+ return ret;
+ }
+
+ /* MMRS */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmrs");
+ if (!res) {
+ dev_err(pat->dev, "Unable to find MMRS IO resource\n");
+ return -ENOENT;
+ }
+ base = devm_ioremap_resource(pat->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mmrs_map = devm_regmap_init_mmio(pat->dev, base, &ti_pat_regmap_config);
+ if (IS_ERR(mmrs_map)) {
+ dev_err(pat->dev, "Unable to allocate MMRS register map\n");
+ return PTR_ERR(mmrs_map);
+ }
+
+ for (i = 0; i < F_MMRS_FIELDS; i++) {
+ pat->fields[i] = devm_regmap_field_alloc(pat->dev, mmrs_map, ti_pat_reg_fields[i]);
+ if (IS_ERR(pat->fields[i])) {
+ dev_err(pat->dev, "Unable to allocate Regmap fields\n");
+ return PTR_ERR(pat->fields[i]);
+ }
+ }
+
+ ret = regmap_read(mmrs_map, TI_PAT_MMRS_CONFIG, &pat->page_count);
+ if (ret) {
+ dev_err(pat->dev, "Unable to read device page count\n");
+ return ret;
+ }
+
+ ret = regmap_field_read(pat->fields[F_PID_MAJOR], &revision_major);
+ if (ret) {
+ dev_err(pat->dev, "Unable to read device major revision\n");
+ return ret;
+ }
+
+ ret = regmap_field_read(pat->fields[F_PID_MINOR], &revision_minor);
+ if (ret) {
+ dev_err(pat->dev, "Unable to read device minor revision\n");
+ return ret;
+ }
+
+ dev_info(pat->dev, "Found PAT Rev %d.%d with %d pages\n", revision_major, revision_minor, pat->page_count);
+
+ /* TABLE */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "table");
+ if (!res) {
+ dev_err(pat->dev, "Unable to find TABLE IO resource\n");
+ return -ENOENT;
+ }
+ base = devm_ioremap_resource(pat->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* 256 pages per 4KB of table space */
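+	/* i.e. the TABLE resource should span page_count * 16 bytes */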
+ size = resource_size(res);
+ if (size != (pat->page_count << 4))
+ dev_warn(pat->dev, "TABLE region size (%pa) does not match reported page count\n", &size);
+
+ table_map = devm_regmap_init_mmio(pat->dev, base, &ti_pat_regmap_config);
+ if (IS_ERR(table_map)) {
+ dev_err(pat->dev, "Unable to allocate TABLE register map\n");
+ return PTR_ERR(table_map);
+ }
+
+ for (i = F_MMRS_FIELDS + 1; i < F_MAX_FIELDS; i++) {
+ ti_pat_reg_fields[i].id_size = ti_pat_table_index_from_page(pat->page_count);
+ ti_pat_reg_fields[i].id_offset = 8; /* 8 bytes per entry */
+ pat->fields[i] = devm_regmap_field_alloc(pat->dev, table_map, ti_pat_reg_fields[i]);
+ if (IS_ERR(pat->fields[i])) {
+ dev_err(pat->dev, "Unable to allocate Regmap fields\n");
+ return PTR_ERR(pat->fields[i]);
+ }
+ }
+
+ /* WINDOW */
+ ret = device_property_read_u64(pat->dev, "ti,pat-window-base", &pat->window_base);
+ if (ret) {
+ dev_err(pat->dev, "Unable to find ti,pat-window-base\n");
+ return -ENOENT;
+ }
+
+ ret = device_property_read_u64(pat->dev, "ti,pat-window-size", &size);
+ if (ret) {
+ dev_err(pat->dev, "Unable to find ti,pat-window-size\n");
+ return -ENOENT;
+ }
+
+ pat->page_size = PAGE_SIZE;
+ for (page_size = 0; page_size < ARRAY_SIZE(ti_pat_page_sizes); page_size++)
+ if (ti_pat_page_sizes[page_size] == pat->page_size)
+ break;
+ if (page_size == ARRAY_SIZE(ti_pat_page_sizes)) {
+ dev_err(pat->dev, "Unsupported PAGE_SIZE (%d)\n", pat->page_size);
+ return -EINVAL;
+ }
+ regmap_field_write(pat->fields[F_CONTROL_PAGE_SIZE], page_size);
+
+ /* Enable this PAT module */
+ regmap_field_write(pat->fields[F_CONTROL_EN], 1);
+
+ pat->pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!pat->pool)
+ return -ENOMEM;
+ gen_pool_add(pat->pool, pat->window_base, size, -1);
+
+ pat->mdev.minor = MISC_DYNAMIC_MINOR;
+ pat->mdev.name = pdev->name;
+ pat->mdev.fops = &ti_pat_fops;
+ pat->mdev.parent = NULL;
+ ret = misc_register(&pat->mdev);
+ if (ret) {
+ dev_err(pat->dev, "Unable to register misc device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ti_pat_of_match[] = {
+ { .compatible = "ti,j721e-pat", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_pat_of_match);
+
+static struct platform_driver ti_pat_driver = {
+ .probe = ti_pat_probe,
+ .driver = {
+ .name = "ti-pat",
+ .of_match_table = ti_pat_of_match,
+ },
+};
+module_platform_driver(ti_pat_driver);
+
+MODULE_AUTHOR("Andrew Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI PAT mapped DMA-BUF memory exporter");
+MODULE_LICENSE("GPL v2");
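
For reference, the ioctl above takes a struct ti_pat_export_data from user space, imports the DMA-BUF named by its fd field, and writes the newly exported fd back into the same field. A hypothetical user-space sketch, assuming the UAPI in linux/ti-pat.h matches the usage in ti_pat_ioctl(); the device path and error handling are illustrative:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ti-pat.h>

/* Re-export an existing DMA-BUF fd through the PAT; returns a new fd
 * referring to the contiguous view, or -1 on error. */
int ti_pat_reexport(int dmabuf_fd)
{
	struct ti_pat_export_data exp = { .fd = dmabuf_fd };
	int pat_fd = open("/dev/ti-pat", O_RDWR);	/* device name is illustrative */
	int ret;

	if (pat_fd < 0)
		return -1;

	ret = ioctl(pat_fd, TI_PAT_IOC_EXPORT, &exp);
	close(pat_fd);

	/* On success the driver replaced .fd with the exported fd */
	return ret ? -1 : exp.fd;
}
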
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index 0eb9462f609e..a1d9c027022a 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -89,6 +89,18 @@ static int ti_sci_inta_msi_alloc_descs(struct device *dev,
list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
count++;
}
+ for (i = 0; i < res->desc[set].num_sec; i++) {
+ msi_desc = alloc_msi_entry(dev, 1, NULL);
+ if (!msi_desc) {
+ ti_sci_inta_msi_free_descs(dev);
+ return -ENOMEM;
+ }
+
+ msi_desc->inta.dev_index = res->desc[set].start_sec + i;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ count++;
+ }
}
return count;
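
The added loop allocates MSI descriptors for the secondary event range of each descriptor set, mirroring the primary-range loop above it. A sketch of the implied layout, assuming struct ti_sci_resource and its descriptor fields (primary num alongside the start_sec/num_sec used in this hunk):

#include <linux/types.h>
#include <linux/soc/ti/ti_sci_protocol.h>

/* Total number of events described by a TI-SCI resource, counting both
 * the primary and the secondary range of every descriptor set. */
static u32 ti_sci_inta_msi_event_count(const struct ti_sci_resource *res)
{
	u32 set, count = 0;

	for (set = 0; set < res->sets; set++)
		count += res->desc[set].num + res->desc[set].num_sec;

	return count;
}
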
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index ef3f95fefab5..3b0abf38cdf4 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -7,7 +7,9 @@
* Dave Gerlach <d-gerlach@ti.com>
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
@@ -40,12 +42,30 @@
#define M3_FW_VERSION_MASK 0xffff
#define M3_WAKE_SRC_MASK 0xff
+#define IPC_MEM_TYPE_SHIFT (0x0)
+#define IPC_MEM_TYPE_MASK (0x7 << 0)
+#define IPC_VTT_STAT_SHIFT (0x3)
+#define IPC_VTT_STAT_MASK (0x1 << 3)
+#define IPC_VTT_GPIO_PIN_SHIFT (0x4)
+#define IPC_VTT_GPIO_PIN_MASK (0x3f << 4)
+#define IPC_IO_ISOLATION_STAT_SHIFT (10)
+#define IPC_IO_ISOLATION_STAT_MASK (0x1 << 10)
+
+#define IPC_DBG_HALT_SHIFT (11)
+#define IPC_DBG_HALT_MASK (0x1 << 11)
+
#define M3_STATE_UNKNOWN 0
#define M3_STATE_RESET 1
#define M3_STATE_INITED 2
#define M3_STATE_MSG_FOR_LP 3
#define M3_STATE_MSG_FOR_RESET 4
+#define WKUP_M3_SD_FW_MAGIC 0x570C
+
+#define WKUP_M3_DMEM_START 0x80000
+#define WKUP_M3_AUXDATA_OFFSET 0x1000
+#define WKUP_M3_AUXDATA_SIZE 0xFF
+
static struct wkup_m3_ipc *m3_ipc_state;
static const struct wkup_m3_wakeup_src wakeups[] = {
@@ -66,6 +86,147 @@ static const struct wkup_m3_wakeup_src wakeups[] = {
{.irq_nr = 0, .src = "Unknown"},
};
+/**
+ * wkup_m3_copy_aux_data - Copy auxiliary data to special region of m3 dmem
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @data: pointer to the data blob
+ * @sz: size of the data to copy (limited to 256 bytes)
+ *
+ * Copies any additional blob of data to the wkup_m3 dmem to be used by the
+ * firmware
+ */
+static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
+ const void *data, int sz)
+{
+ unsigned long aux_data_dev_addr;
+ void *aux_data_addr;
+
+ aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
+ aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
+ aux_data_dev_addr,
+ WKUP_M3_AUXDATA_SIZE);
+ memcpy(aux_data_addr, data, sz);
+
+ return WKUP_M3_AUXDATA_OFFSET;
+}
+
+static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
+{
+ unsigned long val, aux_base;
+ struct wkup_m3_scale_data_header hdr;
+ struct wkup_m3_ipc *m3_ipc = context;
+ struct device *dev = m3_ipc->dev;
+
+ if (!fw) {
+ dev_err(dev, "Voltage scale fw name given but file missing.\n");
+ return;
+ }
+
+ memcpy(&hdr, fw->data, sizeof(hdr));
+
+ if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
+ dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
+ goto release_sd_fw;
+ }
+
+ aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
+ fw->size - sizeof(hdr));
+
+ val = (aux_base + hdr.sleep_offset);
+ val |= ((aux_base + hdr.wake_offset) << 16);
+
+ m3_ipc->volt_scale_offsets = val;
+
+release_sd_fw:
+ release_firmware(fw);
+}
+
+static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
+ struct device *dev)
+{
+ int ret = 0;
+
+ /*
+	 * If no firmware name was provided, the user has already been
+	 * warned and PM will still work, so return 0.
+ */
+
+ if (!m3_ipc->sd_fw_name)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
+ m3_ipc, wkup_m3_scale_data_fw_cb);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void wkup_m3_set_halt_late(bool enabled)
+{
+ if (enabled)
+ m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
+ else
+ m3_ipc_state->halt = 0;
+}
+
+static int option_get(void *data, u64 *val)
+{
+ u32 *option = data;
+
+ *val = *option;
+
+ return 0;
+}
+
+static int option_set(void *data, u64 val)
+{
+ u32 *option = data;
+
+ *option = val;
+
+ if (option == &m3_ipc_state->halt) {
+ if (val)
+ wkup_m3_set_halt_late(true);
+ else
+ wkup_m3_set_halt_late(false);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
+ "%llu\n");
+
+static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);
+
+	if (IS_ERR(m3_ipc->dbg_path))
+		return PTR_ERR(m3_ipc->dbg_path);
+
+ (void)debugfs_create_file("enable_late_halt", 0644,
+ m3_ipc->dbg_path,
+ &m3_ipc->halt,
+ &wkup_m3_ipc_option_fops);
+
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+ debugfs_remove_recursive(m3_ipc->dbg_path);
+}
+#else
+static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
writel(AM33XX_M3_TXEV_ACK,
@@ -130,6 +291,7 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
}
m3_ipc->state = M3_STATE_INITED;
+ wkup_m3_init_scale_data(m3_ipc, dev);
complete(&m3_ipc->sync_complete);
break;
case M3_STATE_MSG_FOR_RESET:
@@ -215,9 +377,21 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
(m3_ipc->state != M3_STATE_UNKNOWN));
}
+static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
+{
+ m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
+ (gpio << IPC_VTT_GPIO_PIN_SHIFT);
+}
+
+static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
+}
+
/* Public functions */
/**
* wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @mem_type: memory type value read directly from emif
*
* wkup_m3 must know what memory type is in use to properly suspend
@@ -230,6 +404,7 @@ static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
/**
* wkup_m3_set_resume_address - Pass wkup_m3 resume address
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @addr: Physical address from which resume code should execute
*/
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
@@ -239,6 +414,7 @@ static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
/**
* wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns code representing the status of a low power mode transition.
* 0 - Successful transition
@@ -260,6 +436,7 @@ static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_prepare_low_power - Request preparation for transition to
* low power state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @state: A kernel suspend state to enter, either MEM or STANDBY
*
* Returns 0 if preparation was successful, otherwise returns error code
@@ -276,12 +453,15 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
switch (state) {
case WKUP_M3_DEEPSLEEP:
m3_power_state = IPC_CMD_DS0;
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
break;
case WKUP_M3_STANDBY:
m3_power_state = IPC_CMD_STANDBY;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
case WKUP_M3_IDLE:
m3_power_state = IPC_CMD_IDLE;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
default:
return 1;
@@ -290,11 +470,13 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/* Program each required IPC register then write defaults to others */
wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
- wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
+ m3_ipc->vtt_conf |
+ m3_ipc->isolation_conf |
+ m3_ipc->halt, 4);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
- wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
@@ -315,6 +497,7 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/**
* wkup_m3_finish_low_power - Return m3 to reset state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns 0 if reset was successful, otherwise returns error code
*/
@@ -362,8 +545,7 @@ static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_set_rtc_only - Set the rtc_only flag
- * @wkup_m3_wakeup: struct wkup_m3_wakeup_src * gets assigned the
- * wakeup src value
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*/
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
@@ -428,12 +610,13 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int irq, ret;
+ int irq, ret, temp;
phandle rproc_phandle;
struct rproc *m3_rproc;
struct resource *res;
struct task_struct *task;
struct wkup_m3_ipc *m3_ipc;
+ struct device_node *np = dev->of_node;
m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
if (!m3_ipc)
@@ -493,6 +676,23 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
m3_ipc->ops = &ipc_ops;
+ if (of_find_property(np, "ti,needs-vtt-toggle", NULL) &&
+ !(of_property_read_u32(np, "ti,vtt-gpio-pin", &temp))) {
+ if (temp >= 0 && temp <= 31)
+ wkup_m3_set_vtt_gpio(m3_ipc, temp);
+ else
+ dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
+ }
+
+ if (of_find_property(np, "ti,set-io-isolation", NULL))
+ wkup_m3_set_io_isolation(m3_ipc);
+
+ ret = of_property_read_string(np, "ti,scale-data-fw",
+ &m3_ipc->sd_fw_name);
+	if (ret)
+		dev_dbg(dev, "Voltage scaling data blob not provided from DT\n");
+
/*
* Wait for firmware loading completion in a thread so we
* can boot the wkup_m3 as soon as it's ready without holding
@@ -507,6 +707,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
+ wkup_m3_ipc_dbg_init(m3_ipc);
+
return 0;
err_put_rproc:
@@ -518,6 +720,8 @@ err_free_mbox:
static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
+ wkup_m3_ipc_dbg_destroy(m3_ipc_state);
+
mbox_free_channel(m3_ipc_state->mbox);
rproc_shutdown(m3_ipc_state->rproc);
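
The wkup_m3_ipc changes above fold the VTT toggle, IO isolation and debug-halt configuration into the same IPC word as the memory type. A sketch of how that register 4 payload is composed from the bitfield defines added by this patch; the memory type and GPIO pin values are purely illustrative:

#include <linux/types.h>

static u32 wkup_m3_example_ipc4(void)
{
	u32 val = 0;

	val |= (2 << IPC_MEM_TYPE_SHIFT) & IPC_MEM_TYPE_MASK;		/* memory type read from EMIF */
	val |= (1 << IPC_VTT_STAT_SHIFT) | (7 << IPC_VTT_GPIO_PIN_SHIFT); /* VTT toggle on pin 7 */
	val |= 1 << IPC_IO_ISOLATION_STAT_SHIFT;			/* enable IO isolation */
	val |= 1 << IPC_DBG_HALT_SHIFT;					/* halt late (debugfs option) */

	return val;
}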