summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/imx/Kconfig1
-rw-r--r--drivers/gpu/imx/Makefile1
-rw-r--r--drivers/gpu/imx/dpu/Kconfig8
-rw-r--r--drivers/gpu/imx/dpu/Makefile7
-rw-r--r--drivers/gpu/imx/dpu/dpu-common.c1093
-rw-r--r--drivers/gpu/imx/dpu/dpu-constframe.c240
-rw-r--r--drivers/gpu/imx/dpu/dpu-disengcfg.c134
-rw-r--r--drivers/gpu/imx/dpu/dpu-extdst.c498
-rw-r--r--drivers/gpu/imx/dpu/dpu-fetchdecode.c596
-rw-r--r--drivers/gpu/imx/dpu/dpu-fetcheco.c403
-rw-r--r--drivers/gpu/imx/dpu/dpu-fetchlayer.c289
-rw-r--r--drivers/gpu/imx/dpu/dpu-fetchunit.c213
-rw-r--r--drivers/gpu/imx/dpu/dpu-fetchwarp.c301
-rw-r--r--drivers/gpu/imx/dpu/dpu-framegen.c487
-rw-r--r--drivers/gpu/imx/dpu/dpu-hscaler.c386
-rw-r--r--drivers/gpu/imx/dpu/dpu-layerblend.c328
-rw-r--r--drivers/gpu/imx/dpu/dpu-prv.h420
-rw-r--r--drivers/gpu/imx/dpu/dpu-sc-misc.c76
-rw-r--r--drivers/gpu/imx/dpu/dpu-tcon.c240
-rw-r--r--drivers/gpu/imx/dpu/dpu-vscaler.c438
-rw-r--r--include/video/dpu.h644
21 files changed, 6803 insertions, 0 deletions
diff --git a/drivers/gpu/imx/Kconfig b/drivers/gpu/imx/Kconfig
index 57277de697dd..e170d3677f61 100644
--- a/drivers/gpu/imx/Kconfig
+++ b/drivers/gpu/imx/Kconfig
@@ -1 +1,2 @@
source "drivers/gpu/imx/ipu-v3/Kconfig"
+source "drivers/gpu/imx/dpu/Kconfig"
diff --git a/drivers/gpu/imx/Makefile b/drivers/gpu/imx/Makefile
index c3cb11425cf5..3ac4d4b25035 100644
--- a/drivers/gpu/imx/Makefile
+++ b/drivers/gpu/imx/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
+obj-$(CONFIG_IMX_DPU_CORE) += dpu/
diff --git a/drivers/gpu/imx/dpu/Kconfig b/drivers/gpu/imx/dpu/Kconfig
new file mode 100644
index 000000000000..d62891118907
--- /dev/null
+++ b/drivers/gpu/imx/dpu/Kconfig
@@ -0,0 +1,8 @@
+config IMX_DPU_CORE
+ tristate "i.MX DPU core support"
+ depends on ARCH_MXC
+ select GENERIC_IRQ_CHIP
+ help
+ Choose this if you have a Freescale i.MX8QM or i.MX8QXP system and
+ want to use the Display Processing Unit. This option only enables
+ DPU base support.
diff --git a/drivers/gpu/imx/dpu/Makefile b/drivers/gpu/imx/dpu/Makefile
new file mode 100644
index 000000000000..fda0149d17fc
--- /dev/null
+++ b/drivers/gpu/imx/dpu/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_IMX_DPU_CORE) += imx-dpu-core.o
+
+imx-dpu-core-objs := dpu-common.o dpu-constframe.o dpu-disengcfg.o \
+ dpu-extdst.o dpu-fetchdecode.o dpu-fetcheco.o \
+ dpu-fetchlayer.o dpu-fetchwarp.o dpu-fetchunit.o \
+ dpu-framegen.o dpu-hscaler.o dpu-layerblend.o \
+ dpu-sc-misc.o dpu-tcon.o dpu-vscaler.o
diff --git a/drivers/gpu/imx/dpu/dpu-common.c b/drivers/gpu/imx/dpu/dpu-common.c
new file mode 100644
index 000000000000..02fbce24c31a
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-common.c
@@ -0,0 +1,1093 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/clk.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+static bool display_plane_video_proc = true;
+module_param(display_plane_video_proc, bool, 0444);
+MODULE_PARM_DESC(display_plane_video_proc,
+ "Enable video processing for display [default=true]");
+
+#define DPU_CM_REG_DEFINE1(name1, name2) \
+static inline u32 name1(const struct cm_reg_ofs *ofs) \
+{ \
+ return ofs->name2; \
+}
+
+#define DPU_CM_REG_DEFINE2(name1, name2) \
+static inline u32 name1(const struct cm_reg_ofs *ofs, \
+ unsigned int n) \
+{ \
+ return ofs->name2 + (4 * n); \
+}
+
+DPU_CM_REG_DEFINE1(LOCKUNLOCK, lockunlock);
+DPU_CM_REG_DEFINE1(LOCKSTATUS, lockstatus);
+DPU_CM_REG_DEFINE2(USERINTERRUPTMASK, userinterruptmask);
+DPU_CM_REG_DEFINE2(INTERRUPTENABLE, interruptenable);
+DPU_CM_REG_DEFINE2(INTERRUPTPRESET, interruptpreset);
+DPU_CM_REG_DEFINE2(INTERRUPTCLEAR, interruptclear);
+DPU_CM_REG_DEFINE2(INTERRUPTSTATUS, interruptstatus);
+DPU_CM_REG_DEFINE2(USERINTERRUPTENABLE, userinterruptenable);
+DPU_CM_REG_DEFINE2(USERINTERRUPTPRESET, userinterruptpreset);
+DPU_CM_REG_DEFINE2(USERINTERRUPTCLEAR, userinterruptclear);
+DPU_CM_REG_DEFINE2(USERINTERRUPTSTATUS, userinterruptstatus);
+DPU_CM_REG_DEFINE1(GENERALPURPOSE, generalpurpose);
+
+/* Read a 32-bit register from the DPU common (control) block. */
+static inline u32 dpu_cm_read(struct dpu_soc *dpu, unsigned int offset)
+{
+ return readl(dpu->cm_reg + offset);
+}
+
+/* Write a 32-bit value to a register in the DPU common (control) block. */
+static inline void dpu_cm_write(struct dpu_soc *dpu,
+ unsigned int offset, u32 value)
+{
+ writel(value, dpu->cm_reg + offset);
+}
+
+/* Constant Frame Unit */
+static const unsigned long cf_ofss[] = {0x4400, 0x5400, 0x4c00, 0x5c00};
+static const unsigned long cf_pec_ofss[] = {0x960, 0x9e0, 0x9a0, 0xa20};
+
+/* Display Engine Configuration Unit */
+static const unsigned long dec_ofss[] = {0xb400, 0xb420};
+
+/* External Destination Unit */
+static const unsigned long ed_ofss[] = {0x4800, 0x5800, 0x5000, 0x6000};
+static const unsigned long ed_pec_ofss[] = {0x980, 0xa00, 0x9c0, 0xa40};
+
+/* Fetch Decode Unit */
+static const unsigned long fd_ofss[] = {0x6c00, 0x7800};
+static const unsigned long fd_pec_ofss[] = {0xa80, 0xaa0};
+
+/* Fetch ECO Unit */
+static const unsigned long fe_ofss[] = {0x7400, 0x8000, 0x6800, 0x1c00};
+static const unsigned long fe_pec_ofss[] = {0xa90, 0xab0, 0xa70, 0x850};
+
+/* Frame Generator Unit */
+static const unsigned long fg_ofss[] = {0xb800, 0xd400};
+
+/* Fetch Layer Unit */
+static const unsigned long fl_ofss[] = {0x8400};
+static const unsigned long fl_pec_ofss[] = {0xac0};
+
+/* Fetch Warp Unit */
+static const unsigned long fw_ofss[] = {0x6400};
+static const unsigned long fw_pec_ofss[] = {0xa60};
+
+/* Horizontal Scaler Unit */
+static const unsigned long hs_ofss[] = {0x9000, 0x9c00, 0x3000};
+static const unsigned long hs_pec_ofss[] = {0xb00, 0xb60, 0x8c0};
+
+/* Layer Blend Unit */
+static const unsigned long lb_ofss[] = {0xa400, 0xa800, 0xac00, 0xb000};
+static const unsigned long lb_pec_ofss[] = {0xba0, 0xbc0, 0xbe0, 0xc00};
+
+/* Timing Controller Unit */
+static const unsigned long tcon_ofss[] = {0xcc00, 0xe800};
+
+/* Vertical Scaler Unit */
+static const unsigned long vs_ofss[] = {0x9400, 0xa000, 0x3400};
+static const unsigned long vs_pec_ofss[] = {0xb20, 0xb80, 0x8e0};
+
+static const struct dpu_unit _cfs = {
+ .name = "ConstFrame",
+ .num = ARRAY_SIZE(cf_ids),
+ .ids = cf_ids,
+ .pec_ofss = cf_pec_ofss,
+ .ofss = cf_ofss,
+};
+
+static const struct dpu_unit _decs = {
+ .name = "DisEngCfg",
+ .num = ARRAY_SIZE(dec_ids),
+ .ids = dec_ids,
+ .pec_ofss = NULL,
+ .ofss = dec_ofss,
+};
+
+static const struct dpu_unit _eds = {
+ .name = "ExtDst",
+ .num = ARRAY_SIZE(ed_ids),
+ .ids = ed_ids,
+ .pec_ofss = ed_pec_ofss,
+ .ofss = ed_ofss,
+};
+
+static const struct dpu_unit _fds = {
+ .name = "FetchDecode",
+ .num = ARRAY_SIZE(fd_ids),
+ .ids = fd_ids,
+ .pec_ofss = fd_pec_ofss,
+ .ofss = fd_ofss,
+};
+
+static const struct dpu_unit _fes = {
+ .name = "FetchECO",
+ .num = ARRAY_SIZE(fe_ids),
+ .ids = fe_ids,
+ .pec_ofss = fe_pec_ofss,
+ .ofss = fe_ofss,
+};
+
+static const struct dpu_unit _fgs = {
+ .name = "FrameGen",
+ .num = ARRAY_SIZE(fg_ids),
+ .ids = fg_ids,
+ .pec_ofss = NULL,
+ .ofss = fg_ofss,
+};
+
+static const struct dpu_unit _fls = {
+ .name = "FetchLayer",
+ .num = ARRAY_SIZE(fl_ids),
+ .ids = fl_ids,
+ .pec_ofss = fl_pec_ofss,
+ .ofss = fl_ofss,
+};
+
+static const struct dpu_unit _fws = {
+ .name = "FetchWarp",
+ .num = ARRAY_SIZE(fw_ids),
+ .ids = fw_ids,
+ .pec_ofss = fw_pec_ofss,
+ .ofss = fw_ofss,
+};
+
+static const struct dpu_unit _hss = {
+ .name = "HScaler",
+ .num = ARRAY_SIZE(hs_ids),
+ .ids = hs_ids,
+ .pec_ofss = hs_pec_ofss,
+ .ofss = hs_ofss,
+};
+
+static const struct dpu_unit _lbs = {
+ .name = "LayerBlend",
+ .num = ARRAY_SIZE(lb_ids),
+ .ids = lb_ids,
+ .pec_ofss = lb_pec_ofss,
+ .ofss = lb_ofss,
+};
+
+static const struct dpu_unit _tcons = {
+ .name = "TCon",
+ .num = ARRAY_SIZE(tcon_ids),
+ .ids = tcon_ids,
+ .pec_ofss = NULL,
+ .ofss = tcon_ofss,
+};
+
+static const struct dpu_unit _vss = {
+ .name = "VScaler",
+ .num = ARRAY_SIZE(vs_ids),
+ .ids = vs_ids,
+ .pec_ofss = vs_pec_ofss,
+ .ofss = vs_ofss,
+};
+
+static const struct cm_reg_ofs _cm_reg_ofs = {
+ .ipidentifier = 0,
+ .lockunlock = 0x40,
+ .lockstatus = 0x44,
+ .userinterruptmask = 0x48,
+ .interruptenable = 0x50,
+ .interruptpreset = 0x58,
+ .interruptclear = 0x60,
+ .interruptstatus = 0x68,
+ .userinterruptenable = 0x80,
+ .userinterruptpreset = 0x88,
+ .userinterruptclear = 0x90,
+ .userinterruptstatus = 0x98,
+ .generalpurpose = 0x100,
+};
+
+static const unsigned long unused_irq[] = {0x00000000, 0xfffe0008};
+
+static const struct dpu_data dpu_data = {
+ .cm_ofs = 0x0,
+ .cfs = &_cfs,
+ .decs = &_decs,
+ .eds = &_eds,
+ .fds = &_fds,
+ .fes = &_fes,
+ .fgs = &_fgs,
+ .fls = &_fls,
+ .fws = &_fws,
+ .hss = &_hss,
+ .lbs = &_lbs,
+ .tcons = &_tcons,
+ .vss = &_vss,
+ .cm_reg_ofs = &_cm_reg_ofs,
+ .unused_irq = unused_irq,
+ .plane_src_mask = DPU_PLANE_SRC_FL0_ID | DPU_PLANE_SRC_FW2_ID |
+ DPU_PLANE_SRC_FD0_ID | DPU_PLANE_SRC_FD1_ID,
+};
+
+static const struct of_device_id dpu_dt_ids[] = {
+ {
+ .compatible = "fsl,imx8qxp-dpu",
+ .data = &dpu_data,
+ }, {
+ .compatible = "fsl,imx8qm-dpu",
+ .data = &dpu_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_ids);
+
+/* True if @cap_mask advertises a FetchECO unit for video processing. */
+bool dpu_vproc_has_fetcheco_cap(u32 cap_mask)
+{
+	return (cap_mask & DPU_VPROC_CAP_FETCHECO) != 0;
+}
+EXPORT_SYMBOL_GPL(dpu_vproc_has_fetcheco_cap);
+
+/* True if @cap_mask advertises horizontal scaling capability. */
+bool dpu_vproc_has_hscale_cap(u32 cap_mask)
+{
+	return (cap_mask & DPU_VPROC_CAP_HSCALE) != 0;
+}
+EXPORT_SYMBOL_GPL(dpu_vproc_has_hscale_cap);
+
+/* True if @cap_mask advertises vertical scaling capability. */
+bool dpu_vproc_has_vscale_cap(u32 cap_mask)
+{
+	return (cap_mask & DPU_VPROC_CAP_VSCALE) != 0;
+}
+EXPORT_SYMBOL_GPL(dpu_vproc_has_vscale_cap);
+
+/* Extract only the FetchECO capability bits from @cap_mask. */
+u32 dpu_vproc_get_fetcheco_cap(u32 cap_mask)
+{
+	return cap_mask & DPU_VPROC_CAP_FETCHECO;
+}
+EXPORT_SYMBOL_GPL(dpu_vproc_get_fetcheco_cap);
+
+/* Extract only the horizontal scaling capability bits from @cap_mask. */
+u32 dpu_vproc_get_hscale_cap(u32 cap_mask)
+{
+	return cap_mask & DPU_VPROC_CAP_HSCALE;
+}
+EXPORT_SYMBOL_GPL(dpu_vproc_get_hscale_cap);
+
+/* Extract only the vertical scaling capability bits from @cap_mask. */
+u32 dpu_vproc_get_vscale_cap(u32 cap_mask)
+{
+	return cap_mask & DPU_VPROC_CAP_VSCALE;
+}
+EXPORT_SYMBOL_GPL(dpu_vproc_get_vscale_cap);
+
+/*
+ * Horizontal chroma subsampling factor of @format: 2 for the packed and
+ * semi-planar YUV formats listed below, 1 for everything else.
+ */
+int dpu_format_horz_chroma_subsampling(u32 format)
+{
+	bool subsampled;
+
+	subsampled = format == DRM_FORMAT_YUYV || format == DRM_FORMAT_UYVY ||
+		     format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21 ||
+		     format == DRM_FORMAT_NV16 || format == DRM_FORMAT_NV61;
+
+	return subsampled ? 2 : 1;
+}
+
+/*
+ * Vertical chroma subsampling factor of @format: 2 for the 4:2:0
+ * semi-planar formats (NV12/NV21), 1 for everything else.
+ */
+int dpu_format_vert_chroma_subsampling(u32 format)
+{
+	if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21)
+		return 2;
+
+	return 1;
+}
+
+/*
+ * Number of memory planes of @format: 2 for the semi-planar NV formats
+ * (separate luma and interleaved chroma planes), otherwise 1.
+ */
+int dpu_format_num_planes(u32 format)
+{
+	if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21 ||
+	    format == DRM_FORMAT_NV16 || format == DRM_FORMAT_NV61 ||
+	    format == DRM_FORMAT_NV24 || format == DRM_FORMAT_NV42)
+		return 2;
+
+	return 1;
+}
+
+/*
+ * Width of memory plane @plane for a buffer of @width pixels in @format.
+ * Returns 0 for a plane index the format does not have; chroma planes
+ * are narrowed by the horizontal subsampling factor.
+ */
+int dpu_format_plane_width(int width, u32 format, int plane)
+{
+	if (plane >= dpu_format_num_planes(format))
+		return 0;
+
+	return plane == 0 ?
+	       width : width / dpu_format_horz_chroma_subsampling(format);
+}
+
+/*
+ * Height of memory plane @plane for a buffer of @height lines in @format.
+ * Returns 0 for a plane index the format does not have; chroma planes
+ * are shortened by the vertical subsampling factor.
+ */
+int dpu_format_plane_height(int height, u32 format, int plane)
+{
+	if (plane >= dpu_format_num_planes(format))
+		return 0;
+
+	return plane == 0 ?
+	       height : height / dpu_format_vert_chroma_subsampling(format);
+}
+
+/* Undo dpu_attach_pm_domains(); tolerates a partially attached state. */
+static void dpu_detach_pm_domains(struct dpu_soc *dpu)
+{
+	/* Drop links and domains in reverse order of attachment. */
+	if (!IS_ERR_OR_NULL(dpu->pd_pll1_link))
+		device_link_del(dpu->pd_pll1_link);
+	if (!IS_ERR_OR_NULL(dpu->pd_pll1_dev))
+		dev_pm_domain_detach(dpu->pd_pll1_dev, true);
+	if (!IS_ERR_OR_NULL(dpu->pd_pll0_link))
+		device_link_del(dpu->pd_pll0_link);
+	if (!IS_ERR_OR_NULL(dpu->pd_pll0_dev))
+		dev_pm_domain_detach(dpu->pd_pll0_dev, true);
+	if (!IS_ERR_OR_NULL(dpu->pd_dc_link))
+		device_link_del(dpu->pd_dc_link);
+	if (!IS_ERR_OR_NULL(dpu->pd_dc_dev))
+		dev_pm_domain_detach(dpu->pd_dc_dev, true);
+
+	/* Reset so a later detach or re-attach sees a clean state. */
+	dpu->pd_dc_dev = NULL;
+	dpu->pd_dc_link = NULL;
+	dpu->pd_pll0_dev = NULL;
+	dpu->pd_pll0_link = NULL;
+	dpu->pd_pll1_dev = NULL;
+	dpu->pd_pll1_link = NULL;
+}
+
+/*
+ * Attach the "dc", "pll0" and "pll1" power domains and link each one to
+ * our device so the domains follow its runtime PM state.  On failure,
+ * everything attached so far is rolled back.  Returns 0 on success or a
+ * negative errno.
+ */
+static int dpu_attach_pm_domains(struct dpu_soc *dpu)
+{
+	struct device *dev = dpu->dev;
+	u32 flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE;
+	int ret = 0;
+
+	dpu->pd_dc_dev = dev_pm_domain_attach_by_name(dev, "dc");
+	if (IS_ERR(dpu->pd_dc_dev)) {
+		ret = PTR_ERR(dpu->pd_dc_dev);
+		dev_err(dev, "Failed to attach dc pd dev: %d\n", ret);
+		goto fail;
+	}
+	dpu->pd_dc_link = device_link_add(dev, dpu->pd_dc_dev, flags);
+	if (IS_ERR(dpu->pd_dc_link)) {
+		ret = PTR_ERR(dpu->pd_dc_link);
+		dev_err(dev, "Failed to add device link to dc pd dev: %d\n",
+			ret);
+		goto fail;
+	}
+
+	dpu->pd_pll0_dev = dev_pm_domain_attach_by_name(dev, "pll0");
+	if (IS_ERR(dpu->pd_pll0_dev)) {
+		ret = PTR_ERR(dpu->pd_pll0_dev);
+		dev_err(dev, "Failed to attach pll0 pd dev: %d\n", ret);
+		goto fail;
+	}
+	dpu->pd_pll0_link = device_link_add(dev, dpu->pd_pll0_dev, flags);
+	if (IS_ERR(dpu->pd_pll0_link)) {
+		ret = PTR_ERR(dpu->pd_pll0_link);
+		dev_err(dev, "Failed to add device link to pll0 pd dev: %d\n",
+			ret);
+		goto fail;
+	}
+
+	dpu->pd_pll1_dev = dev_pm_domain_attach_by_name(dev, "pll1");
+	if (IS_ERR(dpu->pd_pll1_dev)) {
+		ret = PTR_ERR(dpu->pd_pll1_dev);
+		/* was "pll0" — copy-paste typo fixed */
+		dev_err(dev, "Failed to attach pll1 pd dev: %d\n", ret);
+		goto fail;
+	}
+	dpu->pd_pll1_link = device_link_add(dev, dpu->pd_pll1_dev, flags);
+	if (IS_ERR(dpu->pd_pll1_link)) {
+		ret = PTR_ERR(dpu->pd_pll1_link);
+		dev_err(dev, "Failed to add device link to pll1 pd dev: %d\n",
+			ret);
+		goto fail;
+	}
+
+	/*
+	 * Return before the error label: the original code fell through to
+	 * fail: on success and detached everything it had just attached.
+	 */
+	return 0;
+fail:
+	dpu_detach_pm_domains(dpu);
+	return ret;
+}
+
+#define DPU_UNITS_ADDR_DBG(unit) \
+{ \
+ const struct dpu_unit *us = data->unit##s; \
+ int i; \
+ for (i = 0; i < us->num; i++) { \
+ if (us->pec_ofss) { \
+ dev_dbg(&pdev->dev, "%s%d: pixengcfg @ 0x%08lx,"\
+ " unit @ 0x%08lx\n", us->name, \
+ us->ids[i], \
+ dpu_base + us->pec_ofss[i], \
+ dpu_base + us->ofss[i]); \
+ } else { \
+ dev_dbg(&pdev->dev, \
+ "%s%d: unit @ 0x%08lx\n", us->name, \
+ us->ids[i], dpu_base + us->ofss[i]); \
+ } \
+ } \
+}
+
+static void dpu_units_addr_dbg(struct dpu_soc *dpu,
+ struct platform_device *pdev, unsigned long dpu_base)
+{
+ const struct dpu_data *data = dpu->data;
+
+ dev_dbg(dpu->dev, "Common: 0x%08lx\n", dpu_base + data->cm_ofs);
+ DPU_UNITS_ADDR_DBG(cf);
+ DPU_UNITS_ADDR_DBG(dec);
+ DPU_UNITS_ADDR_DBG(ed);
+ DPU_UNITS_ADDR_DBG(fd);
+ DPU_UNITS_ADDR_DBG(fe);
+ DPU_UNITS_ADDR_DBG(fg);
+ DPU_UNITS_ADDR_DBG(fl);
+ DPU_UNITS_ADDR_DBG(fw);
+ DPU_UNITS_ADDR_DBG(hs);
+ DPU_UNITS_ADDR_DBG(lb);
+ DPU_UNITS_ADDR_DBG(tcon);
+ DPU_UNITS_ADDR_DBG(vs);
+}
+
+/*
+ * Look up every named DPU interrupt line from the DT "interrupt-names"
+ * property.  The helper macro stores the result in dpu->irq_<name> and
+ * makes the function return the (negative) lookup error on failure.
+ */
+static int dpu_get_irq(struct platform_device *pdev, struct dpu_soc *dpu)
+{
+#define DPU_GET_IRQ(name) \
+{ \
+ dpu->irq_##name = platform_get_irq_byname(pdev, "" #name ""); \
+ dev_dbg(dpu->dev, "irq_" #name ": %d\n", dpu->irq_##name); \
+ if (dpu->irq_##name < 0) { \
+ dev_err(dpu->dev, "failed to get irq " #name "\n"); \
+ return dpu->irq_##name; \
+ } \
+}
+
+ DPU_GET_IRQ(extdst0_shdload);
+ DPU_GET_IRQ(extdst4_shdload);
+ DPU_GET_IRQ(extdst1_shdload);
+ DPU_GET_IRQ(extdst5_shdload);
+ DPU_GET_IRQ(disengcfg_shdload0);
+ DPU_GET_IRQ(disengcfg_framecomplete0);
+ DPU_GET_IRQ(disengcfg_shdload1);
+ DPU_GET_IRQ(disengcfg_framecomplete1);
+
+ return 0;
+}
+
+/*
+ * Chained handler body for one dedicated DPU interrupt line: check the
+ * common-block user interrupt status for bit @irq and dispatch the
+ * mapped Linux virq, if one exists in the irq domain.
+ */
+static void dpu_irq_handle(struct irq_desc *desc, enum dpu_irq irq)
+{
+ struct dpu_soc *dpu = irq_desc_get_handler_data(desc);
+ const struct dpu_data *data = dpu->data;
+ const struct cm_reg_ofs *ofs = data->cm_reg_ofs;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int virq;
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+
+ /* Only consider interrupts that are both pending and enabled. */
+ status = dpu_cm_read(dpu, USERINTERRUPTSTATUS(ofs, irq / 32));
+ status &= dpu_cm_read(dpu, USERINTERRUPTENABLE(ofs, irq / 32));
+
+ if (status & BIT(irq % 32)) {
+ virq = irq_linear_revmap(dpu->domain, irq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+#define DPU_IRQ_HANDLER_DEFINE(name1, name2) \
+static void dpu_##name1##_irq_handler(struct irq_desc *desc) \
+{ \
+ dpu_irq_handle(desc, IRQ_##name2); \
+}
+
+DPU_IRQ_HANDLER_DEFINE(extdst0_shdload, EXTDST0_SHDLOAD)
+DPU_IRQ_HANDLER_DEFINE(extdst4_shdload, EXTDST4_SHDLOAD)
+DPU_IRQ_HANDLER_DEFINE(extdst1_shdload, EXTDST1_SHDLOAD)
+DPU_IRQ_HANDLER_DEFINE(extdst5_shdload, EXTDST5_SHDLOAD)
+DPU_IRQ_HANDLER_DEFINE(disengcfg_shdload0, DISENGCFG_SHDLOAD0)
+DPU_IRQ_HANDLER_DEFINE(disengcfg_framecomplete0, DISENGCFG_FRAMECOMPLETE0)
+DPU_IRQ_HANDLER_DEFINE(disengcfg_shdload1, DISENGCFG_SHDLOAD1)
+DPU_IRQ_HANDLER_DEFINE(disengcfg_framecomplete1, DISENGCFG_FRAMECOMPLETE1)
+
+/*
+ * Map a hardware DPU interrupt number to a Linux virq, creating the
+ * mapping in the DPU irq domain on first use.  Returns 0 if no mapping
+ * could be created.
+ */
+int dpu_map_irq(struct dpu_soc *dpu, int irq)
+{
+ int virq = irq_linear_revmap(dpu->domain, irq);
+
+ if (!virq)
+ virq = irq_create_mapping(dpu->domain, irq);
+
+ return virq;
+}
+EXPORT_SYMBOL_GPL(dpu_map_irq);
+
+static int dpu_irq_init(struct dpu_soc *dpu)
+{
+ const struct dpu_data *data = dpu->data;
+ const struct cm_reg_ofs *ofs = data->cm_reg_ofs;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ int ret, i;
+
+ dpu->domain = irq_domain_add_linear(dpu->dev->of_node,
+ dpu->irq_line_num,
+ &irq_generic_chip_ops, dpu);
+ if (!dpu->domain) {
+ dev_err(dpu->dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
+
+ ret = irq_alloc_domain_generic_chips(dpu->domain, 32, 1, "DPU",
+ handle_level_irq, 0, 0, 0);
+ if (ret < 0) {
+ dev_err(dpu->dev, "failed to alloc generic irq chips\n");
+ irq_domain_remove(dpu->domain);
+ return ret;
+ }
+
+ for (i = 0; i < dpu->irq_line_num; i += 32) {
+ /* Mask and clear all interrupts */
+ dpu_cm_write(dpu, USERINTERRUPTENABLE(ofs, i / 32), 0);
+ dpu_cm_write(dpu, USERINTERRUPTCLEAR(ofs, i / 32),
+ ~data->unused_irq[i / 32]);
+ dpu_cm_write(dpu, INTERRUPTENABLE(ofs, i / 32), 0);
+ dpu_cm_write(dpu, INTERRUPTCLEAR(ofs, i / 32),
+ ~data->unused_irq[i / 32]);
+
+ /* Set all interrupts to user mode */
+ dpu_cm_write(dpu, USERINTERRUPTMASK(ofs, i / 32),
+ ~data->unused_irq[i / 32]);
+
+ gc = irq_get_domain_generic_chip(dpu->domain, i);
+ gc->reg_base = dpu->cm_reg;
+ gc->unused = data->unused_irq[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = USERINTERRUPTCLEAR(ofs, i / 32);
+ ct->regs.mask = USERINTERRUPTENABLE(ofs, i / 32);
+ }
+
+#define DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(name) \
+irq_set_chained_handler_and_data(dpu->irq_##name, dpu_##name##_irq_handler, dpu)
+
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(extdst0_shdload);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(extdst4_shdload);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(extdst1_shdload);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(extdst5_shdload);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(disengcfg_shdload0);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(disengcfg_framecomplete0);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(disengcfg_shdload1);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(disengcfg_framecomplete1);
+
+ return 0;
+}
+
+/*
+ * Tear down the irq infrastructure set up by dpu_irq_init(): unhook the
+ * chained handlers, dispose all created virq mappings and remove the
+ * irq domain.
+ */
+static void dpu_irq_exit(struct dpu_soc *dpu)
+{
+ unsigned int i, irq;
+
+#define DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(name) \
+irq_set_chained_handler_and_data(dpu->irq_##name, NULL, NULL)
+
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(extdst0_shdload);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(extdst4_shdload);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(extdst1_shdload);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(extdst5_shdload);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(disengcfg_shdload0);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(disengcfg_framecomplete0);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(disengcfg_shdload1);
+ DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(disengcfg_framecomplete1);
+
+ /* Dispose only mappings that were actually created. */
+ for (i = 0; i < dpu->irq_line_num; i++) {
+ irq = irq_linear_revmap(dpu->domain, i);
+ if (irq)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(dpu->domain);
+}
+
+#define _DPU_UNITS_INIT(unit) \
+{ \
+ const struct dpu_unit *us = data->unit##s; \
+ int i; \
+ \
+ /* software check */ \
+ if (WARN_ON(us->num > ARRAY_SIZE(unit##_ids))) \
+ return -EINVAL; \
+ \
+ for (i = 0; i < us->num; i++) \
+ _dpu_##unit##_init(dpu, us->ids[i]); \
+}
+
+static int
+_dpu_submodules_init(struct dpu_soc *dpu, struct platform_device *pdev)
+{
+ const struct dpu_data *data = dpu->data;
+
+ _DPU_UNITS_INIT(cf);
+ _DPU_UNITS_INIT(dec);
+ _DPU_UNITS_INIT(ed);
+ _DPU_UNITS_INIT(fd);
+ _DPU_UNITS_INIT(fe);
+ _DPU_UNITS_INIT(fg);
+ _DPU_UNITS_INIT(fl);
+ _DPU_UNITS_INIT(fw);
+ _DPU_UNITS_INIT(hs);
+ _DPU_UNITS_INIT(lb);
+ _DPU_UNITS_INIT(tcon);
+ _DPU_UNITS_INIT(vs);
+
+ return 0;
+}
+
+#define DPU_UNIT_INIT(dpu, base, unit, name, id, pec_ofs, ofs) \
+{ \
+ int ret; \
+ ret = dpu_##unit##_init((dpu), (id), \
+ (pec_ofs) ? (base) + (pec_ofs) : 0, \
+ (base) + (ofs)); \
+ if (ret) { \
+ dev_err((dpu)->dev, "init %s%d failed with %d\n", \
+ (name), (id), ret); \
+ return ret; \
+ } \
+}
+
+#define DPU_UNITS_INIT(unit) \
+{ \
+ const struct dpu_unit *us = data->unit##s; \
+ int i; \
+ \
+ /* software check */ \
+ if (WARN_ON(us->num > ARRAY_SIZE(unit##_ids))) \
+ return -EINVAL; \
+ \
+ for (i = 0; i < us->num; i++) \
+ DPU_UNIT_INIT(dpu, dpu_base, unit, us->name, \
+ us->ids[i], \
+ us->pec_ofss ? us->pec_ofss[i] : 0, \
+ us->ofss[i]); \
+}
+
+static int dpu_submodules_init(struct dpu_soc *dpu,
+ struct platform_device *pdev, unsigned long dpu_base)
+{
+ const struct dpu_data *data = dpu->data;
+
+ DPU_UNITS_INIT(cf);
+ DPU_UNITS_INIT(dec);
+ DPU_UNITS_INIT(ed);
+ DPU_UNITS_INIT(fd);
+ DPU_UNITS_INIT(fe);
+ DPU_UNITS_INIT(fg);
+ DPU_UNITS_INIT(fl);
+ DPU_UNITS_INIT(fw);
+ DPU_UNITS_INIT(hs);
+ DPU_UNITS_INIT(lb);
+ DPU_UNITS_INIT(tcon);
+ DPU_UNITS_INIT(vs);
+
+ return 0;
+}
+
+static int platform_remove_devices_fn(struct device *dev, void *unused)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static void platform_device_unregister_children(struct platform_device *pdev)
+{
+ device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
+}
+
+struct dpu_platform_reg {
+ struct dpu_client_platformdata pdata;
+ const char *name;
+};
+
+static struct dpu_platform_reg client_reg[] = {
+ {
+ .pdata = {
+ .stream_id = 0,
+ },
+ .name = "imx-dpu-crtc",
+ }, {
+ .pdata = {
+ .stream_id = 1,
+ },
+ .name = "imx-dpu-crtc",
+ }
+};
+
+static DEFINE_MUTEX(dpu_client_id_mutex);
+static int dpu_client_id;
+
+static int dpu_get_plane_resource(struct dpu_soc *dpu,
+ struct dpu_plane_res *res)
+{
+ const struct dpu_unit *fds = dpu->data->fds;
+ const struct dpu_unit *fls = dpu->data->fls;
+ const struct dpu_unit *fws = dpu->data->fws;
+ const struct dpu_unit *lbs = dpu->data->lbs;
+ struct dpu_plane_grp *grp = plane_res_to_grp(res);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res->cf); i++) {
+ res->cf[i] = dpu_cf_get(dpu, i);
+ if (IS_ERR(res->cf[i]))
+ return PTR_ERR(res->cf[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->ed); i++) {
+ res->ed[i] = dpu_ed_get(dpu, i);
+ if (IS_ERR(res->ed[i]))
+ return PTR_ERR(res->ed[i]);
+ }
+ for (i = 0; i < fds->num; i++) {
+ res->fd[i] = dpu_fd_get(dpu, i);
+ if (IS_ERR(res->fd[i]))
+ return PTR_ERR(res->fd[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->fe); i++) {
+ res->fe[i] = dpu_fe_get(dpu, i);
+ if (IS_ERR(res->fe[i]))
+ return PTR_ERR(res->fe[i]);
+ grp->hw_plane_fetcheco_num = ARRAY_SIZE(res->fe);
+ }
+ for (i = 0; i < fls->num; i++) {
+ res->fl[i] = dpu_fl_get(dpu, i);
+ if (IS_ERR(res->fl[i]))
+ return PTR_ERR(res->fl[i]);
+ }
+ for (i = 0; i < fws->num; i++) {
+ res->fw[i] = dpu_fw_get(dpu, fw_ids[i]);
+ if (IS_ERR(res->fw[i]))
+ return PTR_ERR(res->fw[i]);
+ }
+ /* HScaler could be shared with capture. */
+ if (display_plane_video_proc) {
+ for (i = 0; i < ARRAY_SIZE(res->hs); i++) {
+ res->hs[i] = dpu_hs_get(dpu, hs_ids[i]);
+ if (IS_ERR(res->hs[i]))
+ return PTR_ERR(res->hs[i]);
+ }
+ grp->hw_plane_hscaler_num = ARRAY_SIZE(res->hs);
+ }
+ for (i = 0; i < lbs->num; i++) {
+ res->lb[i] = dpu_lb_get(dpu, i);
+ if (IS_ERR(res->lb[i]))
+ return PTR_ERR(res->lb[i]);
+ }
+ /* VScaler could be shared with capture. */
+ if (display_plane_video_proc) {
+ for (i = 0; i < ARRAY_SIZE(res->vs); i++) {
+ res->vs[i] = dpu_vs_get(dpu, vs_ids[i]);
+ if (IS_ERR(res->vs[i]))
+ return PTR_ERR(res->vs[i]);
+ }
+ grp->hw_plane_vscaler_num = ARRAY_SIZE(res->vs);
+ }
+
+ grp->hw_plane_num = fds->num + fls->num + fws->num;
+
+ return 0;
+}
+
+static void dpu_put_plane_resource(struct dpu_plane_res *res)
+{
+ struct dpu_plane_grp *grp = plane_res_to_grp(res);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res->cf); i++) {
+ if (!IS_ERR_OR_NULL(res->cf[i]))
+ dpu_cf_put(res->cf[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->ed); i++) {
+ if (!IS_ERR_OR_NULL(res->ed[i]))
+ dpu_ed_put(res->ed[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->fd); i++) {
+ if (!IS_ERR_OR_NULL(res->fd[i]))
+ dpu_fd_put(res->fd[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->fe); i++) {
+ if (!IS_ERR_OR_NULL(res->fe[i]))
+ dpu_fe_put(res->fe[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->fl); i++) {
+ if (!IS_ERR_OR_NULL(res->fl[i]))
+ dpu_fl_put(res->fl[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->fw); i++) {
+ if (!IS_ERR_OR_NULL(res->fw[i]))
+ dpu_fw_put(res->fw[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->hs); i++) {
+ if (!IS_ERR_OR_NULL(res->hs[i]))
+ dpu_hs_put(res->hs[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->lb); i++) {
+ if (!IS_ERR_OR_NULL(res->lb[i]))
+ dpu_lb_put(res->lb[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(res->vs); i++) {
+ if (!IS_ERR_OR_NULL(res->vs[i]))
+ dpu_vs_put(res->vs[i]);
+ }
+
+ grp->hw_plane_num = 0;
+}
+
+/*
+ * Register one "imx-dpu-crtc" child platform device per display stream
+ * whose port node is present in the DT.  All clients share one plane
+ * resource group.  Returns 0 on success or a negative errno; on error
+ * every already-registered child is unregistered again.
+ */
+static int dpu_add_client_devices(struct dpu_soc *dpu)
+{
+	const struct dpu_data *data = dpu->data;
+	struct device *dev = dpu->dev;
+	struct dpu_platform_reg *reg;
+	struct dpu_plane_grp *plane_grp;
+	size_t client_num, reg_size;
+	int i, id, ret;
+
+	client_num = ARRAY_SIZE(client_reg);
+
+	/* Allocation failures return -ENOMEM (was -ENODEV). */
+	reg = devm_kcalloc(dev, client_num, sizeof(*reg), GFP_KERNEL);
+	if (!reg)
+		return -ENOMEM;
+
+	plane_grp = devm_kzalloc(dev, sizeof(*plane_grp), GFP_KERNEL);
+	if (!plane_grp)
+		return -ENOMEM;
+
+	mutex_init(&plane_grp->mutex);
+
+	/* Reserve a globally unique id range for this DPU's clients. */
+	mutex_lock(&dpu_client_id_mutex);
+	id = dpu_client_id;
+	dpu_client_id += client_num;
+	mutex_unlock(&dpu_client_id_mutex);
+
+	reg_size = client_num * sizeof(struct dpu_platform_reg);
+	memcpy(reg, &client_reg[0], reg_size);
+
+	plane_grp->src_mask = data->plane_src_mask;
+	plane_grp->id = id / client_num;
+	plane_grp->has_vproc = display_plane_video_proc;
+
+	ret = dpu_get_plane_resource(dpu, &plane_grp->res);
+	if (ret)
+		goto err_get_plane_res;
+
+	for (i = 0; i < client_num; i++) {
+		struct platform_device *pdev;
+		struct device_node *of_node = NULL;
+
+		/* Associate subdevice with the corresponding port node. */
+		of_node = of_graph_get_port_by_id(dev->of_node, i);
+		if (!of_node) {
+			dev_info(dev,
+				 "no port@%d node in %s, not using DISP%d\n",
+				 i, dev->of_node->full_name, i);
+			continue;
+		}
+
+		reg[i].pdata.plane_grp = plane_grp;
+
+		pdev = platform_device_alloc(reg[i].name, id++);
+		if (!pdev) {
+			ret = -ENOMEM;
+			goto err_register;
+		}
+
+		pdev->dev.parent = dev;
+
+		reg[i].pdata.of_node = of_node;
+		ret = platform_device_add_data(pdev, &reg[i].pdata,
+					       sizeof(reg[i].pdata));
+		if (!ret)
+			ret = platform_device_add(pdev);
+		if (ret) {
+			/* Also frees the pdata copy added above. */
+			platform_device_put(pdev);
+			goto err_register;
+		}
+	}
+
+	return 0;
+
+err_register:
+	platform_device_unregister_children(to_platform_device(dev));
+err_get_plane_res:
+	dpu_put_plane_resource(&plane_grp->res);
+
+	return ret;
+}
+
+/*
+ * Probe: look up resources and irqs, attach power domains, initialize
+ * the irq chips and all DPU sub-units, then register the client crtc
+ * devices.  Unwinds in reverse order on failure.
+ */
+static int dpu_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(dpu_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct dpu_soc *dpu;
+ struct resource *res;
+ unsigned long dpu_base;
+ const struct dpu_data *data = of_id->data;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ dpu_base = res->start;
+
+ /* NOTE(review): -ENOMEM would be conventional for an alloc failure. */
+ dpu = devm_kzalloc(&pdev->dev, sizeof(*dpu), GFP_KERNEL);
+ if (!dpu)
+ return -ENODEV;
+
+ dpu->dev = &pdev->dev;
+ dpu->data = data;
+ dpu->id = of_alias_get_id(np, "dpu");
+ dpu->irq_line_num = of_irq_count(np);
+
+ dpu_units_addr_dbg(dpu, pdev, dpu_base);
+
+ ret = dpu_get_irq(pdev, dpu);
+ if (ret < 0)
+ return ret;
+
+ ret = dpu_sc_misc_init(dpu);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_init(&dpu->lock);
+
+ /* Map only the common (control) block; units are mapped per-module. */
+ dpu->cm_reg = devm_ioremap(dpu->dev, dpu_base + data->cm_ofs, SZ_1K);
+ if (!dpu->cm_reg)
+ return -ENOMEM;
+
+ ret = dpu_attach_pm_domains(dpu);
+ if (ret)
+ return ret;
+
+ ret = dpu_irq_init(dpu);
+ if (ret)
+ goto failed_irq;
+
+ ret = dpu_submodules_init(dpu, pdev, dpu_base);
+ if (ret)
+ goto failed_submodules_init;
+
+ ret = dpu_pxlink_init(dpu);
+ if (ret < 0) {
+ dev_err(dpu->dev,
+ "failed to initialize pixel link %d\n", ret);
+ goto failed_pxlink_init;
+ }
+
+ platform_set_drvdata(pdev, dpu);
+
+ ret = dpu_add_client_devices(dpu);
+ if (ret) {
+ dev_err(dpu->dev,
+ "adding client devices failed with %d\n", ret);
+ goto failed_add_clients;
+ }
+
+ dev_info(dpu->dev, "driver probed\n");
+
+ return 0;
+
+failed_add_clients:
+ platform_set_drvdata(pdev, NULL);
+failed_pxlink_init:
+failed_submodules_init:
+ dpu_irq_exit(dpu);
+failed_irq:
+ dpu_detach_pm_domains(dpu);
+ return ret;
+}
+
+/* Remove: unregister crtc children, then tear down irqs and pm domains. */
+static int dpu_remove(struct platform_device *pdev)
+{
+ struct dpu_soc *dpu = platform_get_drvdata(pdev);
+
+ platform_device_unregister_children(pdev);
+
+ dpu_irq_exit(dpu);
+ dpu_detach_pm_domains(dpu);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dpu_suspend(struct device *dev)
+{
+ /*
+ * The dpu core driver currently depends on the client drivers
+ * to do suspend operations to leave dpu a cleaned up state
+ * machine status before the system enters sleep mode.
+ */
+ return 0;
+}
+
+static int dpu_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_soc *dpu = platform_get_drvdata(pdev);
+
+ _dpu_submodules_init(dpu, pdev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops dpu_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dpu_suspend, dpu_resume)
+};
+
+static struct platform_driver dpu_driver = {
+ .driver = {
+ .pm = &dpu_pm_ops,
+ .name = "dpu-core",
+ .of_match_table = dpu_dt_ids,
+ },
+ .probe = dpu_probe,
+ .remove = dpu_remove,
+};
+
+module_platform_driver(dpu_driver);
+
+MODULE_DESCRIPTION("i.MX DPU driver");
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/imx/dpu/dpu-constframe.c b/drivers/gpu/imx/dpu/dpu-constframe.c
new file mode 100644
index 000000000000..4255c3969238
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-constframe.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+static unsigned int safety_stream_cf_color = 0x0;
+module_param(safety_stream_cf_color, uint, 0444);
+MODULE_PARM_DESC(safety_stream_cf_color,
+"Safety stream constframe color in hex(0xRRGGBBAA) [default=0x00000000]");
+
+#define FRAMEDIMENSIONS 0xC
+#define WIDTH(w) (((w) - 1) & 0x3FFF)
+#define HEIGHT(h) ((((h) - 1) & 0x3FFF) << 16)
+#define CONSTANTCOLOR 0x10
+#define RED(r) (((r) & 0xFF) << 24)
+#define GREEN(g) (((g) & 0xFF) << 16)
+#define BLUE(b) (((b) & 0xFF) << 8)
+#define ALPHA(a) ((a) & 0xFF)
+#define CONTROLTRIGGER 0x14
+#define START 0x18
+#define STATUS 0x1C
+
+struct dpu_constframe {
+ void __iomem *pec_base;
+ void __iomem *base;
+ struct mutex mutex;
+ int id;
+ bool inuse;
+ struct dpu_soc *dpu;
+};
+
+static inline u32 dpu_cf_read(struct dpu_constframe *cf, unsigned int offset)
+{
+ return readl(cf->base + offset);
+}
+
+static inline void dpu_cf_write(struct dpu_constframe *cf,
+ unsigned int offset, u32 value)
+{
+ writel(value, cf->base + offset);
+}
+
+void constframe_shden(struct dpu_constframe *cf, bool enable)
+{
+ u32 val;
+
+ val = enable ? SHDEN : 0;
+
+ mutex_lock(&cf->mutex);
+ dpu_cf_write(cf, STATICCONTROL, val);
+ mutex_unlock(&cf->mutex);
+}
+EXPORT_SYMBOL_GPL(constframe_shden);
+
+void constframe_framedimensions(struct dpu_constframe *cf, unsigned int w,
+ unsigned int h)
+{
+ u32 val;
+
+ val = WIDTH(w) | HEIGHT(h);
+
+ mutex_lock(&cf->mutex);
+ dpu_cf_write(cf, FRAMEDIMENSIONS, val);
+ mutex_unlock(&cf->mutex);
+}
+EXPORT_SYMBOL_GPL(constframe_framedimensions);
+
+void constframe_framedimensions_copy_prim(struct dpu_constframe *cf)
+{
+ struct dpu_constframe *prim_cf = NULL;
+ unsigned int prim_id;
+ int i;
+ u32 val;
+
+ if (cf->id != 0 && cf->id != 1) {
+ dev_warn(cf->dpu->dev, "ConstFrame%d is not a secondary one\n",
+ cf->id);
+ return;
+ }
+
+ prim_id = cf->id + 4;
+
+ for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
+ if (cf_ids[i] == prim_id)
+ prim_cf = cf->dpu->cf_priv[i];
+
+ if (!prim_cf) {
+ dev_warn(cf->dpu->dev, "cannot find ConstFrame%d's primary peer\n",
+ cf->id);
+ return;
+ }
+
+ mutex_lock(&cf->mutex);
+ val = dpu_cf_read(prim_cf, FRAMEDIMENSIONS);
+ dpu_cf_write(cf, FRAMEDIMENSIONS, val);
+ mutex_unlock(&cf->mutex);
+}
+EXPORT_SYMBOL_GPL(constframe_framedimensions_copy_prim);
+
+void constframe_constantcolor(struct dpu_constframe *cf, unsigned int r,
+ unsigned int g, unsigned int b, unsigned int a)
+{
+ u32 val;
+
+ val = RED(r) | GREEN(g) | BLUE(b) | ALPHA(a);
+
+ mutex_lock(&cf->mutex);
+ dpu_cf_write(cf, CONSTANTCOLOR, val);
+ mutex_unlock(&cf->mutex);
+}
+EXPORT_SYMBOL_GPL(constframe_constantcolor);
+
+void constframe_controltrigger(struct dpu_constframe *cf, bool trigger)
+{
+ u32 val;
+
+ val = trigger ? SHDTOKGEN : 0;
+
+ mutex_lock(&cf->mutex);
+ dpu_cf_write(cf, CONTROLTRIGGER, val);
+ mutex_unlock(&cf->mutex);
+}
+EXPORT_SYMBOL_GPL(constframe_controltrigger);
+
+struct dpu_constframe *dpu_cf_get(struct dpu_soc *dpu, int id)
+{
+ struct dpu_constframe *cf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
+ if (cf_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(cf_ids))
+ return ERR_PTR(-EINVAL);
+
+ cf = dpu->cf_priv[i];
+
+ mutex_lock(&cf->mutex);
+
+ if (cf->inuse) {
+ mutex_unlock(&cf->mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ cf->inuse = true;
+
+ mutex_unlock(&cf->mutex);
+
+ return cf;
+}
+EXPORT_SYMBOL_GPL(dpu_cf_get);
+
+void dpu_cf_put(struct dpu_constframe *cf)
+{
+ mutex_lock(&cf->mutex);
+
+ cf->inuse = false;
+
+ mutex_unlock(&cf->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_cf_put);
+
+void _dpu_cf_init(struct dpu_soc *dpu, unsigned int id)
+{
+ struct dpu_constframe *cf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
+ if (cf_ids[i] == id)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(cf_ids)))
+ return;
+
+ cf = dpu->cf_priv[i];
+
+ constframe_shden(cf, true);
+
+ if (id == 4 || id == 5) {
+ mutex_lock(&cf->mutex);
+ dpu_cf_write(cf, CONSTANTCOLOR, safety_stream_cf_color);
+ mutex_unlock(&cf->mutex);
+ }
+}
+
+int dpu_cf_init(struct dpu_soc *dpu, unsigned int id,
+ unsigned long pec_base, unsigned long base)
+{
+ struct dpu_constframe *cf;
+ int i;
+
+ cf = devm_kzalloc(dpu->dev, sizeof(*cf), GFP_KERNEL);
+ if (!cf)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
+ if (cf_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(cf_ids))
+ return -EINVAL;
+
+ dpu->cf_priv[i] = cf;
+
+ cf->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
+ if (!cf->pec_base)
+ return -ENOMEM;
+
+ cf->base = devm_ioremap(dpu->dev, base, SZ_32);
+ if (!cf->base)
+ return -ENOMEM;
+
+ cf->dpu = dpu;
+ cf->id = id;
+
+ mutex_init(&cf->mutex);
+
+ _dpu_cf_init(dpu, id);
+
+ return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-disengcfg.c b/drivers/gpu/imx/dpu/dpu-disengcfg.c
new file mode 100644
index 000000000000..e59b7d74e1c2
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-disengcfg.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <drm/drm_mode.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include "dpu-prv.h"
+
+#define CLOCKCTRL 0x8
+typedef enum {
+ DSPCLKDIVIDE__DIV1, /* Ext disp clk signal has pix clk freq. */
+ DSPCLKDIVIDE__DIV2, /* Ext disp clk signal has 2x the pix clk freq. */
+} clkdivide_t;
+#define POLARITYCTRL 0xC
+#define POLHS_HIGH BIT(0)
+#define POLVS_HIGH BIT(1)
+#define POLEN_HIGH BIT(2)
+#define PIXINV_INV BIT(3)
+#define SRCSELECT 0x10
+
+struct dpu_disengcfg {
+ void __iomem *base;
+ struct mutex mutex;
+ int id;
+ bool inuse;
+ struct dpu_soc *dpu;
+};
+
+static inline u32 dpu_dec_read(struct dpu_disengcfg *dec, unsigned int offset)
+{
+ return readl(dec->base + offset);
+}
+
+static inline void dpu_dec_write(struct dpu_disengcfg *dec,
+ unsigned int offset, u32 value)
+{
+ writel(value, dec->base + offset);
+}
+
+struct dpu_disengcfg *dpu_dec_get(struct dpu_soc *dpu, int id)
+{
+ struct dpu_disengcfg *dec;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dec_ids); i++)
+ if (dec_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(dec_ids))
+ return ERR_PTR(-EINVAL);
+
+ dec = dpu->dec_priv[i];
+
+ mutex_lock(&dec->mutex);
+
+ if (dec->inuse) {
+ mutex_unlock(&dec->mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ dec->inuse = true;
+
+ mutex_unlock(&dec->mutex);
+
+ return dec;
+}
+EXPORT_SYMBOL_GPL(dpu_dec_get);
+
+void dpu_dec_put(struct dpu_disengcfg *dec)
+{
+ mutex_lock(&dec->mutex);
+
+ dec->inuse = false;
+
+ mutex_unlock(&dec->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_dec_put);
+
+void _dpu_dec_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_disengcfg *dec;
+	u32 val;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dec_ids); i++)
+		if (dec_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(dec_ids)))
+		return;
+
+	dec = dpu->dec_priv[i];
+
+	val = dpu_dec_read(dec, POLARITYCTRL);
+	val &= ~POLHS_HIGH;
+	val &= ~POLVS_HIGH;
+	dpu_dec_write(dec, POLARITYCTRL, val);
+}
+
+int dpu_dec_init(struct dpu_soc *dpu, unsigned int id,
+ unsigned long unused, unsigned long base)
+{
+ struct dpu_disengcfg *dec;
+
+ dec = devm_kzalloc(dpu->dev, sizeof(*dec), GFP_KERNEL);
+ if (!dec)
+ return -ENOMEM;
+
+ dpu->dec_priv[id] = dec;
+
+ dec->base = devm_ioremap(dpu->dev, base, SZ_16);
+ if (!dec->base)
+ return -ENOMEM;
+
+ dec->dpu = dpu;
+ dec->id = id;
+ mutex_init(&dec->mutex);
+
+ return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-extdst.c b/drivers/gpu/imx/dpu/dpu-extdst.c
new file mode 100644
index 000000000000..09191b6d5769
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-extdst.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define PIXENGCFG_STATIC 0x8
+#define POWERDOWN BIT(4)
+#define SYNC_MODE BIT(8)
+#define SW_RESET BIT(11)
+#define DIV(n) (((n) & 0xFF) << 16)
+#define DIV_RESET 0x80
+#define PIXENGCFG_DYNAMIC 0xC
+#define PIXENGCFG_REQUEST 0x10
+#define SHDLDREQ(n) BIT(n)
+#define SEL_SHDLDREQ BIT(0)
+#define PIXENGCFG_TRIGGER 0x14
+#define SYNC_TRIGGER BIT(0)
+#define TRIGGER_SEQUENCE_COMPLETE BIT(4)
+#define PIXENGCFG_STATUS 0x18
+#define SYNC_BUSY BIT(8)
+#define KICK_MODE BIT(8)
+#define PERFCOUNTMODE BIT(12)
+#define CONTROL 0xC
+#define GAMMAAPPLYENABLE BIT(0)
+#define SOFTWAREKICK 0x10
+#define KICK BIT(0)
+#define STATUS 0x14
+#define CNT_ERR_STS BIT(0)
+#define CONTROLWORD 0x18
+#define CURPIXELCNT 0x1C
+static u16 get_xval(u32 pixel_cnt)
+{
+ return pixel_cnt & 0xFFFF;
+}
+
+static u16 get_yval(u32 pixel_cnt)
+{
+ return pixel_cnt >> 16;
+}
+#define LASTPIXELCNT 0x20
+#define PERFCOUNTER 0x24
+
+struct dpu_extdst {
+ void __iomem *pec_base;
+ void __iomem *base;
+ struct mutex mutex;
+ int id;
+ bool inuse;
+ struct dpu_soc *dpu;
+};
+
+static inline u32 dpu_pec_ed_read(struct dpu_extdst *ed, unsigned int offset)
+{
+ return readl(ed->pec_base + offset);
+}
+
+static inline void dpu_pec_ed_write(struct dpu_extdst *ed,
+ unsigned int offset, u32 value)
+{
+ writel(value, ed->pec_base + offset);
+}
+
+static inline u32 dpu_ed_read(struct dpu_extdst *ed, unsigned int offset)
+{
+ return readl(ed->base + offset);
+}
+
+static inline void dpu_ed_write(struct dpu_extdst *ed,
+ unsigned int offset, u32 value)
+{
+ writel(value, ed->base + offset);
+}
+
+static inline bool dpu_ed_is_safety_stream(struct dpu_extdst *ed)
+{
+ if (ed->id == 4 || ed->id == 5)
+ return true;
+
+ return false;
+}
+
+void extdst_pixengcfg_shden(struct dpu_extdst *ed, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
+ if (enable)
+ val |= SHDEN;
+ else
+ val &= ~SHDEN;
+ dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_shden);
+
+void extdst_pixengcfg_powerdown(struct dpu_extdst *ed, bool powerdown)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
+ if (powerdown)
+ val |= POWERDOWN;
+ else
+ val &= ~POWERDOWN;
+ dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_powerdown);
+
+void extdst_pixengcfg_sync_mode(struct dpu_extdst *ed, ed_sync_mode_t mode)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
+ if (mode == AUTO)
+ val |= SYNC_MODE;
+ else
+ val &= ~SYNC_MODE;
+ dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_sync_mode);
+
+void extdst_pixengcfg_reset(struct dpu_extdst *ed, bool reset)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
+ if (reset)
+ val |= SW_RESET;
+ else
+ val &= ~SW_RESET;
+ dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_reset);
+
+void extdst_pixengcfg_div(struct dpu_extdst *ed, u16 div)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
+ val &= ~0xFF0000;
+ val |= DIV(div);
+ dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_div);
+
+int extdst_pixengcfg_src_sel(struct dpu_extdst *ed, extdst_src_sel_t src)
+{
+ mutex_lock(&ed->mutex);
+ dpu_pec_ed_write(ed, PIXENGCFG_DYNAMIC, src);
+ mutex_unlock(&ed->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_src_sel);
+
+void extdst_pixengcfg_sel_shdldreq(struct dpu_extdst *ed)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_REQUEST);
+ val |= SEL_SHDLDREQ;
+ dpu_pec_ed_write(ed, PIXENGCFG_REQUEST, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_sel_shdldreq);
+
+void extdst_pixengcfg_shdldreq(struct dpu_extdst *ed, u32 req_mask)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_REQUEST);
+ val |= req_mask;
+ dpu_pec_ed_write(ed, PIXENGCFG_REQUEST, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_shdldreq);
+
+void extdst_pixengcfg_sync_trigger(struct dpu_extdst *ed)
+{
+ mutex_lock(&ed->mutex);
+ dpu_pec_ed_write(ed, PIXENGCFG_TRIGGER, SYNC_TRIGGER);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_sync_trigger);
+
+void extdst_pixengcfg_trigger_sequence_complete(struct dpu_extdst *ed)
+{
+ mutex_lock(&ed->mutex);
+ dpu_pec_ed_write(ed, PIXENGCFG_TRIGGER, TRIGGER_SEQUENCE_COMPLETE);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_trigger_sequence_complete);
+
+bool extdst_pixengcfg_is_sync_busy(struct dpu_extdst *ed)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_STATUS);
+ mutex_unlock(&ed->mutex);
+
+ return val & SYNC_BUSY;
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_is_sync_busy);
+
+ed_pipeline_status_t extdst_pixengcfg_pipeline_status(struct dpu_extdst *ed)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_pec_ed_read(ed, PIXENGCFG_STATUS);
+ mutex_unlock(&ed->mutex);
+
+ return val & 0x3;
+}
+EXPORT_SYMBOL_GPL(extdst_pixengcfg_pipeline_status);
+
+void extdst_shden(struct dpu_extdst *ed, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, STATICCONTROL);
+ if (enable)
+ val |= SHDEN;
+ else
+ val &= ~SHDEN;
+ dpu_ed_write(ed, STATICCONTROL, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_shden);
+
+void extdst_kick_mode(struct dpu_extdst *ed, ed_kick_mode_t mode)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, STATICCONTROL);
+ val &= ~KICK_MODE;
+ val |= mode;
+ dpu_ed_write(ed, STATICCONTROL, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_kick_mode);
+
+void extdst_perfcountmode(struct dpu_extdst *ed, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, STATICCONTROL);
+ if (enable)
+ val |= PERFCOUNTMODE;
+ else
+ val &= ~PERFCOUNTMODE;
+ dpu_ed_write(ed, STATICCONTROL, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_perfcountmode);
+
+void extdst_gamma_apply_enable(struct dpu_extdst *ed, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, CONTROL);
+ if (enable)
+ val |= GAMMAAPPLYENABLE;
+ else
+ val &= ~GAMMAAPPLYENABLE;
+ dpu_ed_write(ed, CONTROL, val);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_gamma_apply_enable);
+
+void extdst_kick(struct dpu_extdst *ed)
+{
+ mutex_lock(&ed->mutex);
+ dpu_ed_write(ed, SOFTWAREKICK, KICK);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_kick);
+
+void extdst_cnt_err_clear(struct dpu_extdst *ed)
+{
+ mutex_lock(&ed->mutex);
+ dpu_ed_write(ed, STATUS, CNT_ERR_STS);
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(extdst_cnt_err_clear);
+
+bool extdst_cnt_err_status(struct dpu_extdst *ed)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, STATUS);
+ mutex_unlock(&ed->mutex);
+
+ return val & CNT_ERR_STS;
+}
+EXPORT_SYMBOL_GPL(extdst_cnt_err_status);
+
+u32 extdst_last_control_word(struct dpu_extdst *ed)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, CONTROLWORD);
+ mutex_unlock(&ed->mutex);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(extdst_last_control_word);
+
+void extdst_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, CURPIXELCNT);
+ mutex_unlock(&ed->mutex);
+
+ *x = get_xval(val);
+ *y = get_yval(val);
+}
+EXPORT_SYMBOL_GPL(extdst_pixel_cnt);
+
+void extdst_last_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, LASTPIXELCNT);
+ mutex_unlock(&ed->mutex);
+
+ *x = get_xval(val);
+ *y = get_yval(val);
+}
+EXPORT_SYMBOL_GPL(extdst_last_pixel_cnt);
+
+u32 extdst_perfresult(struct dpu_extdst *ed)
+{
+ u32 val;
+
+ mutex_lock(&ed->mutex);
+ val = dpu_ed_read(ed, PERFCOUNTER);
+ mutex_unlock(&ed->mutex);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(extdst_perfresult);
+
+struct dpu_extdst *dpu_ed_get(struct dpu_soc *dpu, int id)
+{
+ struct dpu_extdst *ed;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
+ if (ed_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(ed_ids))
+ return ERR_PTR(-EINVAL);
+
+ ed = dpu->ed_priv[i];
+
+ mutex_lock(&ed->mutex);
+
+ if (ed->inuse) {
+ mutex_unlock(&ed->mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ ed->inuse = true;
+
+ mutex_unlock(&ed->mutex);
+
+ return ed;
+}
+EXPORT_SYMBOL_GPL(dpu_ed_get);
+
+void dpu_ed_put(struct dpu_extdst *ed)
+{
+ mutex_lock(&ed->mutex);
+
+ ed->inuse = false;
+
+ mutex_unlock(&ed->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_ed_put);
+
+struct dpu_extdst *dpu_aux_ed_peek(struct dpu_extdst *ed)
+{
+ unsigned int aux_id = ed->id ^ 1;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
+ if (ed_ids[i] == aux_id)
+ return ed->dpu->ed_priv[i];
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dpu_aux_ed_peek);
+
+void _dpu_ed_init(struct dpu_soc *dpu, unsigned int id)
+{
+ struct dpu_extdst *ed;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
+ if (ed_ids[i] == id)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(ed_ids)))
+ return;
+
+ ed = dpu->ed_priv[i];
+
+ extdst_pixengcfg_src_sel(ed, ED_SRC_DISABLE);
+ extdst_pixengcfg_shden(ed, true);
+ extdst_pixengcfg_powerdown(ed, false);
+ extdst_pixengcfg_sync_mode(ed, SINGLE);
+ extdst_pixengcfg_reset(ed, false);
+ extdst_pixengcfg_div(ed, DIV_RESET);
+ extdst_shden(ed, true);
+ extdst_perfcountmode(ed, false);
+ extdst_kick_mode(ed, EXTERNAL);
+}
+
+int dpu_ed_init(struct dpu_soc *dpu, unsigned int id,
+ unsigned long pec_base, unsigned long base)
+{
+ struct dpu_extdst *ed;
+ int ret, i;
+
+ ed = devm_kzalloc(dpu->dev, sizeof(*ed), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
+ if (ed_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(ed_ids))
+ return -EINVAL;
+
+ dpu->ed_priv[i] = ed;
+
+ ed->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_32);
+ if (!ed->pec_base)
+ return -ENOMEM;
+
+ ed->base = devm_ioremap(dpu->dev, base, SZ_64);
+ if (!ed->base)
+ return -ENOMEM;
+
+ ed->dpu = dpu;
+ ed->id = id;
+ mutex_init(&ed->mutex);
+
+ ret = extdst_pixengcfg_src_sel(ed, ED_SRC_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ _dpu_ed_init(dpu, id);
+
+ return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-fetchdecode.c b/drivers/gpu/imx/dpu/dpu-fetchdecode.c
new file mode 100644
index 000000000000..c37ed1717b4c
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-fetchdecode.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+static const u32 fd_vproc_cap[2] = {
+ DPU_VPROC_CAP_HSCALER4 | DPU_VPROC_CAP_VSCALER4 |
+ DPU_VPROC_CAP_FETCHECO0,
+ DPU_VPROC_CAP_HSCALER5 | DPU_VPROC_CAP_VSCALER5 |
+ DPU_VPROC_CAP_FETCHECO1,
+};
+
+#define PIXENGCFG_DYNAMIC 0x8
+static const fd_dynamic_src_sel_t fd_srcs[2][4] = {
+ {
+ FD_SRC_DISABLE, FD_SRC_FETCHECO0,
+ FD_SRC_FETCHDECODE1, FD_SRC_FETCHWARP2
+ }, {
+ FD_SRC_DISABLE, FD_SRC_FETCHECO1,
+ FD_SRC_FETCHDECODE0, FD_SRC_FETCHWARP2
+ },
+};
+
+#define PIXENGCFG_STATUS 0xC
+
+#define RINGBUFSTARTADDR0 0x10
+#define RINGBUFWRAPADDR0 0x14
+#define FRAMEPROPERTIES0 0x18
+#define BASEADDRESS0 0x1C
+#define SOURCEBUFFERATTRIBUTES0 0x20
+#define SOURCEBUFFERDIMENSION0 0x24
+#define COLORCOMPONENTBITS0 0x28
+#define COLORCOMPONENTSHIFT0 0x2C
+#define LAYEROFFSET0 0x30
+#define CLIPWINDOWOFFSET0 0x34
+#define CLIPWINDOWDIMENSIONS0 0x38
+#define CONSTANTCOLOR0 0x3C
+#define LAYERPROPERTY0 0x40
+#define FRAMEDIMENSIONS 0x44
+#define FRAMERESAMPLING 0x48
+#define DECODECONTROL 0x4C
+#define SOURCEBUFFERLENGTH 0x50
+#define CONTROL 0x54
+#define CONTROLTRIGGER 0x58
+#define START 0x5C
+#define FETCHTYPE 0x60
+#define DECODERSTATUS 0x64
+#define READADDRESS0 0x68
+#define BURSTBUFFERPROPERTIES 0x6C
+#define STATUS 0x70
+#define HIDDENSTATUS 0x74
+
+struct dpu_fetchdecode {
+ struct dpu_fetchunit fu;
+ fetchtype_t fetchtype;
+};
+
+int fetchdecode_pixengcfg_dynamic_src_sel(struct dpu_fetchunit *fu,
+ fd_dynamic_src_sel_t src)
+{
+ int i;
+
+ mutex_lock(&fu->mutex);
+ for (i = 0; i < 4; i++) {
+ if (fd_srcs[fu->id][i] == src) {
+ dpu_pec_fu_write(fu, PIXENGCFG_DYNAMIC, src);
+ mutex_unlock(&fu->mutex);
+ return 0;
+ }
+ }
+ mutex_unlock(&fu->mutex);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fetchdecode_pixengcfg_dynamic_src_sel);
+
+static void
+fetchdecode_set_baseaddress(struct dpu_fetchunit *fu, dma_addr_t baddr)
+{
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, BASEADDRESS0, baddr);
+ mutex_unlock(&fu->mutex);
+}
+
+static void fetchdecode_set_src_bpp(struct dpu_fetchunit *fu, int bpp)
+{
+ u32 val;
+
+ mutex_lock(&fu->mutex);
+ val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES0);
+ val &= ~0x3f0000;
+ val |= BITSPERPIXEL(bpp);
+ dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES0, val);
+ mutex_unlock(&fu->mutex);
+}
+
+static void
+fetchdecode_set_src_stride(struct dpu_fetchunit *fu, unsigned int stride)
+{
+ u32 val;
+
+ mutex_lock(&fu->mutex);
+ val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES0);
+ val &= ~0xffff;
+ val |= STRIDE(stride);
+ dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES0, val);
+ mutex_unlock(&fu->mutex);
+}
+
+static void
+fetchdecode_set_src_buf_dimensions(struct dpu_fetchunit *fu,
+ unsigned int w, unsigned int h,
+ u32 unused, bool deinterlace)
+{
+ u32 val;
+
+ if (deinterlace)
+ h /= 2;
+
+ val = LINEWIDTH(w) | LINECOUNT(h);
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, SOURCEBUFFERDIMENSION0, val);
+ mutex_unlock(&fu->mutex);
+}
+
+static void
+fetchdecode_set_fmt(struct dpu_fetchunit *fu, u32 fmt, bool deinterlace)
+{
+ u32 val, bits, shift;
+ bool is_planar_yuv = false, is_rastermode_yuv422 = false;
+ bool is_yuv422upsamplingmode_interpolate = false;
+ bool is_inputselect_compact = false;
+ bool need_csc = false;
+ int i;
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ is_rastermode_yuv422 = true;
+ is_yuv422upsamplingmode_interpolate = true;
+ need_csc = true;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ is_yuv422upsamplingmode_interpolate = true;
+ /* fall-through */
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ if (deinterlace)
+ is_yuv422upsamplingmode_interpolate = true;
+ is_planar_yuv = true;
+ is_rastermode_yuv422 = true;
+ is_inputselect_compact = true;
+ need_csc = true;
+ break;
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ is_planar_yuv = true;
+ is_yuv422upsamplingmode_interpolate = true;
+ is_inputselect_compact = true;
+ need_csc = true;
+ break;
+ default:
+ break;
+ }
+
+ mutex_lock(&fu->mutex);
+ val = dpu_fu_read(fu, CONTROL);
+ val &= ~YUV422UPSAMPLINGMODE_MASK;
+ val &= ~INPUTSELECT_MASK;
+ val &= ~RASTERMODE_MASK;
+ if (is_yuv422upsamplingmode_interpolate)
+ val |= YUV422UPSAMPLINGMODE(YUV422UPSAMPLINGMODE__INTERPOLATE);
+ else
+ val |= YUV422UPSAMPLINGMODE(YUV422UPSAMPLINGMODE__REPLICATE);
+ if (is_inputselect_compact)
+ val |= INPUTSELECT(INPUTSELECT__COMPPACK);
+ else
+ val |= INPUTSELECT(INPUTSELECT__INACTIVE);
+ if (is_rastermode_yuv422)
+ val |= RASTERMODE(RASTERMODE__YUV422);
+ else
+ val |= RASTERMODE(RASTERMODE__NORMAL);
+ dpu_fu_write(fu, CONTROL, val);
+
+ val = dpu_fu_read(fu, LAYERPROPERTY0);
+ val &= ~YUVCONVERSIONMODE_MASK;
+ if (need_csc)
+ /*
+			 * assuming fetchdecode always outputs RGB pixel formats
+ *
+ * FIXME:
+ * determine correct standard here - ITU601 or ITU601_FR
+ * or ITU709
+ */
+ val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__ITU601_FR);
+ else
+ val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF);
+ dpu_fu_write(fu, LAYERPROPERTY0, val);
+ mutex_unlock(&fu->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) {
+ if (dpu_pixel_format_matrix[i].pixel_format == fmt) {
+ bits = dpu_pixel_format_matrix[i].bits;
+ shift = dpu_pixel_format_matrix[i].shift;
+
+ if (is_planar_yuv) {
+ bits &= ~(U_BITS_MASK | V_BITS_MASK);
+ shift &= ~(U_SHIFT_MASK | V_SHIFT_MASK);
+ }
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, COLORCOMPONENTBITS0, bits);
+ dpu_fu_write(fu, COLORCOMPONENTSHIFT0, shift);
+ mutex_unlock(&fu->mutex);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+void fetchdecode_layeroffset(struct dpu_fetchunit *fu, unsigned int x,
+ unsigned int y)
+{
+ u32 val;
+
+ val = LAYERXOFFSET(x) | LAYERYOFFSET(y);
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, LAYEROFFSET0, val);
+ mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchdecode_layeroffset);
+
+void fetchdecode_clipoffset(struct dpu_fetchunit *fu, unsigned int x,
+ unsigned int y)
+{
+ u32 val;
+
+ val = CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y);
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, CLIPWINDOWOFFSET0, val);
+ mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchdecode_clipoffset);
+
+static void fetchdecode_enable_src_buf(struct dpu_fetchunit *fu)
+{
+ u32 val;
+
+ mutex_lock(&fu->mutex);
+ val = dpu_fu_read(fu, LAYERPROPERTY0);
+ val |= SOURCEBUFFERENABLE;
+ dpu_fu_write(fu, LAYERPROPERTY0, val);
+ mutex_unlock(&fu->mutex);
+}
+
+static void fetchdecode_disable_src_buf(struct dpu_fetchunit *fu)
+{
+ u32 val;
+
+ mutex_lock(&fu->mutex);
+ val = dpu_fu_read(fu, LAYERPROPERTY0);
+ val &= ~SOURCEBUFFERENABLE;
+ dpu_fu_write(fu, LAYERPROPERTY0, val);
+ mutex_unlock(&fu->mutex);
+}
+
+static bool fetchdecode_is_enabled(struct dpu_fetchunit *fu)
+{
+ u32 val;
+
+ mutex_lock(&fu->mutex);
+ val = dpu_fu_read(fu, LAYERPROPERTY0);
+ mutex_unlock(&fu->mutex);
+
+ return !!(val & SOURCEBUFFERENABLE);
+}
+
+void fetchdecode_clipdimensions(struct dpu_fetchunit *fu, unsigned int w,
+ unsigned int h)
+{
+ u32 val;
+
+ val = CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h);
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, CLIPWINDOWDIMENSIONS0, val);
+ mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchdecode_clipdimensions);
+
+static void
+fetchdecode_set_framedimensions(struct dpu_fetchunit *fu,
+ unsigned int w, unsigned int h,
+ bool deinterlace)
+{
+ u32 val;
+
+ if (deinterlace)
+ h /= 2;
+
+ val = FRAMEWIDTH(w) | FRAMEHEIGHT(h);
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, FRAMEDIMENSIONS, val);
+ mutex_unlock(&fu->mutex);
+}
+
+void fetchdecode_rgb_constantcolor(struct dpu_fetchunit *fu,
+ u8 r, u8 g, u8 b, u8 a)
+{
+ u32 val;
+
+ val = rgb_color(r, g, b, a);
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, CONSTANTCOLOR0, val);
+ mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchdecode_rgb_constantcolor);
+
+void fetchdecode_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v)
+{
+ u32 val;
+
+ val = yuv_color(y, u, v);
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, CONSTANTCOLOR0, val);
+ mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchdecode_yuv_constantcolor);
+
+static void fetchdecode_set_controltrigger(struct dpu_fetchunit *fu)
+{
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN);
+ mutex_unlock(&fu->mutex);
+}
+
+int fetchdecode_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type)
+{
+ struct dpu_soc *dpu = fu->dpu;
+ u32 val;
+
+ mutex_lock(&fu->mutex);
+ val = dpu_fu_read(fu, FETCHTYPE);
+ val &= FETCHTYPE_MASK;
+ mutex_unlock(&fu->mutex);
+
+ switch (val) {
+ case FETCHTYPE__DECODE:
+ case FETCHTYPE__LAYER:
+ case FETCHTYPE__WARP:
+ case FETCHTYPE__ECO:
+ case FETCHTYPE__PERSP:
+ case FETCHTYPE__ROT:
+ case FETCHTYPE__DECODEL:
+ case FETCHTYPE__LAYERL:
+ case FETCHTYPE__ROTL:
+ break;
+ default:
+ dev_warn(dpu->dev, "Invalid fetch type %u for FetchDecode%d\n",
+ val, fu->id);
+ return -EINVAL;
+ }
+
+ *type = val;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fetchdecode_fetchtype);
+
+u32 fetchdecode_get_vproc_mask(struct dpu_fetchunit *fu)
+{
+ return fd_vproc_cap[fu->id];
+}
+EXPORT_SYMBOL_GPL(fetchdecode_get_vproc_mask);
+
+struct dpu_fetchunit *fetchdecode_get_fetcheco(struct dpu_fetchunit *fu)
+{
+ struct dpu_soc *dpu = fu->dpu;
+
+ switch (fu->id) {
+ case 0:
+ case 1:
+ return dpu->fe_priv[fu->id];
+ default:
+ WARN_ON(1);
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(fetchdecode_get_fetcheco);
+
+bool fetchdecode_need_fetcheco(struct dpu_fetchunit *fu, u32 fmt)
+{
+ struct dpu_fetchunit *fe = fetchdecode_get_fetcheco(fu);
+
+ if (IS_ERR_OR_NULL(fe))
+ return false;
+
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(fetchdecode_need_fetcheco);
+
+struct dpu_hscaler *fetchdecode_get_hscaler(struct dpu_fetchunit *fu)
+{
+ struct dpu_soc *dpu = fu->dpu;
+
+ switch (fu->id) {
+ case 0:
+ case 2:
+ return dpu->hs_priv[0];
+ case 1:
+ case 3:
+ return dpu->hs_priv[1];
+ default:
+ WARN_ON(1);
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(fetchdecode_get_hscaler);
+
+struct dpu_vscaler *fetchdecode_get_vscaler(struct dpu_fetchunit *fu)
+{
+ struct dpu_soc *dpu = fu->dpu;
+
+ switch (fu->id) {
+ case 0:
+ case 2:
+ return dpu->vs_priv[0];
+ case 1:
+ case 3:
+ return dpu->vs_priv[1];
+ default:
+ WARN_ON(1);
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(fetchdecode_get_vscaler);
+
+struct dpu_fetchunit *dpu_fd_get(struct dpu_soc *dpu, int id)
+{
+ struct dpu_fetchunit *fu;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fd_ids); i++)
+ if (fd_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(fd_ids))
+ return ERR_PTR(-EINVAL);
+
+ fu = dpu->fd_priv[i];
+
+ mutex_lock(&fu->mutex);
+
+ if (fu->inuse) {
+ mutex_unlock(&fu->mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ fu->inuse = true;
+
+ mutex_unlock(&fu->mutex);
+
+ return fu;
+}
+EXPORT_SYMBOL_GPL(dpu_fd_get);
+
+void dpu_fd_put(struct dpu_fetchunit *fu)
+{
+ mutex_lock(&fu->mutex);
+
+ fu->inuse = false;
+
+ mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_fd_put);
+
+static const struct dpu_fetchunit_ops fd_ops = {
+ .set_burstlength = fetchunit_set_burstlength,
+ .set_baseaddress = fetchdecode_set_baseaddress,
+ .set_src_bpp = fetchdecode_set_src_bpp,
+ .set_src_stride = fetchdecode_set_src_stride,
+ .set_src_buf_dimensions = fetchdecode_set_src_buf_dimensions,
+ .set_fmt = fetchdecode_set_fmt,
+ .enable_src_buf = fetchdecode_enable_src_buf,
+ .disable_src_buf = fetchdecode_disable_src_buf,
+ .is_enabled = fetchdecode_is_enabled,
+ .set_framedimensions = fetchdecode_set_framedimensions,
+ .set_controltrigger = fetchdecode_set_controltrigger,
+ .get_stream_id = fetchunit_get_stream_id,
+ .set_stream_id = fetchunit_set_stream_id,
+};
+
+void _dpu_fd_init(struct dpu_soc *dpu, unsigned int id)
+{
+ struct dpu_fetchunit *fu;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fd_ids); i++)
+ if (fd_ids[i] == id)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(fd_ids)))
+ return;
+
+ fu = dpu->fd_priv[i];
+
+ fetchdecode_pixengcfg_dynamic_src_sel(fu, FD_SRC_DISABLE);
+ fetchunit_baddr_autoupdate(fu, 0x0);
+ fetchunit_shden(fu, true);
+
+ mutex_lock(&fu->mutex);
+ dpu_fu_write(fu, BURSTBUFFERMANAGEMENT,
+ SETNUMBUFFERS(16) | SETBURSTLENGTH(16));
+ mutex_unlock(&fu->mutex);
+}
+
+int dpu_fd_init(struct dpu_soc *dpu, unsigned int id,
+ unsigned long pec_base, unsigned long base)
+{
+ struct dpu_fetchdecode *fd;
+ struct dpu_fetchunit *fu;
+ int ret;
+
+ fd = devm_kzalloc(dpu->dev, sizeof(*fd), GFP_KERNEL);
+ if (!fd)
+ return -ENOMEM;
+
+ fu = &fd->fu;
+ dpu->fd_priv[id] = fu;
+
+ fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
+ if (!fu->pec_base)
+ return -ENOMEM;
+
+ fu->base = devm_ioremap(dpu->dev, base, SZ_1K);
+ if (!fu->base)
+ return -ENOMEM;
+
+ fu->dpu = dpu;
+ fu->id = id;
+ fu->type = FU_T_FD;
+ fu->ops = &fd_ops;
+ fu->name = "fetchdecode";
+
+ mutex_init(&fu->mutex);
+
+ ret = fetchdecode_pixengcfg_dynamic_src_sel(fu, FD_SRC_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = fetchdecode_fetchtype(fu, &fd->fetchtype);
+ if (ret < 0)
+ return ret;
+
+ _dpu_fd_init(dpu, id);
+
+ return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-fetcheco.c b/drivers/gpu/imx/dpu/dpu-fetcheco.c
new file mode 100644
index 000000000000..e6d4cdecba79
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-fetcheco.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define BASEADDRESS0 0x10
+#define SOURCEBUFFERATTRIBUTES0 0x14
+#define SOURCEBUFFERDIMENSION0 0x18
+#define COLORCOMPONENTBITS0 0x1C
+#define COLORCOMPONENTSHIFT0 0x20
+#define LAYEROFFSET0 0x24
+#define CLIPWINDOWOFFSET0 0x28
+#define CLIPWINDOWDIMENSIONS0 0x2C
+#define CONSTANTCOLOR0 0x30
+#define LAYERPROPERTY0 0x34
+#define FRAMEDIMENSIONS 0x38
+#define FRAMERESAMPLING 0x3C
+#define CONTROL 0x40
+#define CONTROLTRIGGER 0x44
+#define START 0x48
+#define FETCHTYPE 0x4C
+#define BURSTBUFFERPROPERTIES 0x50
+#define HIDDENSTATUS 0x54
+
+/* Per-instance FetchEco state; just wraps the generic fetch unit */
+struct dpu_fetcheco {
+	struct dpu_fetchunit fu;
+};
+
+/*
+ * Program the chroma source buffer dimensions. For deinterlaced input
+ * only the height is halved; otherwise width/height are scaled down by
+ * the format's chroma subsampling factors (plane 1). Only the NV*
+ * two-plane YUV formats are accepted.
+ */
+static void
+fetcheco_set_src_buf_dimensions(struct dpu_fetchunit *fu,
+				unsigned int w, unsigned int h,
+				u32 fmt, bool deinterlace)
+{
+	int width, height;
+	u32 val;
+
+	if (deinterlace) {
+		width = w;
+		height = h / 2;
+	} else {
+		width = dpu_format_plane_width(w, fmt, 1);
+		height = dpu_format_plane_height(h, fmt, 1);
+	}
+
+	switch (fmt) {
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		break;
+	default:
+		WARN(1, "Unsupported FetchEco pixel format 0x%08x\n", fmt);
+		return;
+	}
+
+	val = LINEWIDTH(width) | LINECOUNT(height);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, SOURCEBUFFERDIMENSION0, val);
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Configure the unit for an NV* two-plane YUV format: program frame
+ * resampling deltas from the chroma subsampling factors, force normal
+ * raster mode, and load the per-format color component bits/shifts
+ * (with the luma fields masked out, since FetchEco handles chroma only).
+ */
+static void fetcheco_set_fmt(struct dpu_fetchunit *fu, u32 fmt, bool unused)
+{
+	u32 val, bits, shift;
+	int i, hsub, vsub;
+	unsigned int x, y;
+
+	switch (fmt) {
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		break;
+	default:
+		WARN(1, "Unsupported FetchEco pixel format 0x%08x\n", fmt);
+		return;
+	}
+
+	/* delta encoding: 0x4 = no subsampling, 0x2 = halved */
+	hsub = dpu_format_horz_chroma_subsampling(fmt);
+	switch (hsub) {
+	case 1:
+		x = 0x4;
+		break;
+	case 2:
+		x = 0x2;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	vsub = dpu_format_vert_chroma_subsampling(fmt);
+	switch (vsub) {
+	case 1:
+		y = 0x4;
+		break;
+	case 2:
+		y = 0x2;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, FRAMERESAMPLING);
+	val &= ~(DELTAX_MASK | DELTAY_MASK);
+	val |= DELTAX(x) | DELTAY(y);
+	dpu_fu_write(fu, FRAMERESAMPLING, val);
+
+	val = dpu_fu_read(fu, CONTROL);
+	val &= ~RASTERMODE_MASK;
+	val |= RASTERMODE(RASTERMODE__NORMAL);
+	dpu_fu_write(fu, CONTROL, val);
+	mutex_unlock(&fu->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) {
+		if (dpu_pixel_format_matrix[i].pixel_format == fmt) {
+			bits = dpu_pixel_format_matrix[i].bits;
+			shift = dpu_pixel_format_matrix[i].shift;
+
+			/* this unit fetches chroma only; drop the Y fields */
+			bits &= ~Y_BITS_MASK;
+			shift &= ~Y_SHIFT_MASK;
+
+			mutex_lock(&fu->mutex);
+			dpu_fu_write(fu, COLORCOMPONENTBITS0, bits);
+			dpu_fu_write(fu, COLORCOMPONENTSHIFT0, shift);
+			mutex_unlock(&fu->mutex);
+			return;
+		}
+	}
+
+	WARN_ON(1);
+}
+
+/* Program the layer x/y offset register under the unit lock. */
+void fetcheco_layeroffset(struct dpu_fetchunit *fu, unsigned int x,
+			  unsigned int y)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, LAYEROFFSET0, LAYERXOFFSET(x) | LAYERYOFFSET(y));
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetcheco_layeroffset);
+
+/* Program the clip window x/y offset register under the unit lock. */
+void fetcheco_clipoffset(struct dpu_fetchunit *fu, unsigned int x,
+			 unsigned int y)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CLIPWINDOWOFFSET0,
+		     CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y));
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetcheco_clipoffset);
+
+/* Program the clip window width/height register under the unit lock. */
+void fetcheco_clipdimensions(struct dpu_fetchunit *fu, unsigned int w,
+			     unsigned int h)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CLIPWINDOWDIMENSIONS0,
+		     CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h));
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetcheco_clipdimensions);
+
+/*
+ * Program the frame width/height; the height is halved when the frame
+ * is to be deinterlaced (one field per frame).
+ */
+static void
+fetcheco_set_framedimensions(struct dpu_fetchunit *fu,
+			     unsigned int w, unsigned int h,
+			     bool deinterlace)
+{
+	unsigned int height = deinterlace ? h / 2 : h;
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, FRAMEDIMENSIONS, FRAMEWIDTH(w) | FRAMEHEIGHT(height));
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Overwrite the frame resampling x/y deltas with the given values,
+ * preserving the remaining bits of the register.
+ */
+void fetcheco_frameresampling(struct dpu_fetchunit *fu, unsigned int x,
+			      unsigned int y)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, FRAMERESAMPLING);
+	val &= ~(DELTAX_MASK | DELTAY_MASK);
+	val |= DELTAX(x) | DELTAY(y);
+	dpu_fu_write(fu, FRAMERESAMPLING, val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetcheco_frameresampling);
+
+/* Kick shadow token generation so pending shadow registers get latched. */
+static void fetcheco_set_controltrigger(struct dpu_fetchunit *fu)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN);
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Read back and validate the hardware fetch type of this unit.
+ * Returns 0 and stores the type in @type, or -EINVAL if the register
+ * reports an unknown value.
+ */
+int fetcheco_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type)
+{
+	struct dpu_soc *dpu = fu->dpu;
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, FETCHTYPE);
+	val &= FETCHTYPE_MASK;
+	mutex_unlock(&fu->mutex);
+
+	switch (val) {
+	case FETCHTYPE__DECODE:
+	case FETCHTYPE__LAYER:
+	case FETCHTYPE__WARP:
+	case FETCHTYPE__ECO:
+	case FETCHTYPE__PERSP:
+	case FETCHTYPE__ROT:
+	case FETCHTYPE__DECODEL:
+	case FETCHTYPE__LAYERL:
+	case FETCHTYPE__ROTL:
+		break;
+	default:
+		dev_warn(dpu->dev, "Invalid fetch type %u for FetchEco%d\n",
+				val, fu->id);
+		return -EINVAL;
+	}
+
+	*type = val;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fetcheco_fetchtype);
+
+/*
+ * Map a FetchEco instance number to its DPU block id, or ID_NONE (with
+ * a warning) for an unknown instance.
+ */
+dpu_block_id_t fetcheco_get_block_id(struct dpu_fetchunit *fu)
+{
+	switch (fu->id) {
+	case 0:
+		return ID_FETCHECO0;
+	case 1:
+		return ID_FETCHECO1;
+	case 2:
+		return ID_FETCHECO2;
+	case 9:
+		return ID_FETCHECO9;
+	}
+
+	WARN_ON(1);
+	return ID_NONE;
+}
+EXPORT_SYMBOL_GPL(fetcheco_get_block_id);
+
+/*
+ * Acquire exclusive use of the FetchEco unit with the given hardware id.
+ * Returns the unit, -EINVAL for an unknown id, or -EBUSY if it is
+ * already claimed. Release with dpu_fe_put().
+ */
+struct dpu_fetchunit *dpu_fe_get(struct dpu_soc *dpu, int id)
+{
+	struct dpu_fetchunit *fu;
+	int i;
+
+	/* translate the hardware id into the fe_priv[] slot index */
+	for (i = 0; i < ARRAY_SIZE(fe_ids); i++)
+		if (fe_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(fe_ids))
+		return ERR_PTR(-EINVAL);
+
+	fu = dpu->fe_priv[i];
+
+	mutex_lock(&fu->mutex);
+
+	if (fu->inuse) {
+		mutex_unlock(&fu->mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	fu->inuse = true;
+
+	mutex_unlock(&fu->mutex);
+
+	return fu;
+}
+EXPORT_SYMBOL_GPL(dpu_fe_get);
+
+/* Release a FetchEco unit previously acquired with dpu_fe_get(). */
+void dpu_fe_put(struct dpu_fetchunit *fu)
+{
+	mutex_lock(&fu->mutex);
+	fu->inuse = false;
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_fe_put);
+
+/* Fetch unit ops: generic helpers plus the FetchEco-specific hooks */
+static const struct dpu_fetchunit_ops fe_ops = {
+	.set_burstlength	= fetchunit_set_burstlength,
+	.set_baseaddress	= fetchunit_set_baseaddress,
+	.set_src_bpp		= fetchunit_set_src_bpp,
+	.set_src_stride		= fetchunit_set_src_stride,
+	.set_src_buf_dimensions	= fetcheco_set_src_buf_dimensions,
+	.set_fmt		= fetcheco_set_fmt,
+	.enable_src_buf		= fetchunit_enable_src_buf,
+	.disable_src_buf	= fetchunit_disable_src_buf,
+	.is_enabled		= fetchunit_is_enabled,
+	.set_framedimensions	= fetcheco_set_framedimensions,
+	.set_controltrigger	= fetcheco_set_controltrigger,
+	.get_stream_id		= fetchunit_get_stream_id,
+	.set_stream_id		= fetchunit_set_stream_id,
+};
+
+/*
+ * Bring one FetchEco unit into its default state: shadow enable on and
+ * the burst buffer management register programmed with 16 buffers /
+ * burst length 16.
+ */
+void _dpu_fe_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_fetchunit *fu;
+	int i;
+
+	/* translate the hardware id into the fe_priv[] slot index */
+	for (i = 0; i < ARRAY_SIZE(fe_ids); i++)
+		if (fe_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(fe_ids)))
+		return;
+
+	fu = dpu->fe_priv[i];
+
+	fetchunit_shden(fu, true);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, BURSTBUFFERMANAGEMENT,
+			SETNUMBUFFERS(16) | SETBURSTLENGTH(16));
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Allocate and initialize one FetchEco unit.
+ *
+ * @pec_base: physical address of the unit's pixel engine config registers
+ * @base:     physical address of the unit's main register block
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int dpu_fe_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long pec_base, unsigned long base)
+{
+	struct dpu_fetcheco *fe;
+	struct dpu_fetchunit *fu;
+	int i;
+
+	fe = devm_kzalloc(dpu->dev, sizeof(*fe), GFP_KERNEL);
+	if (!fe)
+		return -ENOMEM;
+
+	/* store at the fe_ids[]-mapped slot, matching the lookup helpers */
+	for (i = 0; i < ARRAY_SIZE(fe_ids); i++)
+		if (fe_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(fe_ids))
+		return -EINVAL;
+
+	fu = &fe->fu;
+	dpu->fe_priv[i] = fu;
+
+	fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
+	if (!fu->pec_base)
+		return -ENOMEM;
+
+	fu->base = devm_ioremap(dpu->dev, base, SZ_128);
+	if (!fu->base)
+		return -ENOMEM;
+
+	fu->dpu = dpu;
+	fu->id = id;
+	fu->type = FU_T_FE;
+	fu->ops = &fe_ops;
+	fu->name = "fetcheco";
+
+	mutex_init(&fu->mutex);
+
+	_dpu_fe_init(dpu, id);
+
+	return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-fetchlayer.c b/drivers/gpu/imx/dpu/dpu-fetchlayer.c
new file mode 100644
index 000000000000..c039c154c565
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-fetchlayer.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define PIXENGCFG_STATUS 0x8
+#define BASEADDRESS(n) (0x10 + (n) * 0x28)
+#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28)
+#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28)
+#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28)
+#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28)
+#define LAYEROFFSET(n) (0x24 + (n) * 0x28)
+#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28)
+#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28)
+#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28)
+#define LAYERPROPERTY(n) (0x34 + (n) * 0x28)
+#define FRAMEDIMENSIONS 0x150
+#define FRAMERESAMPLING 0x154
+#define CONTROL 0x158
+#define TRIGGERENABLE 0x15C
+#define SHDLDREQ(lm) ((lm) & 0xFF)
+#define CONTROLTRIGGER 0x160
+#define START 0x164
+#define FETCHTYPE 0x168
+#define BURSTBUFFERPROPERTIES 0x16C
+#define STATUS 0x170
+#define HIDDENSTATUS 0x174
+
+/* Per-instance FetchLayer state: generic fetch unit plus cached fetch type */
+struct dpu_fetchlayer {
+	struct dpu_fetchunit fu;
+	fetchtype_t fetchtype;
+};
+
+/* Program the sub-layer's source buffer line width/count. */
+static void
+fetchlayer_set_src_buf_dimensions(struct dpu_fetchunit *fu,
+				  unsigned int w, unsigned int h,
+				  u32 unused1, bool unused2)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, SOURCEBUFFERDIMENSION(fu->sub_id),
+		     LINEWIDTH(w) | LINECOUNT(h));
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Configure the sub-layer for a pixel format: YUV conversion is forced
+ * off and the per-format color component bits/shifts are loaded from
+ * the format matrix. Warns if the format is unknown.
+ */
+static void fetchlayer_set_fmt(struct dpu_fetchunit *fu, u32 fmt, bool unused)
+{
+	u32 val, bits, shift;
+	int i, sub_id = fu->sub_id;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, LAYERPROPERTY(sub_id));
+	val &= ~YUVCONVERSIONMODE_MASK;
+	val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF);
+	dpu_fu_write(fu, LAYERPROPERTY(sub_id), val);
+	mutex_unlock(&fu->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) {
+		if (dpu_pixel_format_matrix[i].pixel_format == fmt) {
+			bits = dpu_pixel_format_matrix[i].bits;
+			shift = dpu_pixel_format_matrix[i].shift;
+
+			mutex_lock(&fu->mutex);
+			dpu_fu_write(fu, COLORCOMPONENTBITS(sub_id), bits);
+			dpu_fu_write(fu, COLORCOMPONENTSHIFT(sub_id), shift);
+			mutex_unlock(&fu->mutex);
+			return;
+		}
+	}
+
+	WARN_ON(1);
+}
+
+/* Program the overall frame width/height (deinterlace flag unused here). */
+static void
+fetchlayer_set_framedimensions(struct dpu_fetchunit *fu, unsigned int w,
+			       unsigned int h, bool unused)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, FRAMEDIMENSIONS, FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Program the sub-layer's constant RGBA color.
+ *
+ * Index CONSTANTCOLOR by fu->sub_id: all other per-sub-layer registers
+ * (BASEADDRESS, LAYERPROPERTY, ...) are indexed by sub_id, and fu->id is
+ * the unit instance number, not the sub-layer number.
+ */
+void fetchlayer_rgb_constantcolor(struct dpu_fetchunit *fu,
+					u8 r, u8 g, u8 b, u8 a)
+{
+	u32 val;
+
+	val = rgb_color(r, g, b, a);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CONSTANTCOLOR(fu->sub_id), val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchlayer_rgb_constantcolor);
+
+/*
+ * Program the sub-layer's constant YUV color. Indexed by fu->sub_id for
+ * consistency with the other per-sub-layer registers (fu->id is the unit
+ * instance number).
+ */
+void fetchlayer_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v)
+{
+	u32 val;
+
+	val = yuv_color(y, u, v);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CONSTANTCOLOR(fu->sub_id), val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchlayer_yuv_constantcolor);
+
+/* Kick shadow token generation so pending shadow registers get latched. */
+static void fetchlayer_set_controltrigger(struct dpu_fetchunit *fu)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN);
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Read back and validate the hardware fetch type of this unit.
+ * Returns 0 and stores the type in @type, or -EINVAL if the register
+ * reports an unknown value.
+ */
+int fetchlayer_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type)
+{
+	struct dpu_soc *dpu = fu->dpu;
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, FETCHTYPE);
+	val &= FETCHTYPE_MASK;
+	mutex_unlock(&fu->mutex);
+
+	switch (val) {
+	case FETCHTYPE__DECODE:
+	case FETCHTYPE__LAYER:
+	case FETCHTYPE__WARP:
+	case FETCHTYPE__ECO:
+	case FETCHTYPE__PERSP:
+	case FETCHTYPE__ROT:
+	case FETCHTYPE__DECODEL:
+	case FETCHTYPE__LAYERL:
+	case FETCHTYPE__ROTL:
+		break;
+	default:
+		dev_warn(dpu->dev, "Invalid fetch type %u for FetchLayer%d\n",
+				val, fu->id);
+		return -EINVAL;
+	}
+
+	*type = val;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fetchlayer_fetchtype);
+
+/*
+ * Acquire exclusive use of the FetchLayer unit with the given hardware
+ * id. Returns the unit, -EINVAL for an unknown id, or -EBUSY if it is
+ * already claimed. Release with dpu_fl_put().
+ */
+struct dpu_fetchunit *dpu_fl_get(struct dpu_soc *dpu, int id)
+{
+	struct dpu_fetchunit *fu;
+	int i;
+
+	/* translate the hardware id into the fl_priv[] slot index */
+	for (i = 0; i < ARRAY_SIZE(fl_ids); i++)
+		if (fl_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(fl_ids))
+		return ERR_PTR(-EINVAL);
+
+	fu = dpu->fl_priv[i];
+
+	mutex_lock(&fu->mutex);
+
+	if (fu->inuse) {
+		mutex_unlock(&fu->mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	fu->inuse = true;
+
+	mutex_unlock(&fu->mutex);
+
+	return fu;
+}
+EXPORT_SYMBOL_GPL(dpu_fl_get);
+
+/* Release a FetchLayer unit previously acquired with dpu_fl_get(). */
+void dpu_fl_put(struct dpu_fetchunit *fu)
+{
+	mutex_lock(&fu->mutex);
+	fu->inuse = false;
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_fl_put);
+
+/* Fetch unit ops: generic helpers plus the FetchLayer-specific hooks */
+static const struct dpu_fetchunit_ops fl_ops = {
+	.set_burstlength	= fetchunit_set_burstlength,
+	.set_baseaddress	= fetchunit_set_baseaddress,
+	.set_src_bpp		= fetchunit_set_src_bpp,
+	.set_src_stride		= fetchunit_set_src_stride,
+	.set_src_buf_dimensions	= fetchlayer_set_src_buf_dimensions,
+	.set_fmt		= fetchlayer_set_fmt,
+	.enable_src_buf		= fetchunit_enable_src_buf,
+	.disable_src_buf	= fetchunit_disable_src_buf,
+	.is_enabled		= fetchunit_is_enabled,
+	.set_framedimensions	= fetchlayer_set_framedimensions,
+	.set_controltrigger	= fetchlayer_set_controltrigger,
+	.get_stream_id		= fetchunit_get_stream_id,
+	.set_stream_id		= fetchunit_set_stream_id,
+};
+
+/*
+ * Bring one FetchLayer unit into its default state: base address
+ * auto-update off, shadow enable on, sticky shadow load requests for
+ * all sub-layers, source buffer disabled, and burst buffer management
+ * set to 16 buffers / burst length 16.
+ */
+void _dpu_fl_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_fetchunit *fu;
+	int i;
+
+	/* translate the hardware id into the fl_priv[] slot index */
+	for (i = 0; i < ARRAY_SIZE(fl_ids); i++)
+		if (fl_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(fl_ids)))
+		return;
+
+	fu = dpu->fl_priv[i];
+
+	fetchunit_baddr_autoupdate(fu, 0x0);
+	fetchunit_shden(fu, true);
+	fetchunit_shdldreq_sticky(fu, 0xFF);
+	fetchunit_disable_src_buf(fu);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, BURSTBUFFERMANAGEMENT,
+			SETNUMBUFFERS(16) | SETBURSTLENGTH(16));
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Allocate and initialize one FetchLayer unit.
+ *
+ * @pec_base: physical address of the unit's pixel engine config registers
+ * @base:     physical address of the unit's main register block
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int dpu_fl_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long pec_base, unsigned long base)
+{
+	struct dpu_fetchlayer *fl;
+	struct dpu_fetchunit *fu;
+	int i, ret;
+
+	fl = devm_kzalloc(dpu->dev, sizeof(*fl), GFP_KERNEL);
+	if (!fl)
+		return -ENOMEM;
+
+	/*
+	 * Store the unit at the fl_ids[]-mapped slot, so that dpu_fl_get()
+	 * and _dpu_fl_init() (which index fl_priv[] via fl_ids[]) find it.
+	 */
+	for (i = 0; i < ARRAY_SIZE(fl_ids); i++)
+		if (fl_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(fl_ids))
+		return -EINVAL;
+
+	fu = &fl->fu;
+	dpu->fl_priv[i] = fu;
+
+	/* map the PEC registers at pec_base, not the main register base */
+	fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
+	if (!fu->pec_base)
+		return -ENOMEM;
+
+	fu->base = devm_ioremap(dpu->dev, base, SZ_512);
+	if (!fu->base)
+		return -ENOMEM;
+
+	fu->dpu = dpu;
+	fu->id = id;
+	fu->sub_id = 0;
+	fu->type = FU_T_FL;
+	fu->ops = &fl_ops;
+	fu->name = "fetchlayer";
+
+	mutex_init(&fu->mutex);
+
+	ret = fetchlayer_fetchtype(fu, &fl->fetchtype);
+	if (ret < 0)
+		return ret;
+
+	_dpu_fl_init(dpu, id);
+
+	return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-fetchunit.c b/drivers/gpu/imx/dpu/dpu-fetchunit.c
new file mode 100644
index 000000000000..66aa75bd6bd6
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-fetchunit.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2018-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define BASEADDRESS(n) (0x10 + (n) * 0x28)
+#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28)
+#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28)
+#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28)
+#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28)
+#define LAYEROFFSET(n) (0x24 + (n) * 0x28)
+#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28)
+#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28)
+#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28)
+#define LAYERPROPERTY(n) (0x34 + (n) * 0x28)
+
+/* Enable/disable shadow registers (SHDEN) for this fetch unit. */
+void fetchunit_shden(struct dpu_fetchunit *fu, bool enable)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, STATICCONTROL);
+	if (enable)
+		val |= SHDEN;
+	else
+		val &= ~SHDEN;
+	dpu_fu_write(fu, STATICCONTROL, val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_shden);
+
+/*
+ * Select, per sub-layer bit in @layer_mask, whether the base address
+ * is auto-updated by hardware.
+ */
+void fetchunit_baddr_autoupdate(struct dpu_fetchunit *fu, u8 layer_mask)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, STATICCONTROL);
+	val &= ~BASEADDRESSAUTOUPDATE_MASK;
+	val |= BASEADDRESSAUTOUPDATE(layer_mask);
+	dpu_fu_write(fu, STATICCONTROL, val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_baddr_autoupdate);
+
+/* Make shadow load requests sticky for the sub-layers in @layer_mask. */
+void fetchunit_shdldreq_sticky(struct dpu_fetchunit *fu, u8 layer_mask)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, STATICCONTROL);
+	val &= ~SHDLDREQSTICKY_MASK;
+	val |= SHDLDREQSTICKY(layer_mask);
+	dpu_fu_write(fu, STATICCONTROL, val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_shdldreq_sticky);
+
+/* Fix the AXI burst length at 16, leaving the other BBM fields intact. */
+void fetchunit_set_burstlength(struct dpu_fetchunit *fu)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, BURSTBUFFERMANAGEMENT);
+	val &= ~SETBURSTLENGTH_MASK;
+	val |= SETBURSTLENGTH(16);
+	dpu_fu_write(fu, BURSTBUFFERMANAGEMENT, val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_set_burstlength);
+
+/* Program the DMA base address of the unit's active sub-layer. */
+void fetchunit_set_baseaddress(struct dpu_fetchunit *fu, dma_addr_t baddr)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, BASEADDRESS(fu->sub_id), baddr);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_set_baseaddress);
+
+/*
+ * Program the source bits-per-pixel. 0x3f0000 is the BITSPERPIXEL
+ * field within SOURCEBUFFERATTRIBUTES.
+ */
+void fetchunit_set_src_bpp(struct dpu_fetchunit *fu, int bpp)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id));
+	val &= ~0x3f0000;
+	val |= BITSPERPIXEL(bpp);
+	dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id), val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_set_src_bpp);
+
+/*
+ * Program the source line stride. 0xffff is the STRIDE field within
+ * SOURCEBUFFERATTRIBUTES.
+ */
+void fetchunit_set_src_stride(struct dpu_fetchunit *fu, unsigned int stride)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id));
+	val &= ~0xffff;
+	val |= STRIDE(stride);
+	dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id), val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_set_src_stride);
+
+/* Set SOURCEBUFFERENABLE in the active sub-layer's LAYERPROPERTY. */
+void fetchunit_enable_src_buf(struct dpu_fetchunit *fu)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id));
+	val |= SOURCEBUFFERENABLE;
+	dpu_fu_write(fu, LAYERPROPERTY(fu->sub_id), val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_enable_src_buf);
+
+/* Clear SOURCEBUFFERENABLE in the active sub-layer's LAYERPROPERTY. */
+void fetchunit_disable_src_buf(struct dpu_fetchunit *fu)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id));
+	val &= ~SOURCEBUFFERENABLE;
+	dpu_fu_write(fu, LAYERPROPERTY(fu->sub_id), val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchunit_disable_src_buf);
+
+/* Return true if the active sub-layer's source buffer is enabled. */
+bool fetchunit_is_enabled(struct dpu_fetchunit *fu)
+{
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id));
+	mutex_unlock(&fu->mutex);
+
+	return !!(val & SOURCEBUFFERENABLE);
+}
+EXPORT_SYMBOL_GPL(fetchunit_is_enabled);
+
+/*
+ * Return the display stream this unit is routed to; a NULL unit reads
+ * as DPU_PLANE_SRC_DISABLED (with a warning).
+ */
+unsigned int fetchunit_get_stream_id(struct dpu_fetchunit *fu)
+{
+	if (WARN_ON(!fu))
+		return DPU_PLANE_SRC_DISABLED;
+
+	return fu->stream_id;
+}
+EXPORT_SYMBOL_GPL(fetchunit_get_stream_id);
+
+/*
+ * Record the display stream this unit feeds; only the two display
+ * streams and the disabled state are accepted.
+ */
+void fetchunit_set_stream_id(struct dpu_fetchunit *fu, unsigned int id)
+{
+	if (WARN_ON(!fu))
+		return;
+
+	switch (id) {
+	case DPU_PLANE_SRC_TO_DISP_STREAM0:
+	case DPU_PLANE_SRC_TO_DISP_STREAM1:
+	case DPU_PLANE_SRC_DISABLED:
+		fu->stream_id = id;
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+EXPORT_SYMBOL_GPL(fetchunit_set_stream_id);
+
+/* NULL-safe predicate: is this unit a FetchDecode? */
+bool fetchunit_is_fetchdecode(struct dpu_fetchunit *fu)
+{
+	return !WARN_ON(!fu) && fu->type == FU_T_FD;
+}
+EXPORT_SYMBOL_GPL(fetchunit_is_fetchdecode);
+
+/* NULL-safe predicate: is this unit a FetchEco? */
+bool fetchunit_is_fetcheco(struct dpu_fetchunit *fu)
+{
+	return !WARN_ON(!fu) && fu->type == FU_T_FE;
+}
+EXPORT_SYMBOL_GPL(fetchunit_is_fetcheco);
+
+/* NULL-safe predicate: is this unit a FetchLayer? */
+bool fetchunit_is_fetchlayer(struct dpu_fetchunit *fu)
+{
+	return !WARN_ON(!fu) && fu->type == FU_T_FL;
+}
+EXPORT_SYMBOL_GPL(fetchunit_is_fetchlayer);
+
+/* NULL-safe predicate: is this unit a FetchWarp? */
+bool fetchunit_is_fetchwarp(struct dpu_fetchunit *fu)
+{
+	return !WARN_ON(!fu) && fu->type == FU_T_FW;
+}
+EXPORT_SYMBOL_GPL(fetchunit_is_fetchwarp);
diff --git a/drivers/gpu/imx/dpu/dpu-fetchwarp.c b/drivers/gpu/imx/dpu/dpu-fetchwarp.c
new file mode 100644
index 000000000000..ac5a10a27261
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-fetchwarp.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2018-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define PIXENGCFG_STATUS 0x8
+#define BASEADDRESS(n) (0x10 + (n) * 0x28)
+#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28)
+#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28)
+#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28)
+#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28)
+#define LAYEROFFSET(n) (0x24 + (n) * 0x28)
+#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28)
+#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28)
+#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28)
+#define LAYERPROPERTY(n) (0x34 + (n) * 0x28)
+#define FRAMEDIMENSIONS 0x150
+#define FRAMERESAMPLING 0x154
+#define WARPCONTROL 0x158
+#define ARBSTARTX 0x15c
+#define ARBSTARTY 0x160
+#define ARBDELTA 0x164
+#define FIRPOSITIONS 0x168
+#define FIRCOEFFICIENTS 0x16c
+#define CONTROL 0x170
+#define TRIGGERENABLE 0x174
+#define SHDLDREQ(lm) ((lm) & 0xFF)
+#define CONTROLTRIGGER 0x178
+#define START 0x17c
+#define FETCHTYPE 0x180
+#define BURSTBUFFERPROPERTIES 0x184
+#define STATUS 0x188
+#define HIDDENSTATUS 0x18c
+
+/* Per-instance FetchWarp state: generic fetch unit plus cached fetch type */
+struct dpu_fetchwarp {
+	struct dpu_fetchunit fu;
+	fetchtype_t fetchtype;
+};
+
+/* Program the sub-layer's source buffer line width/count. */
+static void
+fetchwarp_set_src_buf_dimensions(struct dpu_fetchunit *fu,
+				 unsigned int w, unsigned int h,
+				 u32 unused1, bool unused2)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, SOURCEBUFFERDIMENSION(fu->sub_id),
+		     LINEWIDTH(w) | LINECOUNT(h));
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Configure the sub-layer for a pixel format: explicitly program the
+ * YUV conversion mode to OFF (not just clear the field), mirroring
+ * fetchlayer_set_fmt(), and load the per-format color component
+ * bits/shifts. Warns if the format is unknown.
+ */
+static void fetchwarp_set_fmt(struct dpu_fetchunit *fu,
+			      u32 fmt, bool unused)
+{
+	u32 val, bits, shift;
+	int i, sub_id = fu->sub_id;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, LAYERPROPERTY(sub_id));
+	val &= ~YUVCONVERSIONMODE_MASK;
+	val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF);
+	dpu_fu_write(fu, LAYERPROPERTY(sub_id), val);
+	mutex_unlock(&fu->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) {
+		if (dpu_pixel_format_matrix[i].pixel_format == fmt) {
+			bits = dpu_pixel_format_matrix[i].bits;
+			shift = dpu_pixel_format_matrix[i].shift;
+
+			mutex_lock(&fu->mutex);
+			dpu_fu_write(fu, COLORCOMPONENTBITS(sub_id), bits);
+			dpu_fu_write(fu, COLORCOMPONENTSHIFT(sub_id), shift);
+			mutex_unlock(&fu->mutex);
+			return;
+		}
+	}
+
+	WARN_ON(1);
+}
+
+/* Program the overall frame width/height (deinterlace flag unused here). */
+static void
+fetchwarp_set_framedimensions(struct dpu_fetchunit *fu,
+			      unsigned int w, unsigned int h, bool unused)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, FRAMEDIMENSIONS, FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Program the sub-layer's constant RGBA color.
+ *
+ * Index CONSTANTCOLOR by fu->sub_id: all other per-sub-layer registers
+ * (BASEADDRESS, LAYERPROPERTY, ...) are indexed by sub_id, and fu->id is
+ * the unit instance number, not the sub-layer number.
+ */
+void fetchwarp_rgb_constantcolor(struct dpu_fetchunit *fu,
+					u8 r, u8 g, u8 b, u8 a)
+{
+	u32 val;
+
+	val = rgb_color(r, g, b, a);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CONSTANTCOLOR(fu->sub_id), val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchwarp_rgb_constantcolor);
+
+/*
+ * Program the sub-layer's constant YUV color. Indexed by fu->sub_id for
+ * consistency with the other per-sub-layer registers (fu->id is the unit
+ * instance number).
+ */
+void fetchwarp_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v)
+{
+	u32 val;
+
+	val = yuv_color(y, u, v);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CONSTANTCOLOR(fu->sub_id), val);
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(fetchwarp_yuv_constantcolor);
+
+/* Kick shadow token generation so pending shadow registers get latched. */
+static void fetchwarp_set_controltrigger(struct dpu_fetchunit *fu)
+{
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN);
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Read back and validate the hardware fetch type of this unit.
+ * Returns 0 and stores the type in @type, or -EINVAL if the register
+ * reports an unknown value.
+ */
+int fetchwarp_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type)
+{
+	struct dpu_soc *dpu = fu->dpu;
+	u32 val;
+
+	mutex_lock(&fu->mutex);
+	val = dpu_fu_read(fu, FETCHTYPE);
+	val &= FETCHTYPE_MASK;
+	mutex_unlock(&fu->mutex);
+
+	switch (val) {
+	case FETCHTYPE__DECODE:
+	case FETCHTYPE__LAYER:
+	case FETCHTYPE__WARP:
+	case FETCHTYPE__ECO:
+	case FETCHTYPE__PERSP:
+	case FETCHTYPE__ROT:
+	case FETCHTYPE__DECODEL:
+	case FETCHTYPE__LAYERL:
+	case FETCHTYPE__ROTL:
+		break;
+	default:
+		dev_warn(dpu->dev, "Invalid fetch type %u for FetchWarp%d\n",
+				val, fu->id);
+		return -EINVAL;
+	}
+
+	*type = val;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fetchwarp_fetchtype);
+
+/*
+ * Acquire exclusive use of the FetchWarp unit with the given hardware
+ * id. Returns the unit, -EINVAL for an unknown id, or -EBUSY if it is
+ * already claimed. Release with dpu_fw_put().
+ */
+struct dpu_fetchunit *dpu_fw_get(struct dpu_soc *dpu, int id)
+{
+	struct dpu_fetchunit *fu;
+	int i;
+
+	/* translate the hardware id into the fw_priv[] slot index */
+	for (i = 0; i < ARRAY_SIZE(fw_ids); i++)
+		if (fw_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(fw_ids))
+		return ERR_PTR(-EINVAL);
+
+	fu = dpu->fw_priv[i];
+
+	mutex_lock(&fu->mutex);
+
+	if (fu->inuse) {
+		mutex_unlock(&fu->mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	fu->inuse = true;
+
+	mutex_unlock(&fu->mutex);
+
+	return fu;
+}
+EXPORT_SYMBOL_GPL(dpu_fw_get);
+
+/* Release a FetchWarp unit previously acquired with dpu_fw_get(). */
+void dpu_fw_put(struct dpu_fetchunit *fu)
+{
+	mutex_lock(&fu->mutex);
+	fu->inuse = false;
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_fw_put);
+
+/* Fetch unit ops: generic helpers plus the FetchWarp-specific hooks */
+static const struct dpu_fetchunit_ops fw_ops = {
+	.set_burstlength	= fetchunit_set_burstlength,
+	.set_baseaddress	= fetchunit_set_baseaddress,
+	.set_src_bpp		= fetchunit_set_src_bpp,
+	.set_src_stride		= fetchunit_set_src_stride,
+	.set_src_buf_dimensions	= fetchwarp_set_src_buf_dimensions,
+	.set_fmt		= fetchwarp_set_fmt,
+	.enable_src_buf		= fetchunit_enable_src_buf,
+	.disable_src_buf	= fetchunit_disable_src_buf,
+	.is_enabled		= fetchunit_is_enabled,
+	.set_framedimensions	= fetchwarp_set_framedimensions,
+	.set_controltrigger	= fetchwarp_set_controltrigger,
+	.get_stream_id		= fetchunit_get_stream_id,
+	.set_stream_id		= fetchunit_set_stream_id,
+};
+
+/*
+ * Bring one FetchWarp unit into its default state: base address
+ * auto-update off, shadow enable on, sticky shadow load requests for
+ * all sub-layers, source buffer disabled, and burst buffer management
+ * set to 16 buffers / burst length 16.
+ */
+void _dpu_fw_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_fetchunit *fu;
+	int i;
+
+	/* translate the hardware id into the fw_priv[] slot index */
+	for (i = 0; i < ARRAY_SIZE(fw_ids); i++)
+		if (fw_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(fw_ids)))
+		return;
+
+	fu = dpu->fw_priv[i];
+
+	fetchunit_baddr_autoupdate(fu, 0x0);
+	fetchunit_shden(fu, true);
+	fetchunit_shdldreq_sticky(fu, 0xFF);
+	fetchunit_disable_src_buf(fu);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, BURSTBUFFERMANAGEMENT,
+			SETNUMBUFFERS(16) | SETBURSTLENGTH(16));
+	mutex_unlock(&fu->mutex);
+}
+
+/*
+ * Allocate and initialize one FetchWarp unit.
+ *
+ * @pec_base: physical address of the unit's pixel engine config registers
+ * @base:     physical address of the unit's main register block
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int dpu_fw_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long pec_base, unsigned long base)
+{
+	struct dpu_fetchwarp *fw;
+	struct dpu_fetchunit *fu;
+	int i, ret;
+
+	fw = devm_kzalloc(dpu->dev, sizeof(*fw), GFP_KERNEL);
+	if (!fw)
+		return -ENOMEM;
+
+	/* store at the fw_ids[]-mapped slot, matching the lookup helpers */
+	for (i = 0; i < ARRAY_SIZE(fw_ids); i++)
+		if (fw_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(fw_ids))
+		return -EINVAL;
+
+	fu = &fw->fu;
+	dpu->fw_priv[i] = fu;
+
+	/* map the PEC registers at pec_base, not the main register base */
+	fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
+	if (!fu->pec_base)
+		return -ENOMEM;
+
+	fu->base = devm_ioremap(dpu->dev, base, SZ_512);
+	if (!fu->base)
+		return -ENOMEM;
+
+	fu->dpu = dpu;
+	fu->id = id;
+	fu->sub_id = 0;
+	fu->type = FU_T_FW;
+	fu->ops = &fw_ops;
+	fu->name = "fetchwarp";
+
+	mutex_init(&fu->mutex);
+
+	ret = fetchwarp_fetchtype(fu, &fw->fetchtype);
+	if (ret < 0)
+		return ret;
+
+	_dpu_fw_init(dpu, id);
+
+	return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-framegen.c b/drivers/gpu/imx/dpu/dpu-framegen.c
new file mode 100644
index 000000000000..21041fe987a5
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-framegen.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define FGSTCTRL 0x8
+#define FGSYNCMODE_MASK 0x6
+#define HTCFG1 0xC
+#define HTOTAL(n) ((((n) - 1) & 0x3FFF) << 16)
+#define HACT(n) ((n) & 0x3FFF)
+#define HTCFG2 0x10
+#define HSEN BIT(31)
+#define HSBP(n) ((((n) - 1) & 0x3FFF) << 16)
+#define HSYNC(n) (((n) - 1) & 0x3FFF)
+#define VTCFG1 0x14
+#define VTOTAL(n) ((((n) - 1) & 0x3FFF) << 16)
+#define VACT(n) ((n) & 0x3FFF)
+#define VTCFG2 0x18
+#define VSEN BIT(31)
+#define VSBP(n) ((((n) - 1) & 0x3FFF) << 16)
+#define VSYNC(n) (((n) - 1) & 0x3FFF)
+#define INTCONFIG(n) (0x1C + 4 * (n))
+#define EN BIT(31)
+#define ROW(n) (((n) & 0x3FFF) << 16)
+#define COL(n) ((n) & 0x3FFF)
+#define PKICKCONFIG 0x2C
+#define SKICKCONFIG 0x30
+#define SECSTATCONFIG 0x34
+#define FGSRCR1 0x38
+#define FGSRCR2 0x3C
+#define FGSRCR3 0x40
+#define FGSRCR4 0x44
+#define FGSRCR5 0x48
+#define FGSRCR6 0x4C
+#define FGKSDR 0x50
+#define PACFG 0x54
+#define STARTX(n) (((n) + 1) & 0x3FFF)
+#define STARTY(n) (((((n) + 1) & 0x3FFF)) << 16)
+#define SACFG 0x58
+#define FGINCTRL 0x5C
+#define FGDM_MASK 0x7
+#define ENPRIMALPHA BIT(3)
+#define ENSECALPHA BIT(4)
+#define FGINCTRLPANIC 0x60
+#define FGCCR 0x64
+#define CCALPHA(a) (((a) & 0x1) << 30)
+#define CCRED(r) (((r) & 0x3FF) << 20)
+#define CCGREEN(g) (((g) & 0x3FF) << 10)
+#define CCBLUE(b) ((b) & 0x3FF)
+#define FGENABLE 0x68
+#define FGEN BIT(0)
+#define FGSLR 0x6C
+#define FGENSTS 0x70
+#define ENSTS BIT(0)
+#define FGTIMESTAMP 0x74
+#define LINEINDEX_MASK 0x3FFF
+#define LINEINDEX_SHIFT 0
+#define FRAMEINDEX_MASK 0xFFFFC000
+#define FRAMEINDEX_SHIFT 14
+#define FGCHSTAT 0x78
+#define SECSYNCSTAT BIT(24)
+#define FGCHSTATCLR 0x7C
+#define FGSKEWMON 0x80
+#define FGSFIFOMIN 0x84
+#define FGSFIFOMAX 0x88
+#define FGSFIFOFILLCLR 0x8C
+#define FGSREPD 0x90
+#define FGSRFTD 0x94
+
+#define KHZ 1000
+#define PLL_MIN_FREQ_HZ 648000000
+
+/* Per-instance state of one FrameGen (display timing generator). */
+struct dpu_framegen {
+	void __iomem *base;	/* mapped FrameGen register block */
+	struct clk *clk_pll;	/* PLL feeding this display stream */
+	struct clk *clk_disp;	/* pixel clock derived from the PLL */
+	struct mutex mutex;	/* serializes register access */
+	int id;			/* global FrameGen id */
+	bool inuse;		/* claimed via dpu_fg_get() */
+	struct dpu_soc *dpu;
+};
+
+/* Raw 32-bit register accessors for the FrameGen block. */
+static inline u32 dpu_fg_read(struct dpu_framegen *fg, unsigned int offset)
+{
+	return readl(fg->base + offset);
+}
+
+static inline void dpu_fg_write(struct dpu_framegen *fg,
+				unsigned int offset, u32 value)
+{
+	writel(value, fg->base + offset);
+}
+
+/*
+ * Enable frame generation, then enable the pixel link master.
+ * framegen_disable() performs the mirror sequence in reverse order.
+ */
+void framegen_enable(struct dpu_framegen *fg)
+{
+	mutex_lock(&fg->mutex);
+	dpu_fg_write(fg, FGENABLE, FGEN);
+	mutex_unlock(&fg->mutex);
+
+	dpu_pxlink_set_mst_enable(fg->dpu, fg->id, true);
+}
+EXPORT_SYMBOL_GPL(framegen_enable);
+
+/*
+ * Disable the pixel link master first, then stop frame generation --
+ * the exact reverse of framegen_enable().
+ */
+void framegen_disable(struct dpu_framegen *fg)
+{
+	dpu_pxlink_set_mst_enable(fg->dpu, fg->id, false);
+
+	mutex_lock(&fg->mutex);
+	dpu_fg_write(fg, FGENABLE, 0);
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(framegen_disable);
+
+/* Kick a shadow-load token generation via the FGSLR trigger register. */
+void framegen_shdtokgen(struct dpu_framegen *fg)
+{
+	mutex_lock(&fg->mutex);
+	dpu_fg_write(fg, FGSLR, SHDTOKGEN);
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(framegen_shdtokgen);
+
+/* Select the FrameGen synchronization mode (FGSTCTRL.FGSYNCMODE). */
+void framegen_syncmode(struct dpu_framegen *fg, fgsyncmode_t mode)
+{
+	u32 stctrl;
+
+	mutex_lock(&fg->mutex);
+	stctrl = dpu_fg_read(fg, FGSTCTRL);
+	stctrl = (stctrl & ~FGSYNCMODE_MASK) | mode;
+	dpu_fg_write(fg, FGSTCTRL, stctrl);
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(framegen_syncmode);
+
+/*
+ * Program the FrameGen timing registers from the given display mode and
+ * set the PLL and pixel clock rates accordingly.
+ */
+void framegen_cfg_videomode(struct dpu_framegen *fg, struct drm_display_mode *m)
+{
+	u32 hact, htotal, hsync, hsbp;
+	u32 vact, vtotal, vsync, vsbp;
+	u32 kick_row, kick_col;
+	u32 val;
+	unsigned long disp_clock_rate, pll_clock_rate = 0;
+	int div = 0;
+
+	hact = m->crtc_hdisplay;
+	htotal = m->crtc_htotal;
+	hsync = m->crtc_hsync_end - m->crtc_hsync_start;
+	hsbp = m->crtc_htotal - m->crtc_hsync_start;
+
+	vact = m->crtc_vdisplay;
+	vtotal = m->crtc_vtotal;
+	vsync = m->crtc_vsync_end - m->crtc_vsync_start;
+	vsbp = m->crtc_vtotal - m->crtc_vsync_start;
+
+	mutex_lock(&fg->mutex);
+	/* video mode */
+	dpu_fg_write(fg, HTCFG1, HACT(hact) | HTOTAL(htotal));
+	dpu_fg_write(fg, HTCFG2, HSYNC(hsync) | HSBP(hsbp) | HSEN);
+	dpu_fg_write(fg, VTCFG1, VACT(vact) | VTOTAL(vtotal));
+	dpu_fg_write(fg, VTCFG2, VSYNC(vsync) | VSBP(vsbp) | VSEN);
+
+	/* kick point just past the active area -- TODO confirm the +1 */
+	kick_col = hact + 1;
+	kick_row = vact;
+
+	/* primary kick config */
+	dpu_fg_write(fg, PKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+	/* secondary kick config */
+	dpu_fg_write(fg, SKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+	/* primary position config */
+	dpu_fg_write(fg, PACFG, STARTX(0) | STARTY(0));
+
+	/* clear alpha enables on both input control registers */
+	val = dpu_fg_read(fg, FGINCTRL);
+	val &= ~(ENPRIMALPHA | ENSECALPHA);
+	dpu_fg_write(fg, FGINCTRL, val);
+
+	val = dpu_fg_read(fg, FGINCTRLPANIC);
+	val &= ~(ENPRIMALPHA | ENSECALPHA);
+	dpu_fg_write(fg, FGINCTRLPANIC, val);
+
+	/* constant color */
+	dpu_fg_write(fg, FGCCR, 0);
+	mutex_unlock(&fg->mutex);
+
+	disp_clock_rate = m->clock * 1000;
+
+	/* find an even PLL divisor that lifts the PLL above its minimum */
+	do {
+		div += 2;
+		pll_clock_rate = disp_clock_rate * div;
+	} while (pll_clock_rate < PLL_MIN_FREQ_HZ);
+
+	clk_set_rate(fg->clk_pll, pll_clock_rate);
+	clk_set_rate(fg->clk_disp, disp_clock_rate);
+}
+EXPORT_SYMBOL_GPL(framegen_cfg_videomode);
+
+/* Turn the primary kick trigger on or off (PKICKCONFIG.EN). */
+void framegen_pkickconfig(struct dpu_framegen *fg, bool enable)
+{
+	u32 kick;
+
+	mutex_lock(&fg->mutex);
+	kick = dpu_fg_read(fg, PKICKCONFIG);
+	kick = enable ? (kick | EN) : (kick & ~EN);
+	dpu_fg_write(fg, PKICKCONFIG, kick);
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(framegen_pkickconfig);
+
+/* Set the secondary input start position (SACFG). */
+void framegen_sacfg(struct dpu_framegen *fg, unsigned int x, unsigned int y)
+{
+	mutex_lock(&fg->mutex);
+	dpu_fg_write(fg, SACFG, STARTX(x) | STARTY(y));
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(framegen_sacfg);
+
+/* Select the normal display mode (FGINCTRL.FGDM). */
+void framegen_displaymode(struct dpu_framegen *fg, fgdm_t mode)
+{
+	u32 inctrl;
+
+	mutex_lock(&fg->mutex);
+	inctrl = dpu_fg_read(fg, FGINCTRL);
+	inctrl = (inctrl & ~FGDM_MASK) | mode;
+	dpu_fg_write(fg, FGINCTRL, inctrl);
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(framegen_displaymode);
+
+/* Select the display mode used in panic situations (FGINCTRLPANIC.FGDM). */
+void framegen_panic_displaymode(struct dpu_framegen *fg, fgdm_t mode)
+{
+	u32 val;
+
+	mutex_lock(&fg->mutex);
+	val = dpu_fg_read(fg, FGINCTRLPANIC);
+	val &= ~FGDM_MASK;
+	val |= mode;
+	dpu_fg_write(fg, FGINCTRLPANIC, val);
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(framegen_panic_displaymode);
+
+/*
+ * Busy-poll FGENSTS.ENSTS until the FrameGen reports it has stopped,
+ * bounded by roughly three frame durations (capped at 3 seconds).
+ */
+void framegen_wait_done(struct dpu_framegen *fg, struct drm_display_mode *m)
+{
+	unsigned long timeout, pending_framedur_jiffies;
+	int frame_size = m->crtc_htotal * m->crtc_vtotal;
+	int dotclock, pending_framedur_ns;
+	u32 val;
+
+	/* pixel clock in kHz */
+	dotclock = clk_get_rate(fg->clk_disp) / KHZ;
+	if (dotclock == 0) {
+		/* fall back to display mode's clock */
+		dotclock = m->crtc_clock;
+	}
+
+	/*
+	 * The SoC designer indicates that there are two pending frames
+	 * to complete in the worst case.
+	 * So, three pending frames are enough for sure.
+	 */
+	pending_framedur_ns = div_u64((u64) 3 * frame_size * 1000000, dotclock);
+	pending_framedur_jiffies = nsecs_to_jiffies(pending_framedur_ns);
+	if (pending_framedur_jiffies > (3 * HZ)) {
+		pending_framedur_jiffies = 3 * HZ;
+
+		dev_warn(fg->dpu->dev,
+			 "truncate FrameGen%d pending frame duration to 3sec\n",
+			 fg->id);
+	}
+	timeout = jiffies + pending_framedur_jiffies;
+
+	/* poll while holding the mutex so nobody re-enables underneath us */
+	mutex_lock(&fg->mutex);
+	do {
+		val = dpu_fg_read(fg, FGENSTS);
+	} while ((val & ENSTS) && time_before(jiffies, timeout));
+	mutex_unlock(&fg->mutex);
+
+	dev_dbg(fg->dpu->dev, "FrameGen%d pending frame duration is %ums\n",
+		fg->id, jiffies_to_msecs(pending_framedur_jiffies));
+
+	if (val & ENSTS)
+		dev_err(fg->dpu->dev, "failed to wait for FrameGen%d done\n",
+			fg->id);
+}
+EXPORT_SYMBOL_GPL(framegen_wait_done);
+
+/* Extract the frame counter from a raw FGTIMESTAMP value. */
+static inline u32 framegen_frame_index(u32 stamp)
+{
+	return (stamp & FRAMEINDEX_MASK) >> FRAMEINDEX_SHIFT;
+}
+
+/* Extract the line counter from a raw FGTIMESTAMP value. */
+static inline u32 framegen_line_index(u32 stamp)
+{
+	return (stamp & LINEINDEX_MASK) >> LINEINDEX_SHIFT;
+}
+
+/* Read FGTIMESTAMP once and split it into frame and line counters. */
+void framegen_read_timestamp(struct dpu_framegen *fg,
+			     u32 *frame_index, u32 *line_index)
+{
+	u32 stamp;
+
+	mutex_lock(&fg->mutex);
+	stamp = dpu_fg_read(fg, FGTIMESTAMP);
+	*frame_index = framegen_frame_index(stamp);
+	*line_index = framegen_line_index(stamp);
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(framegen_read_timestamp);
+
+/*
+ * Poll (up to 50ms) until the FrameGen frame counter advances, which
+ * indicates that scanout is actually running.
+ */
+void framegen_wait_for_frame_counter_moving(struct dpu_framegen *fg)
+{
+	u32 frame_index, line_index, last_frame_index;
+	unsigned long timeout = jiffies + msecs_to_jiffies(50);
+
+	framegen_read_timestamp(fg, &frame_index, &line_index);
+	do {
+		last_frame_index = frame_index;
+		framegen_read_timestamp(fg, &frame_index, &line_index);
+	} while (last_frame_index == frame_index &&
+		 time_before(jiffies, timeout));
+
+	if (last_frame_index == frame_index)
+		dev_err(fg->dpu->dev,
+			"failed to wait for FrameGen%d frame counter moving\n",
+			fg->id);
+	else
+		/* frame_index is u32 - print both counters with %u */
+		dev_dbg(fg->dpu->dev,
+			"FrameGen%d frame counter moves - last %u, curr %u\n",
+			fg->id, last_frame_index, frame_index);
+}
+EXPORT_SYMBOL_GPL(framegen_wait_for_frame_counter_moving);
+
+/* Report whether the secondary channel has synced up (FGCHSTAT). */
+bool framegen_secondary_is_syncup(struct dpu_framegen *fg)
+{
+	u32 chstat;
+
+	mutex_lock(&fg->mutex);
+	chstat = dpu_fg_read(fg, FGCHSTAT);
+	mutex_unlock(&fg->mutex);
+
+	return !!(chstat & SECSYNCSTAT);
+}
+EXPORT_SYMBOL_GPL(framegen_secondary_is_syncup);
+
+/* Poll (up to 50ms) for the secondary channel to reach sync. */
+void framegen_wait_for_secondary_syncup(struct dpu_framegen *fg)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(50);
+	bool syncup;
+
+	do {
+		syncup = framegen_secondary_is_syncup(fg);
+	} while (!syncup && time_before(jiffies, timeout));
+
+	if (syncup)
+		dev_dbg(fg->dpu->dev, "FrameGen%d secondary syncup\n", fg->id);
+	else
+		dev_err(fg->dpu->dev,
+			"failed to wait for FrameGen%d secondary syncup\n",
+			fg->id);
+}
+EXPORT_SYMBOL_GPL(framegen_wait_for_secondary_syncup);
+
+/* Ungate the PLL, then the derived pixel clock, for this FrameGen. */
+void framegen_enable_clock(struct dpu_framegen *fg)
+{
+	clk_prepare_enable(fg->clk_pll);
+	clk_prepare_enable(fg->clk_disp);
+}
+EXPORT_SYMBOL_GPL(framegen_enable_clock);
+
+/*
+ * Gate the FrameGen clocks in the reverse order of
+ * framegen_enable_clock(): the pixel clock first, then its parent PLL.
+ */
+void framegen_disable_clock(struct dpu_framegen *fg)
+{
+	clk_disable_unprepare(fg->clk_disp);
+	clk_disable_unprepare(fg->clk_pll);
+}
+EXPORT_SYMBOL_GPL(framegen_disable_clock);
+
+/*
+ * Claim exclusive use of the FrameGen with the given global id.
+ * Returns the instance, -EINVAL for an unknown id or -EBUSY if already
+ * claimed.  Release with dpu_fg_put().
+ */
+struct dpu_framegen *dpu_fg_get(struct dpu_soc *dpu, int id)
+{
+	struct dpu_framegen *fg;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fg_ids); i++)
+		if (fg_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(fg_ids))
+		return ERR_PTR(-EINVAL);
+
+	fg = dpu->fg_priv[i];
+
+	mutex_lock(&fg->mutex);
+
+	if (fg->inuse) {
+		mutex_unlock(&fg->mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	fg->inuse = true;
+
+	mutex_unlock(&fg->mutex);
+
+	return fg;
+}
+EXPORT_SYMBOL_GPL(dpu_fg_get);
+
+/* Release a FrameGen previously claimed with dpu_fg_get(). */
+void dpu_fg_put(struct dpu_framegen *fg)
+{
+	mutex_lock(&fg->mutex);
+	fg->inuse = false;
+	mutex_unlock(&fg->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_fg_put);
+
+/* One-time hardware setup for a FrameGen: start with syncing disabled. */
+void _dpu_fg_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_framegen *fg;
+	int i;
+
+	/* map the global id to its index in the private-data array */
+	for (i = 0; i < ARRAY_SIZE(fg_ids); i++)
+		if (fg_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(fg_ids)))
+		return;
+
+	fg = dpu->fg_priv[i];
+
+	framegen_syncmode(fg, FGSYNCMODE__OFF);
+}
+
+/*
+ * Allocate and initialize a FrameGen instance.
+ *
+ * @unused: kept for signature compatibility with the other unit init
+ *	    helpers (FrameGen has no PEC register block)
+ * @base:   physical base address of the FrameGen register block
+ */
+int dpu_fg_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long unused, unsigned long base)
+{
+	struct dpu_framegen *fg;
+	int i;
+
+	fg = devm_kzalloc(dpu->dev, sizeof(*fg), GFP_KERNEL);
+	if (!fg)
+		return -ENOMEM;
+
+	/*
+	 * Index fg_priv[] by position in fg_ids[], matching the lookups
+	 * in dpu_fg_get() and _dpu_fg_init(), instead of the raw id.
+	 */
+	for (i = 0; i < ARRAY_SIZE(fg_ids); i++)
+		if (fg_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(fg_ids))
+		return -EINVAL;
+
+	dpu->fg_priv[i] = fg;
+
+	fg->base = devm_ioremap(dpu->dev, base, SZ_256);
+	if (!fg->base)
+		return -ENOMEM;
+
+	fg->clk_pll = devm_clk_get(dpu->dev, id ? "pll1" : "pll0");
+	if (IS_ERR(fg->clk_pll))
+		return PTR_ERR(fg->clk_pll);
+
+	fg->clk_disp = devm_clk_get(dpu->dev, id ? "disp1" : "disp0");
+	if (IS_ERR(fg->clk_disp))
+		return PTR_ERR(fg->clk_disp);
+
+	fg->dpu = dpu;
+	fg->id = id;
+	mutex_init(&fg->mutex);
+
+	_dpu_fg_init(dpu, id);
+
+	return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-hscaler.c b/drivers/gpu/imx/dpu/dpu-hscaler.c
new file mode 100644
index 000000000000..9e69c619bd3f
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-hscaler.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+#define PIXENGCFG_DYNAMIC_SRC_SEL_MASK 0x3F
+
+#define SETUP1 0xC
+#define SCALE_FACTOR_MASK 0xFFFFF
+#define SCALE_FACTOR(n) ((n) & 0xFFFFF)
+#define SETUP2 0x10
+#define PHASE_OFFSET_MASK 0x1FFFFF
+#define PHASE_OFFSET(n) ((n) & 0x1FFFFF)
+#define CONTROL 0x14
+#define OUTPUT_SIZE_MASK 0x3FFF0000
+#define OUTPUT_SIZE(n) ((((n) - 1) << 16) & OUTPUT_SIZE_MASK)
+#define FILTER_MODE 0x100
+#define SCALE_MODE 0x10
+#define MODE 0x1
+
+/*
+ * Valid upstream sources per HScaler instance; rows correspond to the
+ * instance ids {4, 5, 9} in order.  Unused trailing entries remain
+ * zero-initialized.
+ */
+static const hs_src_sel_t src_sels[3][6] = {
+	{
+		HS_SRC_SEL__DISABLE,
+		HS_SRC_SEL__FETCHDECODE0,
+		HS_SRC_SEL__MATRIX4,
+		HS_SRC_SEL__VSCALER4,
+	}, {
+		HS_SRC_SEL__DISABLE,
+		HS_SRC_SEL__FETCHDECODE1,
+		HS_SRC_SEL__MATRIX5,
+		HS_SRC_SEL__VSCALER5,
+	}, {
+		HS_SRC_SEL__DISABLE,
+		HS_SRC_SEL__MATRIX9,
+		HS_SRC_SEL__VSCALER9,
+		HS_SRC_SEL__FILTER9,
+	},
+};
+
+/* Per-instance state of one horizontal scaler unit. */
+struct dpu_hscaler {
+	void __iomem *pec_base;	/* pixel engine configuration registers */
+	void __iomem *base;	/* the unit's own register block */
+	struct mutex mutex;	/* serializes register access */
+	int id;			/* global HScaler id (4, 5 or 9) */
+	bool inuse;		/* claimed via dpu_hs_get() */
+	struct dpu_soc *dpu;
+	/* see DPU_PLANE_SRC_xxx */
+	unsigned int stream_id;
+};
+
+/* Raw register accessors: PEC block and the HScaler's own block. */
+static inline u32 dpu_pec_hs_read(struct dpu_hscaler *hs,
+				  unsigned int offset)
+{
+	return readl(hs->pec_base + offset);
+}
+
+static inline void dpu_pec_hs_write(struct dpu_hscaler *hs,
+				    unsigned int offset, u32 value)
+{
+	writel(value, hs->pec_base + offset);
+}
+
+static inline u32 dpu_hs_read(struct dpu_hscaler *hs, unsigned int offset)
+{
+	return readl(hs->base + offset);
+}
+
+static inline void dpu_hs_write(struct dpu_hscaler *hs,
+				unsigned int offset, u32 value)
+{
+	writel(value, hs->base + offset);
+}
+
+/*
+ * Route the HScaler's input to @src, if @src is a valid upstream unit
+ * for this instance (see src_sels[]).  Returns 0 on success, -EINVAL
+ * for an unknown instance or an invalid source.
+ */
+int hscaler_pixengcfg_dynamic_src_sel(struct dpu_hscaler *hs, hs_src_sel_t src)
+{
+	struct dpu_soc *dpu = hs->dpu;
+	const unsigned int hs_id_array[] = {4, 5, 9};
+	int i, j;
+	u32 val;
+
+	/* i selects the row of src_sels[] for this instance */
+	for (i = 0; i < ARRAY_SIZE(hs_id_array); i++)
+		if (hs_id_array[i] == hs->id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(hs_id_array)))
+		return -EINVAL;
+
+	mutex_lock(&hs->mutex);
+	for (j = 0; j < ARRAY_SIZE(src_sels[0]); j++) {
+		if (src_sels[i][j] == src) {
+			val = dpu_pec_hs_read(hs, PIXENGCFG_DYNAMIC);
+			val &= ~PIXENGCFG_DYNAMIC_SRC_SEL_MASK;
+			val |= src;
+			dpu_pec_hs_write(hs, PIXENGCFG_DYNAMIC, val);
+			mutex_unlock(&hs->mutex);
+			return 0;
+		}
+	}
+	mutex_unlock(&hs->mutex);
+
+	dev_err(dpu->dev, "Invalid source for HScaler%d\n", hs->id);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(hscaler_pixengcfg_dynamic_src_sel);
+
+/* Program the pixel engine clock mode (CLKEN field) for this HScaler. */
+void hscaler_pixengcfg_clken(struct dpu_hscaler *hs, pixengcfg_clken_t clken)
+{
+	u32 dyn;
+
+	mutex_lock(&hs->mutex);
+	dyn = dpu_pec_hs_read(hs, PIXENGCFG_DYNAMIC);
+	dyn &= ~CLKEN_MASK;
+	dyn |= clken << CLKEN_MASK_SHIFT;
+	dpu_pec_hs_write(hs, PIXENGCFG_DYNAMIC, dyn);
+	mutex_unlock(&hs->mutex);
+}
+EXPORT_SYMBOL_GPL(hscaler_pixengcfg_clken);
+
+/* Enable/disable shadowing of the HScaler configuration registers. */
+void hscaler_shden(struct dpu_hscaler *hs, bool enable)
+{
+	u32 ctrl;
+
+	mutex_lock(&hs->mutex);
+	ctrl = dpu_hs_read(hs, STATICCONTROL);
+	ctrl = enable ? (ctrl | SHDEN) : (ctrl & ~SHDEN);
+	dpu_hs_write(hs, STATICCONTROL, ctrl);
+	mutex_unlock(&hs->mutex);
+}
+EXPORT_SYMBOL_GPL(hscaler_shden);
+
+/*
+ * Program the horizontal scale factor derived from the source and
+ * destination widths; 0x80000 represents a 1:1 ratio.
+ */
+void hscaler_setup1(struct dpu_hscaler *hs, u32 src, u32 dst)
+{
+	struct dpu_soc *dpu = hs->dpu;
+	u32 scale_factor;
+
+	if (src == dst) {
+		/* 1:1 - no division needed */
+		scale_factor = 0x80000;
+	} else {
+		u64 factor64;
+
+		if (src > dst) {
+			factor64 = (u64)dst * 0x80000;
+			do_div(factor64, src);
+		} else {
+			factor64 = (u64)src * 0x80000;
+			do_div(factor64, dst);
+		}
+		scale_factor = (u32)factor64;
+	}
+
+	WARN_ON(scale_factor > 0x80000);
+
+	mutex_lock(&hs->mutex);
+	dpu_hs_write(hs, SETUP1, SCALE_FACTOR(scale_factor));
+	mutex_unlock(&hs->mutex);
+
+	dev_dbg(dpu->dev, "Hscaler%d scale factor 0x%08x\n",
+		hs->id, scale_factor);
+}
+EXPORT_SYMBOL_GPL(hscaler_setup1);
+
+/* Program the sampling phase offset (SETUP2). */
+void hscaler_setup2(struct dpu_hscaler *hs, u32 phase_offset)
+{
+	mutex_lock(&hs->mutex);
+	dpu_hs_write(hs, SETUP2, PHASE_OFFSET(phase_offset));
+	mutex_unlock(&hs->mutex);
+}
+EXPORT_SYMBOL_GPL(hscaler_setup2);
+
+/* Set the scaled output width in pixels (CONTROL.OUTPUT_SIZE). */
+void hscaler_output_size(struct dpu_hscaler *hs, u32 line_num)
+{
+	u32 ctrl;
+
+	mutex_lock(&hs->mutex);
+	ctrl = dpu_hs_read(hs, CONTROL);
+	ctrl = (ctrl & ~OUTPUT_SIZE_MASK) | OUTPUT_SIZE(line_num);
+	dpu_hs_write(hs, CONTROL, ctrl);
+	mutex_unlock(&hs->mutex);
+}
+EXPORT_SYMBOL_GPL(hscaler_output_size);
+
+/* Select the scaler filter mode (CONTROL.FILTER_MODE). */
+void hscaler_filter_mode(struct dpu_hscaler *hs, scaler_filter_mode_t m)
+{
+	u32 ctrl;
+
+	mutex_lock(&hs->mutex);
+	ctrl = dpu_hs_read(hs, CONTROL);
+	ctrl = (ctrl & ~FILTER_MODE) | m;
+	dpu_hs_write(hs, CONTROL, ctrl);
+	mutex_unlock(&hs->mutex);
+}
+EXPORT_SYMBOL_GPL(hscaler_filter_mode);
+
+/* Select the scale mode (CONTROL.SCALE_MODE). */
+void hscaler_scale_mode(struct dpu_hscaler *hs, scaler_scale_mode_t m)
+{
+	u32 val;
+
+	mutex_lock(&hs->mutex);
+	val = dpu_hs_read(hs, CONTROL);
+	val &= ~SCALE_MODE;
+	val |= m;
+	dpu_hs_write(hs, CONTROL, val);
+	mutex_unlock(&hs->mutex);
+}
+EXPORT_SYMBOL_GPL(hscaler_scale_mode);
+
+/* Activate or deactivate the scaler (CONTROL.MODE). */
+void hscaler_mode(struct dpu_hscaler *hs, scaler_mode_t m)
+{
+	u32 val;
+
+	mutex_lock(&hs->mutex);
+	val = dpu_hs_read(hs, CONTROL);
+	val &= ~MODE;
+	val |= m;
+	dpu_hs_write(hs, CONTROL, val);
+	mutex_unlock(&hs->mutex);
+}
+EXPORT_SYMBOL_GPL(hscaler_mode);
+
+/* Return true if the HScaler is currently in active (scaling) mode. */
+bool hscaler_is_enabled(struct dpu_hscaler *hs)
+{
+	u32 ctrl;
+
+	mutex_lock(&hs->mutex);
+	ctrl = dpu_hs_read(hs, CONTROL);
+	mutex_unlock(&hs->mutex);
+
+	return (ctrl & MODE) == SCALER_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(hscaler_is_enabled);
+
+/* Map an HScaler instance id to its DPU block id; ID_NONE if unknown. */
+dpu_block_id_t hscaler_get_block_id(struct dpu_hscaler *hs)
+{
+	switch (hs->id) {
+	case 4:
+		return ID_HSCALER4;
+	case 5:
+		return ID_HSCALER5;
+	case 9:
+		return ID_HSCALER9;
+	default:
+		WARN_ON(1);
+	}
+
+	return ID_NONE;
+}
+EXPORT_SYMBOL_GPL(hscaler_get_block_id);
+
+/* Return the display stream this HScaler is assigned to. */
+unsigned int hscaler_get_stream_id(struct dpu_hscaler *hs)
+{
+	return hs->stream_id;
+}
+EXPORT_SYMBOL_GPL(hscaler_get_stream_id);
+
+/* Assign the HScaler to a display stream; rejects unknown ids. */
+void hscaler_set_stream_id(struct dpu_hscaler *hs, unsigned int id)
+{
+	switch (id) {
+	case DPU_PLANE_SRC_TO_DISP_STREAM0:
+	case DPU_PLANE_SRC_TO_DISP_STREAM1:
+	case DPU_PLANE_SRC_DISABLED:
+		hs->stream_id = id;
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+EXPORT_SYMBOL_GPL(hscaler_set_stream_id);
+
+/*
+ * Claim exclusive use of the HScaler with the given global id.
+ * Returns the instance, -EINVAL for an unknown id or -EBUSY if already
+ * claimed.  Release with dpu_hs_put().
+ */
+struct dpu_hscaler *dpu_hs_get(struct dpu_soc *dpu, int id)
+{
+	struct dpu_hscaler *hs;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hs_ids); i++)
+		if (hs_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(hs_ids))
+		return ERR_PTR(-EINVAL);
+
+	hs = dpu->hs_priv[i];
+
+	mutex_lock(&hs->mutex);
+
+	if (hs->inuse) {
+		mutex_unlock(&hs->mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	hs->inuse = true;
+
+	mutex_unlock(&hs->mutex);
+
+	return hs;
+}
+EXPORT_SYMBOL_GPL(dpu_hs_get);
+
+/* Release an HScaler previously claimed with dpu_hs_get(). */
+void dpu_hs_put(struct dpu_hscaler *hs)
+{
+	mutex_lock(&hs->mutex);
+	hs->inuse = false;
+	mutex_unlock(&hs->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_hs_put);
+
+/*
+ * One-time hardware setup for an HScaler: enable shadowing, zero the
+ * phase offset and disconnect the input.
+ */
+void _dpu_hs_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_hscaler *hs;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hs_ids); i++)
+		if (hs_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(hs_ids)))
+		return;
+
+	hs = dpu->hs_priv[i];
+
+	hscaler_shden(hs, true);
+	hscaler_setup2(hs, 0);
+	hscaler_pixengcfg_dynamic_src_sel(hs, HS_SRC_SEL__DISABLE);
+}
+
+/*
+ * Allocate and initialize an HScaler instance.
+ *
+ * @pec_base: physical base of the pixel engine configuration registers
+ * @base:     physical base of the unit's own register block
+ */
+int dpu_hs_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long pec_base, unsigned long base)
+{
+	struct dpu_hscaler *hs;
+	int i;
+
+	hs = devm_kzalloc(dpu->dev, sizeof(*hs), GFP_KERNEL);
+	if (!hs)
+		return -ENOMEM;
+
+	/* store by hs_ids[] index, matching the lookup in dpu_hs_get() */
+	for (i = 0; i < ARRAY_SIZE(hs_ids); i++)
+		if (hs_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(hs_ids))
+		return -EINVAL;
+
+	dpu->hs_priv[i] = hs;
+
+	hs->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_8);
+	if (!hs->pec_base)
+		return -ENOMEM;
+
+	hs->base = devm_ioremap(dpu->dev, base, SZ_1K);
+	if (!hs->base)
+		return -ENOMEM;
+
+	hs->dpu = dpu;
+	hs->id = id;
+
+	mutex_init(&hs->mutex);
+
+	_dpu_hs_init(dpu, id);
+
+	return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-layerblend.c b/drivers/gpu/imx/dpu/dpu-layerblend.c
new file mode 100644
index 000000000000..56c31cf094f5
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-layerblend.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK 0x3F
+#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK 0x3F00
+#define PIXENGCFG_DYNAMIC_SEC_SEL_SHIFT 8
+
+/*
+ * Selectable primary sources.  The last four entries are the
+ * LayerBlend units themselves; layerblend_pixengcfg_dynamic_prim_sel()
+ * limits the usable tail per instance id.
+ */
+static const lb_prim_sel_t prim_sels[] = {
+	LB_PRIM_SEL__DISABLE,
+	LB_PRIM_SEL__BLITBLEND9,
+	LB_PRIM_SEL__CONSTFRAME0,
+	LB_PRIM_SEL__CONSTFRAME1,
+	LB_PRIM_SEL__CONSTFRAME4,
+	LB_PRIM_SEL__CONSTFRAME5,
+	LB_PRIM_SEL__MATRIX4,
+	LB_PRIM_SEL__HSCALER4,
+	LB_PRIM_SEL__VSCALER4,
+	LB_PRIM_SEL__MATRIX5,
+	LB_PRIM_SEL__HSCALER5,
+	LB_PRIM_SEL__VSCALER5,
+	LB_PRIM_SEL__LAYERBLEND0,
+	LB_PRIM_SEL__LAYERBLEND1,
+	LB_PRIM_SEL__LAYERBLEND2,
+	LB_PRIM_SEL__LAYERBLEND3,
+};
+
+#define PIXENGCFG_STATUS 0xC
+#define SHDTOKSEL (0x3 << 3)
+#define SHDTOKSEL_SHIFT 3
+#define SHDLDSEL (0x3 << 1)
+#define SHDLDSEL_SHIFT 1
+#define CONTROL 0xC
+#define OPERATION_MODE_MASK BIT(0)
+#define BLENDCONTROL 0x10
+#define ALPHA(a) (((a) & 0xFF) << 16)
+#define PRIM_C_BLD_FUNC__ONE_MINUS_SEC_ALPHA 0x5
+#define PRIM_C_BLD_FUNC__PRIM_ALPHA 0x2
+#define SEC_C_BLD_FUNC__CONST_ALPHA (0x6 << 4)
+#define SEC_C_BLD_FUNC__ONE_MINUS_PRIM_ALPHA (0x3 << 4)
+#define PRIM_A_BLD_FUNC__ONE_MINUS_SEC_ALPHA (0x5 << 8)
+#define PRIM_A_BLD_FUNC__ZERO (0x0 << 8)
+#define SEC_A_BLD_FUNC__ONE (0x1 << 12)
+#define SEC_A_BLD_FUNC__ZERO (0x0 << 12)
+#define POSITION 0x14
+#define XPOS(x) ((x) & 0x7FFF)
+#define YPOS(y) (((y) & 0x7FFF) << 16)
+#define PRIMCONTROLWORD 0x18
+#define SECCONTROLWORD 0x1C
+
+/* Per-instance state of one LayerBlend unit. */
+struct dpu_layerblend {
+	void __iomem *pec_base;	/* pixel engine configuration registers */
+	void __iomem *base;	/* the unit's own register block */
+	struct mutex mutex;	/* serializes register access */
+	int id;			/* global LayerBlend id */
+	bool inuse;		/* claimed via dpu_lb_get() */
+	struct dpu_soc *dpu;
+};
+
+/* Raw register accessors: PEC block and the LayerBlend's own block. */
+static inline u32 dpu_pec_lb_read(struct dpu_layerblend *lb,
+				  unsigned int offset)
+{
+	return readl(lb->pec_base + offset);
+}
+
+static inline void dpu_pec_lb_write(struct dpu_layerblend *lb,
+				    unsigned int offset, u32 value)
+{
+	writel(value, lb->pec_base + offset);
+}
+
+static inline u32 dpu_lb_read(struct dpu_layerblend *lb, unsigned int offset)
+{
+	return readl(lb->base + offset);
+}
+
+static inline void dpu_lb_write(struct dpu_layerblend *lb,
+				unsigned int offset, u32 value)
+{
+	writel(value, lb->base + offset);
+}
+
+/*
+ * Route the LayerBlend's primary input to @prim.  The scan covers the
+ * fixed sources plus the first lb->id LayerBlend entries, so an
+ * instance can only chain to lower-numbered LayerBlends -- presumably
+ * to avoid loops; TODO confirm against the DPU reference manual.
+ * Returns 0 on success or -EINVAL for an invalid source.
+ */
+int layerblend_pixengcfg_dynamic_prim_sel(struct dpu_layerblend *lb,
+					  lb_prim_sel_t prim)
+{
+	struct dpu_soc *dpu = lb->dpu;
+	int fixed_sels_num = ARRAY_SIZE(prim_sels) - 4;
+	int i;
+	u32 val;
+
+	mutex_lock(&lb->mutex);
+	for (i = 0; i < fixed_sels_num + lb->id; i++) {
+		if (prim_sels[i] == prim) {
+			val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC);
+			val &= ~PIXENGCFG_DYNAMIC_PRIM_SEL_MASK;
+			val |= prim;
+			dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, val);
+			mutex_unlock(&lb->mutex);
+			return 0;
+		}
+	}
+	mutex_unlock(&lb->mutex);
+
+	dev_err(dpu->dev, "Invalid primary source for LayerBlend%d\n", lb->id);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(layerblend_pixengcfg_dynamic_prim_sel);
+
+/* Route the LayerBlend's secondary input to @sec. */
+void layerblend_pixengcfg_dynamic_sec_sel(struct dpu_layerblend *lb,
+					  lb_sec_sel_t sec)
+{
+	u32 dyn;
+
+	mutex_lock(&lb->mutex);
+	dyn = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC);
+	dyn &= ~PIXENGCFG_DYNAMIC_SEC_SEL_MASK;
+	dyn |= sec << PIXENGCFG_DYNAMIC_SEC_SEL_SHIFT;
+	dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, dyn);
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(layerblend_pixengcfg_dynamic_sec_sel);
+
+/* Program the pixel engine clock mode (CLKEN field) for this unit. */
+void layerblend_pixengcfg_clken(struct dpu_layerblend *lb,
+				pixengcfg_clken_t clken)
+{
+	u32 val;
+
+	mutex_lock(&lb->mutex);
+	val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC);
+	val &= ~CLKEN_MASK;
+	val |= clken << CLKEN_MASK_SHIFT;
+	dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, val);
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(layerblend_pixengcfg_clken);
+
+/* Enable/disable shadowing of the LayerBlend configuration registers. */
+void layerblend_shden(struct dpu_layerblend *lb, bool enable)
+{
+	u32 ctrl;
+
+	mutex_lock(&lb->mutex);
+	ctrl = dpu_lb_read(lb, STATICCONTROL);
+	ctrl = enable ? (ctrl | SHDEN) : (ctrl & ~SHDEN);
+	dpu_lb_write(lb, STATICCONTROL, ctrl);
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(layerblend_shden);
+
+/* Select which input's shadow token this unit forwards (SHDTOKSEL). */
+void layerblend_shdtoksel(struct dpu_layerblend *lb, lb_shadow_sel_t sel)
+{
+	u32 val;
+
+	mutex_lock(&lb->mutex);
+	val = dpu_lb_read(lb, STATICCONTROL);
+	val &= ~SHDTOKSEL;
+	val |= (sel << SHDTOKSEL_SHIFT);
+	dpu_lb_write(lb, STATICCONTROL, val);
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(layerblend_shdtoksel);
+
+/* Select which input triggers a shadow load (SHDLDSEL). */
+void layerblend_shdldsel(struct dpu_layerblend *lb, lb_shadow_sel_t sel)
+{
+	u32 val;
+
+	mutex_lock(&lb->mutex);
+	val = dpu_lb_read(lb, STATICCONTROL);
+	val &= ~SHDLDSEL;
+	val |= (sel << SHDLDSEL_SHIFT);
+	dpu_lb_write(lb, STATICCONTROL, val);
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(layerblend_shdldsel);
+
+/* Set the LayerBlend operation mode (CONTROL.OPERATION_MODE). */
+void layerblend_control(struct dpu_layerblend *lb, lb_mode_t mode)
+{
+	u32 val;
+
+	mutex_lock(&lb->mutex);
+	val = dpu_lb_read(lb, CONTROL);
+	val &= ~OPERATION_MODE_MASK;
+	val |= mode;
+	dpu_lb_write(lb, CONTROL, val);
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(layerblend_control);
+
+/*
+ * Program BLENDCONTROL: constant alpha 0xff, primary color weighted by
+ * primary alpha, secondary color by one-minus-primary-alpha, primary
+ * alpha function zero, secondary alpha function chosen by whether the
+ * secondary input comes from a scaler.
+ */
+void layerblend_blendcontrol(struct dpu_layerblend *lb, bool sec_from_scaler)
+{
+	u32 blend = ALPHA(0xff) |
+		    PRIM_C_BLD_FUNC__PRIM_ALPHA |
+		    SEC_C_BLD_FUNC__ONE_MINUS_PRIM_ALPHA |
+		    PRIM_A_BLD_FUNC__ZERO |
+		    (sec_from_scaler ? SEC_A_BLD_FUNC__ZERO :
+				       SEC_A_BLD_FUNC__ONE);
+
+	mutex_lock(&lb->mutex);
+	dpu_lb_write(lb, BLENDCONTROL, blend);
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(layerblend_blendcontrol);
+
+/* Set the secondary input's position relative to the primary input. */
+void layerblend_position(struct dpu_layerblend *lb, int x, int y)
+{
+	mutex_lock(&lb->mutex);
+	dpu_lb_write(lb, POSITION, XPOS(x) | YPOS(y));
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(layerblend_position);
+
+/*
+ * Claim exclusive use of the LayerBlend with the given global id.
+ * Returns the instance, -EINVAL for an unknown id or -EBUSY if already
+ * claimed.  Release with dpu_lb_put().
+ */
+struct dpu_layerblend *dpu_lb_get(struct dpu_soc *dpu, int id)
+{
+	struct dpu_layerblend *lb;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(lb_ids); i++)
+		if (lb_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(lb_ids))
+		return ERR_PTR(-EINVAL);
+
+	lb = dpu->lb_priv[i];
+
+	mutex_lock(&lb->mutex);
+
+	if (lb->inuse) {
+		mutex_unlock(&lb->mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	lb->inuse = true;
+
+	mutex_unlock(&lb->mutex);
+
+	return lb;
+}
+EXPORT_SYMBOL_GPL(dpu_lb_get);
+
+/* Release a LayerBlend previously claimed with dpu_lb_get(). */
+void dpu_lb_put(struct dpu_layerblend *lb)
+{
+	mutex_lock(&lb->mutex);
+	lb->inuse = false;
+	mutex_unlock(&lb->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_lb_put);
+
+/*
+ * One-time hardware setup for a LayerBlend: disconnect both inputs,
+ * automatic clock gating, shadow token/load from both inputs, and
+ * shadowing enabled.
+ */
+void _dpu_lb_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_layerblend *lb;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(lb_ids); i++)
+		if (lb_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(lb_ids)))
+		return;
+
+	lb = dpu->lb_priv[i];
+
+	layerblend_pixengcfg_dynamic_prim_sel(lb, LB_PRIM_SEL__DISABLE);
+	layerblend_pixengcfg_dynamic_sec_sel(lb, LB_SEC_SEL__DISABLE);
+	layerblend_pixengcfg_clken(lb, CLKEN__AUTOMATIC);
+	layerblend_shdldsel(lb, BOTH);
+	layerblend_shdtoksel(lb, BOTH);
+	layerblend_shden(lb, true);
+}
+
+/*
+ * Allocate and initialize a LayerBlend instance.
+ *
+ * @pec_base: physical base of the pixel engine configuration registers
+ * @base:     physical base of the unit's own register block
+ */
+int dpu_lb_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long pec_base, unsigned long base)
+{
+	struct dpu_layerblend *lb;
+	int i, ret;
+
+	lb = devm_kzalloc(dpu->dev, sizeof(*lb), GFP_KERNEL);
+	if (!lb)
+		return -ENOMEM;
+
+	/*
+	 * Index lb_priv[] by position in lb_ids[], matching the lookups
+	 * in dpu_lb_get() and _dpu_lb_init(), instead of the raw id.
+	 */
+	for (i = 0; i < ARRAY_SIZE(lb_ids); i++)
+		if (lb_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(lb_ids))
+		return -EINVAL;
+
+	dpu->lb_priv[i] = lb;
+
+	lb->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
+	if (!lb->pec_base)
+		return -ENOMEM;
+
+	lb->base = devm_ioremap(dpu->dev, base, SZ_32);
+	if (!lb->base)
+		return -ENOMEM;
+
+	lb->dpu = dpu;
+	lb->id = id;
+	mutex_init(&lb->mutex);
+
+	/* also verifies register access to the unit works */
+	ret = layerblend_pixengcfg_dynamic_prim_sel(lb, LB_PRIM_SEL__DISABLE);
+	if (ret < 0)
+		return ret;
+
+	_dpu_lb_init(dpu, id);
+
+	return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-prv.h b/drivers/gpu/imx/dpu/dpu-prv.h
new file mode 100644
index 000000000000..a2983e42e630
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-prv.h
@@ -0,0 +1,420 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef __DPU_PRV_H__
+#define __DPU_PRV_H__
+
+#include <linux/firmware/imx/sci.h>
+#include <drm/drm_fourcc.h>
+#include <video/dpu.h>
+
+#define STATICCONTROL 0x8
+#define SHDLDREQSTICKY(lm) (((lm) & 0xFF) << 24)
+#define SHDLDREQSTICKY_MASK (0xFF << 24)
+#define BASEADDRESSAUTOUPDATE(lm) (((lm) & 0xFF) << 16)
+#define BASEADDRESSAUTOUPDATE_MASK (0xFF << 16)
+#define SHDEN BIT(0)
+#define BURSTBUFFERMANAGEMENT 0xC
+#define SETNUMBUFFERS(n) ((n) & 0xFF)
+#define SETBURSTLENGTH(n) (((n) & 0x1F) << 8)
+#define SETBURSTLENGTH_MASK 0x1F00
+#define LINEMODE_MASK 0x80000000U
+#define LINEMODE_SHIFT 31U
+enum linemode {
+ /*
+ * Mandatory setting for operation in the Display Controller.
+ * Works also for Blit Engine with marginal performance impact.
+ */
+ LINEMODE__DISPLAY = 0,
+ /* Recommended setting for operation in the Blit Engine. */
+ LINEMODE__BLIT = 1 << LINEMODE_SHIFT,
+};
+
+#define BITSPERPIXEL(bpp) (((bpp) & 0x3F) << 16)
+#define STRIDE(n) (((n) - 1) & 0xFFFF)
+#define LINEWIDTH(w) (((w) - 1) & 0x3FFF)
+#define LINECOUNT(h) ((((h) - 1) & 0x3FFF) << 16)
+#define ITUFORMAT BIT(31)
+#define R_BITS(n) (((n) & 0xF) << 24)
+#define G_BITS(n) (((n) & 0xF) << 16)
+#define B_BITS(n) (((n) & 0xF) << 8)
+#define A_BITS(n) ((n) & 0xF)
+#define R_SHIFT(n) (((n) & 0x1F) << 24)
+#define G_SHIFT(n) (((n) & 0x1F) << 16)
+#define B_SHIFT(n) (((n) & 0x1F) << 8)
+#define A_SHIFT(n) ((n) & 0x1F)
+#define Y_BITS(n) R_BITS(n)
+#define Y_BITS_MASK 0xF000000
+#define U_BITS(n) G_BITS(n)
+#define U_BITS_MASK 0xF0000
+#define V_BITS(n) B_BITS(n)
+#define V_BITS_MASK 0xF00
+#define Y_SHIFT(n) R_SHIFT(n)
+#define Y_SHIFT_MASK 0x1F000000
+#define U_SHIFT(n) G_SHIFT(n)
+#define U_SHIFT_MASK 0x1F0000
+#define V_SHIFT(n) B_SHIFT(n)
+#define V_SHIFT_MASK 0x1F00
+#define LAYERXOFFSET(x) ((x) & 0x7FFF)
+#define LAYERYOFFSET(y) (((y) & 0x7FFF) << 16)
+#define CLIPWINDOWXOFFSET(x) ((x) & 0x7FFF)
+#define CLIPWINDOWYOFFSET(y) (((y) & 0x7FFF) << 16)
+#define CLIPWINDOWWIDTH(w) (((w) - 1) & 0x3FFF)
+#define CLIPWINDOWHEIGHT(h) ((((h) - 1) & 0x3FFF) << 16)
+#define PALETTEENABLE BIT(0)
+typedef enum {
+ TILE_FILL_ZERO,
+ TILE_FILL_CONSTANT,
+ TILE_PAD,
+ TILE_PAD_ZERO,
+} tilemode_t;
+#define ALPHASRCENABLE BIT(8)
+#define ALPHACONSTENABLE BIT(9)
+#define ALPHAMASKENABLE BIT(10)
+#define ALPHATRANSENABLE BIT(11)
+#define RGBALPHASRCENABLE BIT(12)
+#define RGBALPHACONSTENABLE BIT(13)
+#define RGBALPHAMASKENABLE BIT(14)
+#define RGBALPHATRANSENABLE BIT(15)
+#define PREMULCONSTRGB BIT(16)
+typedef enum {
+ YUVCONVERSIONMODE__OFF,
+ YUVCONVERSIONMODE__ITU601,
+ YUVCONVERSIONMODE__ITU601_FR,
+ YUVCONVERSIONMODE__ITU709,
+} yuvconversionmode_t;
+#define YUVCONVERSIONMODE_MASK 0x60000
+#define YUVCONVERSIONMODE(m) (((m) & 0x3) << 17)
+#define GAMMAREMOVEENABLE BIT(20)
+#define CLIPWINDOWENABLE BIT(30)
+#define SOURCEBUFFERENABLE BIT(31)
+#define EMPTYFRAME BIT(31)
+#define FRAMEWIDTH(w) (((w) - 1) & 0x3FFF)
+#define FRAMEHEIGHT(h) ((((h) - 1) & 0x3FFF) << 16)
+#define DELTAX_MASK 0x3F000
+#define DELTAY_MASK 0xFC0000
+#define DELTAX(x) (((x) & 0x3F) << 12)
+#define DELTAY(y) (((y) & 0x3F) << 18)
+#define YUV422UPSAMPLINGMODE_MASK BIT(5)
+#define YUV422UPSAMPLINGMODE(m) (((m) & 0x1) << 5)
+typedef enum {
+ YUV422UPSAMPLINGMODE__REPLICATE,
+ YUV422UPSAMPLINGMODE__INTERPOLATE,
+} yuv422upsamplingmode_t;
+#define INPUTSELECT_MASK 0x18
+#define INPUTSELECT(s) (((s) & 0x3) << 3)
+typedef enum {
+ INPUTSELECT__INACTIVE,
+ INPUTSELECT__COMPPACK,
+ INPUTSELECT__ALPHAMASK,
+ INPUTSELECT__COORDINATE,
+} inputselect_t;
+#define RASTERMODE_MASK 0x7
+#define RASTERMODE(m) ((m) & 0x7)
+typedef enum {
+ RASTERMODE__NORMAL,
+ RASTERMODE__DECODE,
+ RASTERMODE__ARBITRARY,
+ RASTERMODE__PERSPECTIVE,
+ RASTERMODE__YUV422,
+ RASTERMODE__AFFINE,
+} rastermode_t;
+#define SHDTOKGEN BIT(0)
+#define FETCHTYPE_MASK 0xF
+
+#define DPU_FRAC_PLANE_LAYER_NUM 8
+
+#define DPU_VPROC_CAP_HSCALER4 BIT(0)
+#define DPU_VPROC_CAP_VSCALER4 BIT(1)
+#define DPU_VPROC_CAP_HSCALER5 BIT(2)
+#define DPU_VPROC_CAP_VSCALER5 BIT(3)
+#define DPU_VPROC_CAP_FETCHECO0 BIT(4)
+#define DPU_VPROC_CAP_FETCHECO1 BIT(5)
+
+#define DPU_VPROC_CAP_HSCALE (DPU_VPROC_CAP_HSCALER4 | \
+ DPU_VPROC_CAP_HSCALER5)
+#define DPU_VPROC_CAP_VSCALE (DPU_VPROC_CAP_VSCALER4 | \
+ DPU_VPROC_CAP_VSCALER5)
+#define DPU_VPROC_CAP_FETCHECO (DPU_VPROC_CAP_FETCHECO0 | \
+ DPU_VPROC_CAP_FETCHECO1)
+
+/* Static description of one class of DPU sub-units. */
+struct dpu_unit {
+ char *name;
+ unsigned int num; /* number of instances */
+ const unsigned int *ids; /* hardware ids, num entries */
+ const unsigned long *pec_ofss; /* PixEngCFG */
+ const unsigned long *ofss; /* unit register offsets, num entries */
+};
+
+/* Offsets of the common-module registers, relative to dpu_data.cm_ofs. */
+struct cm_reg_ofs {
+ u32 ipidentifier;
+ u32 lockunlock;
+ u32 lockstatus;
+ u32 userinterruptmask;
+ u32 interruptenable;
+ u32 interruptpreset;
+ u32 interruptclear;
+ u32 interruptstatus;
+ u32 userinterruptenable;
+ u32 userinterruptpreset;
+ u32 userinterruptclear;
+ u32 userinterruptstatus;
+ u32 generalpurpose;
+};
+
+/* Per-SoC description: sub-unit lists and common-module register layout. */
+struct dpu_data {
+ unsigned long cm_ofs; /* common */
+ const struct dpu_unit *cfs; /* ConstFrames */
+ const struct dpu_unit *decs; /* DisEngCfgs */
+ const struct dpu_unit *eds; /* ExtDsts */
+ const struct dpu_unit *fds; /* FetchDecodes */
+ const struct dpu_unit *fes; /* FetchEcos */
+ const struct dpu_unit *fgs; /* FrameGens */
+ const struct dpu_unit *fls; /* FetchLayers */
+ const struct dpu_unit *fws; /* FetchWarps */
+ const struct dpu_unit *hss; /* HScalers */
+ const struct dpu_unit *lbs; /* LayerBlends */
+ const struct dpu_unit *sts; /* Stores */
+ const struct dpu_unit *tcons; /* TCons */
+ const struct dpu_unit *vss; /* VScalers */
+ const struct cm_reg_ofs *cm_reg_ofs;
+ const unsigned long *unused_irq;
+
+ u32 plane_src_mask;
+};
+
+/* One DPU instance: device state, irqs and all sub-unit objects. */
+struct dpu_soc {
+ struct device *dev;
+ const struct dpu_data *data;
+ spinlock_t lock;
+ struct list_head list;
+
+ /* power domains and the device links keeping them active */
+ struct device *pd_dc_dev;
+ struct device *pd_pll0_dev;
+ struct device *pd_pll1_dev;
+ struct device_link *pd_dc_link;
+ struct device_link *pd_pll0_link;
+ struct device_link *pd_pll1_link;
+
+ void __iomem *cm_reg; /* mapped common-module registers */
+
+ int id; /* instance number; nonzero selects the DC1 SCU resource */
+ int usecount;
+
+ /* cached virtual irq numbers */
+ int irq_extdst0_shdload;
+ int irq_extdst4_shdload;
+ int irq_extdst1_shdload;
+ int irq_extdst5_shdload;
+ int irq_disengcfg_shdload0;
+ int irq_disengcfg_framecomplete0;
+ int irq_disengcfg_shdload1;
+ int irq_disengcfg_framecomplete1;
+ int irq_line_num;
+
+ struct irq_domain *domain;
+
+ struct imx_sc_ipc *dpu_ipc_handle; /* SCU IPC handle for misc controls */
+
+ /* sub-unit instances, indexed per the *_ids[] tables below */
+ struct dpu_constframe *cf_priv[4];
+ struct dpu_disengcfg *dec_priv[2];
+ struct dpu_extdst *ed_priv[4];
+ struct dpu_fetchunit *fd_priv[2];
+ struct dpu_fetchunit *fe_priv[4];
+ struct dpu_framegen *fg_priv[2];
+ struct dpu_fetchunit *fl_priv[1];
+ struct dpu_fetchunit *fw_priv[1];
+ struct dpu_hscaler *hs_priv[3];
+ struct dpu_layerblend *lb_priv[4];
+ struct dpu_tcon *tcon_priv[2];
+ struct dpu_vscaler *vs_priv[3];
+};
+
+int dpu_format_horz_chroma_subsampling(u32 format);
+int dpu_format_vert_chroma_subsampling(u32 format);
+int dpu_format_num_planes(u32 format);
+int dpu_format_plane_width(int width, u32 format, int plane);
+int dpu_format_plane_height(int height, u32 format, int plane);
+
+#define _DECLARE_DPU_UNIT_INIT_FUNC(block) \
+void _dpu_##block##_init(struct dpu_soc *dpu, unsigned int id) \
+
+_DECLARE_DPU_UNIT_INIT_FUNC(cf);
+_DECLARE_DPU_UNIT_INIT_FUNC(dec);
+_DECLARE_DPU_UNIT_INIT_FUNC(ed);
+_DECLARE_DPU_UNIT_INIT_FUNC(fd);
+_DECLARE_DPU_UNIT_INIT_FUNC(fe);
+_DECLARE_DPU_UNIT_INIT_FUNC(fg);
+_DECLARE_DPU_UNIT_INIT_FUNC(fl);
+_DECLARE_DPU_UNIT_INIT_FUNC(fw);
+_DECLARE_DPU_UNIT_INIT_FUNC(hs);
+_DECLARE_DPU_UNIT_INIT_FUNC(lb);
+_DECLARE_DPU_UNIT_INIT_FUNC(tcon);
+_DECLARE_DPU_UNIT_INIT_FUNC(vs);
+
+#define DECLARE_DPU_UNIT_INIT_FUNC(block) \
+int dpu_##block##_init(struct dpu_soc *dpu, unsigned int id, \
+ unsigned long pec_base, unsigned long base)
+
+DECLARE_DPU_UNIT_INIT_FUNC(cf);
+DECLARE_DPU_UNIT_INIT_FUNC(dec);
+DECLARE_DPU_UNIT_INIT_FUNC(ed);
+DECLARE_DPU_UNIT_INIT_FUNC(fd);
+DECLARE_DPU_UNIT_INIT_FUNC(fe);
+DECLARE_DPU_UNIT_INIT_FUNC(fg);
+DECLARE_DPU_UNIT_INIT_FUNC(fl);
+DECLARE_DPU_UNIT_INIT_FUNC(fw);
+DECLARE_DPU_UNIT_INIT_FUNC(hs);
+DECLARE_DPU_UNIT_INIT_FUNC(lb);
+DECLARE_DPU_UNIT_INIT_FUNC(st);
+DECLARE_DPU_UNIT_INIT_FUNC(tcon);
+DECLARE_DPU_UNIT_INIT_FUNC(vs);
+
+/* MMIO accessors for a fetch unit's PixEngCFG register page. */
+static inline u32 dpu_pec_fu_read(struct dpu_fetchunit *fu, unsigned int offset)
+{
+ return readl(fu->pec_base + offset);
+}
+
+static inline void dpu_pec_fu_write(struct dpu_fetchunit *fu,
+ unsigned int offset, u32 value)
+{
+ writel(value, fu->pec_base + offset);
+}
+
+/* MMIO accessors for a fetch unit's own register page. */
+static inline u32 dpu_fu_read(struct dpu_fetchunit *fu, unsigned int offset)
+{
+ return readl(fu->base + offset);
+}
+
+static inline void dpu_fu_write(struct dpu_fetchunit *fu,
+ unsigned int offset, u32 value)
+{
+ writel(value, fu->base + offset);
+}
+
+/* Pack 8-bit R/G/B/A channels into one 32-bit register word (R high). */
+static inline u32 rgb_color(u8 r, u8 g, u8 b, u8 a)
+{
+ return (r << 24) | (g << 16) | (b << 8) | a;
+}
+
+/* Pack 8-bit Y/U/V channels into one 32-bit word; low byte stays zero. */
+static inline u32 yuv_color(u8 y, u8 u, u8 v)
+{
+ return (y << 24) | (u << 16) | (v << 8);
+}
+
+/*
+ * Hardware ids of each unit class; the position of an id within its
+ * table is the index into the matching *_priv[] array of struct dpu_soc.
+ */
+static const unsigned int cf_ids[] = {0, 1, 4, 5};
+static const unsigned int dec_ids[] = {0, 1};
+static const unsigned int ed_ids[] = {0, 1, 4, 5};
+static const unsigned int fd_ids[] = {0, 1};
+static const unsigned int fe_ids[] = {0, 1, 2, 9};
+static const unsigned int fg_ids[] = {0, 1};
+static const unsigned int fl_ids[] = {0};
+static const unsigned int fw_ids[] = {2};
+static const unsigned int hs_ids[] = {4, 5, 9};
+static const unsigned int lb_ids[] = {0, 1, 2, 3};
+static const unsigned int tcon_ids[] = {0, 1};
+static const unsigned int vs_ids[] = {4, 5, 9};
+
+/* Per-DRM-format component bit-depth and bit-position register words. */
+struct dpu_pixel_format {
+ u32 pixel_format; /* DRM fourcc */
+ u32 bits; /* R/G/B/A (or Y/U/V) bits-per-component word */
+ u32 shift; /* per-component bit-position word */
+};
+
+static const struct dpu_pixel_format dpu_pixel_format_matrix[] = {
+ {
+ DRM_FORMAT_ARGB8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8),
+ R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(24),
+ }, {
+ DRM_FORMAT_XRGB8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_ABGR8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8),
+ R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(24),
+ }, {
+ DRM_FORMAT_XBGR8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_RGBA8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8),
+ R_SHIFT(24) | G_SHIFT(16) | B_SHIFT(8) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_RGBX8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(24) | G_SHIFT(16) | B_SHIFT(8) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_BGRA8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8),
+ R_SHIFT(8) | G_SHIFT(16) | B_SHIFT(24) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_BGRX8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(8) | G_SHIFT(16) | B_SHIFT(24) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_RGB888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_BGR888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_RGB565,
+ R_BITS(5) | G_BITS(6) | B_BITS(5) | A_BITS(0),
+ R_SHIFT(11) | G_SHIFT(5) | B_SHIFT(0) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_YUYV,
+ Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
+ Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(8) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_UYVY,
+ Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
+ Y_SHIFT(8) | U_SHIFT(0) | V_SHIFT(0) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_NV12,
+ Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
+ Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_NV21,
+ Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
+ Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_NV16,
+ Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
+ Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_NV61,
+ Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
+ Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_NV24,
+ Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
+ Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0),
+ }, {
+ DRM_FORMAT_NV42,
+ Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
+ Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0),
+ },
+};
+
+int dpu_sc_misc_init(struct dpu_soc *dpu);
+int dpu_pxlink_set_mst_addr(struct dpu_soc *dpu, int disp_id, u32 val);
+int dpu_pxlink_set_mst_enable(struct dpu_soc *dpu, int disp_id, bool enable);
+int dpu_pxlink_set_mst_valid(struct dpu_soc *dpu, int disp_id, bool enable);
+int dpu_pxlink_set_sync_ctrl(struct dpu_soc *dpu, int disp_id, bool enable);
+int dpu_pxlink_init(struct dpu_soc *dpu);
+#endif /* __DPU_PRV_H__ */
diff --git a/drivers/gpu/imx/dpu/dpu-sc-misc.c b/drivers/gpu/imx/dpu/dpu-sc-misc.c
new file mode 100644
index 000000000000..c98c9bd1a0db
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-sc-misc.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include "dpu-prv.h"
+
+/* Thin wrapper: route a misc-control write to the SCU over IPC. */
+static inline int
+dpu_sc_misc_set_ctrl(struct dpu_soc *dpu, u32 rsc, u8 ctrl, u32 val)
+{
+ return imx_sc_misc_set_control(dpu->dpu_ipc_handle, rsc, ctrl, val);
+}
+
+/* Obtain the SCU IPC handle used by the pixel-link controls below. */
+int dpu_sc_misc_init(struct dpu_soc *dpu)
+{
+ return imx_scu_get_handle(&dpu->dpu_ipc_handle);
+}
+
+/*
+ * Set the pixel-link master address for one display stream.
+ * disp_id 0 selects MST1, nonzero MST2; dpu->id selects the DC0/DC1
+ * SCU resource.  Returns 0 or an error from the SCU call.
+ */
+int dpu_pxlink_set_mst_addr(struct dpu_soc *dpu, int disp_id, u32 val)
+{
+ u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+ u8 ctrl = disp_id ?
+ IMX_SC_C_PXL_LINK_MST2_ADDR : IMX_SC_C_PXL_LINK_MST1_ADDR;
+
+ return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, val);
+}
+
+/*
+ * Gate the pixel-link master output of one display stream on or off.
+ * disp_id 0 selects MST1, nonzero MST2; dpu->id selects the DC0/DC1
+ * SCU resource.  Returns 0 or an error from the SCU call.
+ */
+int dpu_pxlink_set_mst_enable(struct dpu_soc *dpu, int disp_id, bool enable)
+{
+ u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+ u8 ctrl = disp_id ?
+ IMX_SC_C_PXL_LINK_MST2_ENB : IMX_SC_C_PXL_LINK_MST1_ENB;
+
+ return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
+}
+
+/*
+ * Mark the pixel-link master data of one display stream valid/invalid.
+ * Returns 0 or an error from the SCU call.
+ */
+int dpu_pxlink_set_mst_valid(struct dpu_soc *dpu, int disp_id, bool enable)
+{
+ u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+ u8 ctrl = disp_id ?
+ IMX_SC_C_PXL_LINK_MST2_VLD : IMX_SC_C_PXL_LINK_MST1_VLD;
+
+ return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
+}
+
+/*
+ * Enable/disable sync control for one display stream.
+ * Returns 0 or an error from the SCU call.
+ */
+int dpu_pxlink_set_sync_ctrl(struct dpu_soc *dpu, int disp_id, bool enable)
+{
+ u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+ u8 ctrl = disp_id ? IMX_SC_C_SYNC_CTRL1 : IMX_SC_C_SYNC_CTRL0;
+
+ return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
+}
+
+/*
+ * dpu_pxlink_init - park both pixel links in a disabled state
+ * @dpu: DPU instance
+ *
+ * Best effort: every control of both display streams is written even
+ * if an earlier one fails.
+ *
+ * Return: 0 on success, otherwise the first error encountered.  (The
+ * previous code OR-ed the negative errnos together, which produced a
+ * meaningless return value.)
+ */
+int dpu_pxlink_init(struct dpu_soc *dpu)
+{
+ int disp_id, ret = 0, err;
+
+ for (disp_id = 0; disp_id < 2; disp_id++) {
+ err = dpu_pxlink_set_mst_addr(dpu, disp_id, 0);
+ if (err && !ret)
+ ret = err;
+
+ err = dpu_pxlink_set_mst_enable(dpu, disp_id, false);
+ if (err && !ret)
+ ret = err;
+
+ err = dpu_pxlink_set_mst_valid(dpu, disp_id, false);
+ if (err && !ret)
+ ret = err;
+
+ err = dpu_pxlink_set_sync_ctrl(dpu, disp_id, false);
+ if (err && !ret)
+ ret = err;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-tcon.c b/drivers/gpu/imx/dpu/dpu-tcon.c
new file mode 100644
index 000000000000..63b2a2d5af85
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-tcon.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define SSQCNTS 0
+#define SSQCYCLE 0x8
+#define SWRESET 0xC
+#define TCON_CTRL 0x10
+#define BYPASS BIT(3)
+#define RSDSINVCTRL 0x14
+#define MAPBIT3_0 0x18
+#define MAPBIT7_4 0x1C
+#define MAPBIT11_8 0x20
+#define MAPBIT15_12 0x24
+#define MAPBIT19_16 0x28
+#define MAPBIT23_20 0x2C
+#define MAPBIT27_24 0x30
+#define MAPBIT31_28 0x34
+#define MAPBIT34_32 0x38
+#define MAPBIT3_0_DUAL 0x3C
+#define MAPBIT7_4_DUAL 0x40
+#define MAPBIT11_8_DUAL 0x44
+#define MAPBIT15_12_DUAL 0x48
+#define MAPBIT19_16_DUAL 0x4C
+#define MAPBIT23_20_DUAL 0x50
+#define MAPBIT27_24_DUAL 0x54
+#define MAPBIT31_28_DUAL 0x58
+#define MAPBIT34_32_DUAL 0x5C
+#define SPGPOSON(n) (0x60 + (n) * 16)
+#define X(n) (((n) & 0x7FFF) << 16)
+#define Y(n) ((n) & 0x7FFF)
+#define SPGMASKON(n) (0x64 + (n) * 16)
+#define SPGPOSOFF(n) (0x68 + (n) * 16)
+#define SPGMASKOFF(n) (0x6C + (n) * 16)
+#define SMXSIGS(n) (0x120 + (n) * 8)
+#define SMXFCTTABLE(n) (0x124 + (n) * 8)
+#define RESET_OVER_UNFERFLOW 0x180
+#define DUAL_DEBUG 0x184
+
+/* One timing controller (TCON) instance. */
+struct dpu_tcon {
+ void __iomem *base; /* mapped unit registers */
+ struct mutex mutex; /* serializes register access and inuse */
+ int id; /* hardware id, from tcon_ids[] */
+ bool inuse;
+ struct dpu_soc *dpu;
+};
+
+/* MMIO accessors for the TCON register page. */
+static inline u32 dpu_tcon_read(struct dpu_tcon *tcon, unsigned int offset)
+{
+ return readl(tcon->base + offset);
+}
+
+static inline void dpu_tcon_write(struct dpu_tcon *tcon,
+ unsigned int offset, u32 value)
+{
+ writel(value, tcon->base + offset);
+}
+
+/*
+ * tcon_set_fmt - program the TCON output bit mapping for a bus format
+ * @tcon: target TCON
+ * @bus_format: MEDIA_BUS_FMT_* output format
+ *
+ * The MAPBIT registers route internal pixel bits to output pads; the
+ * constant words below are the per-format routing tables.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported format.
+ */
+int tcon_set_fmt(struct dpu_tcon *tcon, u32 bus_format)
+{
+ mutex_lock(&tcon->mutex);
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ dpu_tcon_write(tcon, MAPBIT3_0, 0x19181716);
+ dpu_tcon_write(tcon, MAPBIT7_4, 0x1d1c1b1a);
+ dpu_tcon_write(tcon, MAPBIT11_8, 0x0f0e0d0c);
+ dpu_tcon_write(tcon, MAPBIT15_12, 0x13121110);
+ dpu_tcon_write(tcon, MAPBIT19_16, 0x05040302);
+ dpu_tcon_write(tcon, MAPBIT23_20, 0x09080706);
+ break;
+ /* the three 30-bit-bus formats share one routing table */
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB888_1X30_PADLO:
+ case MEDIA_BUS_FMT_RGB666_1X30_PADLO:
+ dpu_tcon_write(tcon, MAPBIT3_0, 0x17161514);
+ dpu_tcon_write(tcon, MAPBIT7_4, 0x1b1a1918);
+ dpu_tcon_write(tcon, MAPBIT11_8, 0x0b0a1d1c);
+ dpu_tcon_write(tcon, MAPBIT15_12, 0x0f0e0d0c);
+ dpu_tcon_write(tcon, MAPBIT19_16, 0x13121110);
+ dpu_tcon_write(tcon, MAPBIT23_20, 0x03020100);
+ dpu_tcon_write(tcon, MAPBIT27_24, 0x07060504);
+ dpu_tcon_write(tcon, MAPBIT31_28, 0x00000908);
+ break;
+ default:
+ mutex_unlock(&tcon->mutex);
+ return -EINVAL;
+ }
+ mutex_unlock(&tcon->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tcon_set_fmt);
+
+/*
+ * tcon_cfg_videomode - program the TCON sync generators from a DRM mode
+ * @tcon: target TCON
+ * @m: display mode to derive hsync/vsync/data-enable timing from
+ *
+ * Takes the TCON out of bypass, then programs the sync-pulse-generator
+ * (SPG) on/off positions and the signal-mux (SMX) routing for each of
+ * the four dsp_control outputs.
+ */
+void tcon_cfg_videomode(struct dpu_tcon *tcon, struct drm_display_mode *m)
+{
+ u32 val;
+ int y;
+
+ mutex_lock(&tcon->mutex);
+ /* leave bypass so the SPG outputs drive the pads */
+ val = dpu_tcon_read(tcon, TCON_CTRL);
+ val &= ~BYPASS;
+ dpu_tcon_write(tcon, TCON_CTRL, val);
+
+ /* dsp_control[0]: hsync */
+ dpu_tcon_write(tcon, SPGPOSON(0), X(m->hsync_start));
+ dpu_tcon_write(tcon, SPGMASKON(0), 0xffff);
+
+ dpu_tcon_write(tcon, SPGPOSOFF(0), X(m->hsync_end));
+ dpu_tcon_write(tcon, SPGMASKOFF(0), 0xffff);
+
+ dpu_tcon_write(tcon, SMXSIGS(0), 0x2);
+ dpu_tcon_write(tcon, SMXFCTTABLE(0), 0x1);
+
+ /* dsp_control[1]: vsync */
+ dpu_tcon_write(tcon, SPGPOSON(1),
+ X(m->hsync_start) | Y(m->vsync_start - 1));
+ dpu_tcon_write(tcon, SPGMASKON(1), 0x0);
+
+ dpu_tcon_write(tcon, SPGPOSOFF(1),
+ X(m->hsync_start) | Y(m->vsync_end - 1));
+ dpu_tcon_write(tcon, SPGMASKOFF(1), 0x0);
+
+ dpu_tcon_write(tcon, SMXSIGS(1), 0x3);
+ dpu_tcon_write(tcon, SMXFCTTABLE(1), 0x1);
+
+ /* dsp_control[2]: data enable */
+ /* horizontal */
+ dpu_tcon_write(tcon, SPGPOSON(2), 0x0);
+ dpu_tcon_write(tcon, SPGMASKON(2), 0xffff);
+
+ dpu_tcon_write(tcon, SPGPOSOFF(2), X(m->hdisplay));
+ dpu_tcon_write(tcon, SPGMASKOFF(2), 0xffff);
+
+ /* vertical */
+ dpu_tcon_write(tcon, SPGPOSON(3), 0x0);
+ dpu_tcon_write(tcon, SPGMASKON(3), 0x7fff0000);
+
+ dpu_tcon_write(tcon, SPGPOSOFF(3), Y(m->vdisplay));
+ dpu_tcon_write(tcon, SPGMASKOFF(3), 0x7fff0000);
+
+ dpu_tcon_write(tcon, SMXSIGS(2), 0x2c);
+ dpu_tcon_write(tcon, SMXFCTTABLE(2), 0x8);
+
+ /* dsp_control[3]: kachuck */
+ y = m->vdisplay + 1; /* first line past the active area */
+
+ dpu_tcon_write(tcon, SPGPOSON(4), X(0x0) | Y(y));
+ dpu_tcon_write(tcon, SPGMASKON(4), 0x0);
+
+ /* NOTE(review): 0x20-pixel wide pulse; purpose taken from vendor code */
+ dpu_tcon_write(tcon, SPGPOSOFF(4), X(0x20) | Y(y));
+ dpu_tcon_write(tcon, SPGMASKOFF(4), 0x0);
+
+ dpu_tcon_write(tcon, SMXSIGS(3), 0x6);
+ dpu_tcon_write(tcon, SMXFCTTABLE(3), 0x2);
+ mutex_unlock(&tcon->mutex);
+}
+EXPORT_SYMBOL_GPL(tcon_cfg_videomode);
+
+/*
+ * dpu_tcon_get - claim exclusive use of one TCON
+ * @dpu: DPU instance to take the unit from
+ * @id: hardware TCON id (must appear in tcon_ids[])
+ *
+ * Return: the unit, ERR_PTR(-EINVAL) for an unknown id or
+ * ERR_PTR(-EBUSY) if already claimed.  Pair with dpu_tcon_put().
+ */
+struct dpu_tcon *dpu_tcon_get(struct dpu_soc *dpu, int id)
+{
+ struct dpu_tcon *tcon;
+ int i;
+
+ /* map the hardware id to its tcon_priv[] index */
+ for (i = 0; i < ARRAY_SIZE(tcon_ids); i++)
+ if (tcon_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(tcon_ids))
+ return ERR_PTR(-EINVAL);
+
+ tcon = dpu->tcon_priv[i];
+
+ mutex_lock(&tcon->mutex);
+
+ if (tcon->inuse) {
+ mutex_unlock(&tcon->mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ tcon->inuse = true;
+
+ mutex_unlock(&tcon->mutex);
+
+ return tcon;
+}
+EXPORT_SYMBOL_GPL(dpu_tcon_get);
+
+/* Release a TCON previously acquired with dpu_tcon_get(). */
+void dpu_tcon_put(struct dpu_tcon *tcon)
+{
+ mutex_lock(&tcon->mutex);
+
+ tcon->inuse = false;
+
+ mutex_unlock(&tcon->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_tcon_put);
+
+/* TCON units need no one-time hardware defaults; kept for symmetry. */
+void _dpu_tcon_init(struct dpu_soc *dpu, unsigned int id)
+{
+}
+
+/*
+ * dpu_tcon_init - allocate and map one TCON
+ * @dpu: owning DPU instance
+ * @id: hardware id; tcon_ids[] is {0, 1}, so it is also used directly
+ * as the tcon_priv[] index here
+ * @unused: kept so the signature matches the other unit init functions
+ * @base: physical address of the unit registers
+ *
+ * Return: 0 on success or a negative errno.
+ */
+int dpu_tcon_init(struct dpu_soc *dpu, unsigned int id,
+ unsigned long unused, unsigned long base)
+{
+ struct dpu_tcon *tcon;
+
+ tcon = devm_kzalloc(dpu->dev, sizeof(*tcon), GFP_KERNEL);
+ if (!tcon)
+ return -ENOMEM;
+
+ dpu->tcon_priv[id] = tcon;
+
+ tcon->base = devm_ioremap(dpu->dev, base, SZ_512);
+ if (!tcon->base)
+ return -ENOMEM;
+
+ tcon->dpu = dpu;
+ /* fix: id was never recorded, leaving every TCON reporting id 0 */
+ tcon->id = id;
+ mutex_init(&tcon->mutex);
+
+ return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-vscaler.c b/drivers/gpu/imx/dpu/dpu-vscaler.c
new file mode 100644
index 000000000000..b1bdcd596392
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-vscaler.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+#define PIXENGCFG_DYNAMIC_SRC_SEL_MASK 0x3F
+
+#define SETUP1 0xC
+#define SCALE_FACTOR_MASK 0xFFFFF
+#define SCALE_FACTOR(n) ((n) & 0xFFFFF)
+#define SETUP2 0x10
+#define SETUP3 0x14
+#define SETUP4 0x18
+#define SETUP5 0x1C
+#define PHASE_OFFSET_MASK 0x1FFFFF
+#define PHASE_OFFSET(n) ((n) & 0x1FFFFF)
+#define CONTROL 0x20
+#define OUTPUT_SIZE_MASK 0x3FFF0000
+#define OUTPUT_SIZE(n) ((((n) - 1) << 16) & OUTPUT_SIZE_MASK)
+#define FIELD_MODE 0x3000
+#define FILTER_MODE 0x100
+#define SCALE_MODE 0x10
+#define MODE 0x1
+
+/*
+ * Legal PixEngCFG input sources per VScaler instance; row order matches
+ * vs_ids[] = {4, 5, 9}.  Trailing slots are zero-filled.
+ */
+static const vs_src_sel_t src_sels[3][6] = {
+ {
+ VS_SRC_SEL__DISABLE,
+ VS_SRC_SEL__FETCHDECODE0,
+ VS_SRC_SEL__MATRIX4,
+ VS_SRC_SEL__HSCALER4,
+ }, {
+ VS_SRC_SEL__DISABLE,
+ VS_SRC_SEL__FETCHDECODE1,
+ VS_SRC_SEL__MATRIX5,
+ VS_SRC_SEL__HSCALER5,
+ }, {
+ VS_SRC_SEL__DISABLE,
+ VS_SRC_SEL__MATRIX9,
+ VS_SRC_SEL__HSCALER9,
+ },
+};
+
+/* One VScaler instance. */
+struct dpu_vscaler {
+ void __iomem *pec_base; /* mapped PixEngCFG registers */
+ void __iomem *base; /* mapped unit registers */
+ struct mutex mutex; /* serializes register access and state */
+ int id; /* hardware id, from vs_ids[] */
+ bool inuse;
+ struct dpu_soc *dpu;
+ /* see DPU_PLANE_SRC_xxx */
+ unsigned int stream_id;
+};
+
+/* MMIO accessors for the VScaler's PixEngCFG register page. */
+static inline u32 dpu_pec_vs_read(struct dpu_vscaler *vs,
+ unsigned int offset)
+{
+ return readl(vs->pec_base + offset);
+}
+
+static inline void dpu_pec_vs_write(struct dpu_vscaler *vs,
+ unsigned int offset, u32 value)
+{
+ writel(value, vs->pec_base + offset);
+}
+
+/* MMIO accessors for the VScaler's own register page. */
+static inline u32 dpu_vs_read(struct dpu_vscaler *vs, unsigned int offset)
+{
+ return readl(vs->base + offset);
+}
+
+static inline void dpu_vs_write(struct dpu_vscaler *vs,
+ unsigned int offset, u32 value)
+{
+ writel(value, vs->base + offset);
+}
+
+/*
+ * vscaler_pixengcfg_dynamic_src_sel - select the unit's input source
+ * @vs: target VScaler
+ * @src: requested source; must be legal for this instance (src_sels[])
+ *
+ * Return: 0 on success, -EINVAL for an unknown unit id or an illegal
+ * source.
+ */
+int vscaler_pixengcfg_dynamic_src_sel(struct dpu_vscaler *vs, vs_src_sel_t src)
+{
+ struct dpu_soc *dpu = vs->dpu;
+ int i, j;
+ u32 val;
+
+ /*
+ * Use the shared vs_ids[] table from dpu-prv.h instead of a
+ * duplicated local copy, so the id list cannot diverge.
+ */
+ for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
+ if (vs_ids[i] == vs->id)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(vs_ids)))
+ return -EINVAL;
+
+ mutex_lock(&vs->mutex);
+ for (j = 0; j < ARRAY_SIZE(src_sels[0]); j++) {
+ if (src_sels[i][j] == src) {
+ val = dpu_pec_vs_read(vs, PIXENGCFG_DYNAMIC);
+ val &= ~PIXENGCFG_DYNAMIC_SRC_SEL_MASK;
+ val |= src;
+ dpu_pec_vs_write(vs, PIXENGCFG_DYNAMIC, val);
+ mutex_unlock(&vs->mutex);
+ return 0;
+ }
+ }
+ mutex_unlock(&vs->mutex);
+
+ dev_err(dpu->dev, "Invalid source for VScaler%d\n", vs->id);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vscaler_pixengcfg_dynamic_src_sel);
+
+/* Set the PixEngCFG clock-enable mode for this VScaler. */
+void vscaler_pixengcfg_clken(struct dpu_vscaler *vs, pixengcfg_clken_t clken)
+{
+ u32 val;
+
+ mutex_lock(&vs->mutex);
+ val = dpu_pec_vs_read(vs, PIXENGCFG_DYNAMIC);
+ val &= ~CLKEN_MASK;
+ val |= clken << CLKEN_MASK_SHIFT;
+ dpu_pec_vs_write(vs, PIXENGCFG_DYNAMIC, val);
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_pixengcfg_clken);
+
+/* Enable or disable register shadowing for this VScaler. */
+void vscaler_shden(struct dpu_vscaler *vs, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&vs->mutex);
+ val = dpu_vs_read(vs, STATICCONTROL);
+ if (enable)
+ val |= SHDEN;
+ else
+ val &= ~SHDEN;
+ dpu_vs_write(vs, STATICCONTROL, val);
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_shden);
+
+/*
+ * vscaler_setup1 - program the vertical scale factor
+ * @vs: target VScaler
+ * @src: source height in lines
+ * @dst: destination height in lines
+ * @deinterlace: doubles @dst (each field scales to a full frame)
+ *
+ * The factor is a fixed-point ratio of the smaller over the larger
+ * dimension, so it never exceeds 0x80000 (the 1:1 value).
+ */
+void vscaler_setup1(struct dpu_vscaler *vs, u32 src, u32 dst, bool deinterlace)
+{
+ struct dpu_soc *dpu = vs->dpu;
+ u32 scale_factor;
+ u64 tmp64;
+
+ if (deinterlace)
+ dst *= 2;
+
+ if (src == dst) {
+ scale_factor = 0x80000;
+ } else {
+ if (src > dst) {
+ tmp64 = (u64)((u64)dst * 0x80000);
+ do_div(tmp64, src);
+
+ } else {
+ tmp64 = (u64)((u64)src * 0x80000);
+ do_div(tmp64, dst);
+ }
+ scale_factor = (u32)tmp64;
+ }
+
+ WARN_ON(scale_factor > 0x80000);
+
+ mutex_lock(&vs->mutex);
+ dpu_vs_write(vs, SETUP1, SCALE_FACTOR(scale_factor));
+ mutex_unlock(&vs->mutex);
+
+ dev_dbg(dpu->dev, "Vscaler%d scale factor 0x%08x\n",
+ vs->id, scale_factor);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup1);
+
+/* Program the SETUP2 phase offset (fixed value per deinterlace mode). */
+void vscaler_setup2(struct dpu_vscaler *vs, bool deinterlace)
+{
+ /* 0x20000: +0.25 phase offset for deinterlace */
+ u32 phase_offset = deinterlace ? 0x20000 : 0;
+
+ mutex_lock(&vs->mutex);
+ dpu_vs_write(vs, SETUP2, PHASE_OFFSET(phase_offset));
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup2);
+
+/* Program the SETUP3 phase offset (fixed value per deinterlace mode). */
+void vscaler_setup3(struct dpu_vscaler *vs, bool deinterlace)
+{
+ /* 0x1e0000: -0.25 phase offset for deinterlace */
+ u32 phase_offset = deinterlace ? 0x1e0000 : 0;
+
+ mutex_lock(&vs->mutex);
+ dpu_vs_write(vs, SETUP3, PHASE_OFFSET(phase_offset));
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup3);
+
+/* Program the SETUP4 phase offset (caller-supplied value). */
+void vscaler_setup4(struct dpu_vscaler *vs, u32 phase_offset)
+{
+ mutex_lock(&vs->mutex);
+ dpu_vs_write(vs, SETUP4, PHASE_OFFSET(phase_offset));
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup4);
+
+/* Program the SETUP5 phase offset (caller-supplied value). */
+void vscaler_setup5(struct dpu_vscaler *vs, u32 phase_offset)
+{
+ mutex_lock(&vs->mutex);
+ dpu_vs_write(vs, SETUP5, PHASE_OFFSET(phase_offset));
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup5);
+
+/* Program the output height, in lines, into CONTROL. */
+void vscaler_output_size(struct dpu_vscaler *vs, u32 line_num)
+{
+ u32 val;
+
+ mutex_lock(&vs->mutex);
+ val = dpu_vs_read(vs, CONTROL);
+ val &= ~OUTPUT_SIZE_MASK;
+ val |= OUTPUT_SIZE(line_num);
+ dpu_vs_write(vs, CONTROL, val);
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_output_size);
+
+/* Set the CONTROL field-mode bits. */
+void vscaler_field_mode(struct dpu_vscaler *vs, scaler_field_mode_t m)
+{
+ u32 val;
+
+ mutex_lock(&vs->mutex);
+ val = dpu_vs_read(vs, CONTROL);
+ val &= ~FIELD_MODE;
+ val |= m;
+ dpu_vs_write(vs, CONTROL, val);
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_field_mode);
+
+/* Set the CONTROL filter-mode bit. */
+void vscaler_filter_mode(struct dpu_vscaler *vs, scaler_filter_mode_t m)
+{
+ u32 val;
+
+ mutex_lock(&vs->mutex);
+ val = dpu_vs_read(vs, CONTROL);
+ val &= ~FILTER_MODE;
+ val |= m;
+ dpu_vs_write(vs, CONTROL, val);
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_filter_mode);
+
+/* Set the CONTROL scale-mode bit. */
+void vscaler_scale_mode(struct dpu_vscaler *vs, scaler_scale_mode_t m)
+{
+ u32 val;
+
+ mutex_lock(&vs->mutex);
+ val = dpu_vs_read(vs, CONTROL);
+ val &= ~SCALE_MODE;
+ val |= m;
+ dpu_vs_write(vs, CONTROL, val);
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_scale_mode);
+
+/* Set the CONTROL MODE (unit active) bit. */
+void vscaler_mode(struct dpu_vscaler *vs, scaler_mode_t m)
+{
+ u32 val;
+
+ mutex_lock(&vs->mutex);
+ val = dpu_vs_read(vs, CONTROL);
+ val &= ~MODE;
+ val |= m;
+ dpu_vs_write(vs, CONTROL, val);
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_mode);
+
+/* True when the CONTROL MODE bit reads back as SCALER_ACTIVE. */
+bool vscaler_is_enabled(struct dpu_vscaler *vs)
+{
+ u32 val;
+
+ mutex_lock(&vs->mutex);
+ val = dpu_vs_read(vs, CONTROL);
+ mutex_unlock(&vs->mutex);
+
+ return (val & MODE) == SCALER_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(vscaler_is_enabled);
+
+/* Map the hardware id to its dpu_block_id_t; ID_NONE for unknown ids. */
+dpu_block_id_t vscaler_get_block_id(struct dpu_vscaler *vs)
+{
+ switch (vs->id) {
+ case 4:
+ return ID_VSCALER4;
+ case 5:
+ return ID_VSCALER5;
+ case 9:
+ return ID_VSCALER9;
+ default:
+ WARN_ON(1);
+ }
+
+ return ID_NONE;
+}
+EXPORT_SYMBOL_GPL(vscaler_get_block_id);
+
+/* Return the display stream this VScaler is routed to. */
+unsigned int vscaler_get_stream_id(struct dpu_vscaler *vs)
+{
+ return vs->stream_id;
+}
+EXPORT_SYMBOL_GPL(vscaler_get_stream_id);
+
+/* Record the display-stream routing; rejects values outside the enum. */
+void vscaler_set_stream_id(struct dpu_vscaler *vs, unsigned int id)
+{
+ switch (id) {
+ case DPU_PLANE_SRC_TO_DISP_STREAM0:
+ case DPU_PLANE_SRC_TO_DISP_STREAM1:
+ case DPU_PLANE_SRC_DISABLED:
+ vs->stream_id = id;
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+EXPORT_SYMBOL_GPL(vscaler_set_stream_id);
+
+/*
+ * dpu_vs_get - claim exclusive use of one VScaler
+ * @dpu: DPU instance to take the unit from
+ * @id: hardware VScaler id (must appear in vs_ids[])
+ *
+ * Return: the unit, ERR_PTR(-EINVAL) for an unknown id or
+ * ERR_PTR(-EBUSY) if already claimed.  Pair with dpu_vs_put().
+ */
+struct dpu_vscaler *dpu_vs_get(struct dpu_soc *dpu, int id)
+{
+ struct dpu_vscaler *vs;
+ int i;
+
+ /* map the hardware id (4/5/9) to its vs_priv[] index */
+ for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
+ if (vs_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(vs_ids))
+ return ERR_PTR(-EINVAL);
+
+ vs = dpu->vs_priv[i];
+
+ mutex_lock(&vs->mutex);
+
+ if (vs->inuse) {
+ mutex_unlock(&vs->mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ vs->inuse = true;
+
+ mutex_unlock(&vs->mutex);
+
+ return vs;
+}
+EXPORT_SYMBOL_GPL(dpu_vs_get);
+
+/* Release a VScaler previously acquired with dpu_vs_get(). */
+void dpu_vs_put(struct dpu_vscaler *vs)
+{
+ mutex_lock(&vs->mutex);
+
+ vs->inuse = false;
+
+ mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_vs_put);
+
+/*
+ * Program a VScaler to safe defaults: shadowing enabled, all phase
+ * offsets zeroed and the input source disconnected.
+ */
+void _dpu_vs_init(struct dpu_soc *dpu, unsigned int id)
+{
+ struct dpu_vscaler *vs;
+ int i;
+
+ /* map the hardware id (4/5/9) to its vs_priv[] index */
+ for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
+ if (vs_ids[i] == id)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(vs_ids)))
+ return;
+
+ vs = dpu->vs_priv[i];
+
+ vscaler_shden(vs, true);
+ vscaler_setup2(vs, false);
+ vscaler_setup3(vs, false);
+ vscaler_setup4(vs, 0);
+ vscaler_setup5(vs, 0);
+ vscaler_pixengcfg_dynamic_src_sel(vs, VS_SRC_SEL__DISABLE);
+}
+
+/*
+ * dpu_vs_init - allocate, map and reset one VScaler
+ * @dpu: owning DPU instance
+ * @id: hardware id (4, 5 or 9); unlike the LayerBlend/TCON inits this
+ * id is NOT an array index, so it is mapped through vs_ids[]
+ * @pec_base: physical address of the PixEngCFG registers
+ * @base: physical address of the unit registers
+ *
+ * Return: 0 on success or a negative errno.
+ */
+int dpu_vs_init(struct dpu_soc *dpu, unsigned int id,
+ unsigned long pec_base, unsigned long base)
+{
+ struct dpu_vscaler *vs;
+ int i;
+
+ vs = devm_kzalloc(dpu->dev, sizeof(*vs), GFP_KERNEL);
+ if (!vs)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
+ if (vs_ids[i] == id)
+ break;
+
+ if (i == ARRAY_SIZE(vs_ids))
+ return -EINVAL;
+
+ dpu->vs_priv[i] = vs;
+
+ vs->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_8);
+ if (!vs->pec_base)
+ return -ENOMEM;
+
+ vs->base = devm_ioremap(dpu->dev, base, SZ_1K);
+ if (!vs->base)
+ return -ENOMEM;
+
+ vs->dpu = dpu;
+ vs->id = id;
+
+ mutex_init(&vs->mutex);
+
+ _dpu_vs_init(dpu, id);
+
+ return 0;
+}
diff --git a/include/video/dpu.h b/include/video/dpu.h
new file mode 100644
index 000000000000..fdb74fa57944
--- /dev/null
+++ b/include/video/dpu.h
@@ -0,0 +1,644 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __DRM_DPU_H__
+#define __DRM_DPU_H__
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_modes.h>
+#include <video/videomode.h>
+
+struct dpu_soc;
+
+/*
+ * DPU interrupt lines; the values are the hardware interrupt line
+ * numbers (0..48).
+ */
+enum dpu_irq {
+	IRQ_STORE9_SHDLOAD = 0,
+	IRQ_STORE9_FRAMECOMPLETE = 1,
+	IRQ_STORE9_SEQCOMPLETE = 2,
+	IRQ_EXTDST0_SHDLOAD = 3,
+	IRQ_EXTDST0_FRAMECOMPLETE = 4,
+	IRQ_EXTDST0_SEQCOMPLETE = 5,
+	IRQ_EXTDST4_SHDLOAD = 6,
+	IRQ_EXTDST4_FRAMECOMPLETE = 7,
+	IRQ_EXTDST4_SEQCOMPLETE = 8,
+	IRQ_EXTDST1_SHDLOAD = 9,
+	IRQ_EXTDST1_FRAMECOMPLETE = 10,
+	IRQ_EXTDST1_SEQCOMPLETE = 11,
+	IRQ_EXTDST5_SHDLOAD = 12,
+	IRQ_EXTDST5_FRAMECOMPLETE = 13,
+	IRQ_EXTDST5_SEQCOMPLETE = 14,
+	IRQ_DISENGCFG_SHDLOAD0 = 15,
+	IRQ_DISENGCFG_FRAMECOMPLETE0 = 16,
+	IRQ_DISENGCFG_SEQCOMPLETE0 = 17,
+	IRQ_FRAMEGEN0_INT0 = 18,
+	IRQ_FRAMEGEN0_INT1 = 19,
+	IRQ_FRAMEGEN0_INT2 = 20,
+	IRQ_FRAMEGEN0_INT3 = 21,
+	IRQ_SIG0_SHDLOAD = 22,
+	IRQ_SIG0_VALID = 23,
+	IRQ_SIG0_ERROR = 24,
+	IRQ_DISENGCFG_SHDLOAD1 = 25,
+	IRQ_DISENGCFG_FRAMECOMPLETE1 = 26,
+	IRQ_DISENGCFG_SEQCOMPLETE1 = 27,
+	IRQ_FRAMEGEN1_INT0 = 28,
+	IRQ_FRAMEGEN1_INT1 = 29,
+	IRQ_FRAMEGEN1_INT2 = 30,
+	IRQ_FRAMEGEN1_INT3 = 31,
+	IRQ_SIG1_SHDLOAD = 32,
+	IRQ_SIG1_VALID = 33,
+	IRQ_SIG1_ERROR = 34,
+	IRQ_RESERVED = 35,
+	IRQ_CMDSEQ_ERROR = 36,
+	IRQ_COMCTRL_SW0 = 37,
+	IRQ_COMCTRL_SW1 = 38,
+	IRQ_COMCTRL_SW2 = 39,
+	IRQ_COMCTRL_SW3 = 40,
+	IRQ_FRAMEGEN0_PRIMSYNC_ON = 41,
+	IRQ_FRAMEGEN0_PRIMSYNC_OFF = 42,
+	IRQ_FRAMEGEN0_SECSYNC_ON = 43,
+	IRQ_FRAMEGEN0_SECSYNC_OFF = 44,
+	IRQ_FRAMEGEN1_PRIMSYNC_ON = 45,
+	IRQ_FRAMEGEN1_PRIMSYNC_OFF = 46,
+	IRQ_FRAMEGEN1_SECSYNC_ON = 47,
+	IRQ_FRAMEGEN1_SECSYNC_OFF = 48,
+};
+
+/*
+ * Hardware ids of the DPU processing blocks. The *_src_sel_t enums
+ * below reuse these values for pixengcfg dynamic source selection.
+ * Gaps in the numbering are ids not represented here.
+ */
+typedef enum {
+	ID_NONE = 0x00, /* 0 */
+	ID_FETCHDECODE9 = 0x01, /* 1 */
+	ID_FETCHPERSP9 = 0x02, /* 2 */
+	ID_FETCHECO9 = 0x03, /* 3 */
+	ID_ROP9 = 0x04, /* 4 */
+	ID_CLUT9 = 0x05, /* 5 */
+	ID_MATRIX9 = 0x06, /* 6 */
+	ID_HSCALER9 = 0x07, /* 7 */
+	ID_VSCALER9 = 0x08, /* 8 */
+	ID_FILTER9 = 0x09, /* 9 */
+	ID_BLITBLEND9 = 0x0A, /* 10 */
+	ID_CONSTFRAME0 = 0x0C, /* 12 */
+	ID_CONSTFRAME4 = 0x0E, /* 14 */
+	ID_CONSTFRAME1 = 0x10, /* 16 */
+	ID_CONSTFRAME5 = 0x12, /* 18 */
+	ID_FETCHWARP2 = 0x14, /* 20 */
+	ID_FETCHECO2 = 0x15, /* 21 */
+	ID_FETCHDECODE0 = 0x16, /* 22 */
+	ID_FETCHECO0 = 0x17, /* 23 */
+	ID_FETCHDECODE1 = 0x18, /* 24 */
+	ID_FETCHECO1 = 0x19, /* 25 */
+	ID_FETCHLAYER0 = 0x1a, /* 26 */
+	ID_MATRIX4 = 0x1B, /* 27 */
+	ID_HSCALER4 = 0x1C, /* 28 */
+	ID_VSCALER4 = 0x1D, /* 29 */
+	ID_MATRIX5 = 0x1E, /* 30 */
+	ID_HSCALER5 = 0x1F, /* 31 */
+	ID_VSCALER5 = 0x20, /* 32 */
+	ID_LAYERBLEND0 = 0x21, /* 33 */
+	ID_LAYERBLEND1 = 0x22, /* 34 */
+	ID_LAYERBLEND2 = 0x23, /* 35 */
+	ID_LAYERBLEND3 = 0x24, /* 36 */
+} dpu_block_id_t;
+
+/* Valid ExtDst input sources; values are dpu_block_id_t ids. */
+typedef enum {
+	ED_SRC_DISABLE = ID_NONE,
+	ED_SRC_BLITBLEND9 = ID_BLITBLEND9,
+	ED_SRC_CONSTFRAME0 = ID_CONSTFRAME0,
+	ED_SRC_CONSTFRAME1 = ID_CONSTFRAME1,
+	ED_SRC_CONSTFRAME4 = ID_CONSTFRAME4,
+	ED_SRC_CONSTFRAME5 = ID_CONSTFRAME5,
+	ED_SRC_MATRIX4 = ID_MATRIX4,
+	ED_SRC_HSCALER4 = ID_HSCALER4,
+	ED_SRC_VSCALER4 = ID_VSCALER4,
+	/* content stream(extdst 0/1) only */
+	ED_SRC_MATRIX5 = ID_MATRIX5,
+	ED_SRC_HSCALER5 = ID_HSCALER5,
+	ED_SRC_VSCALER5 = ID_VSCALER5,
+	/* content stream(extdst 0/1) only */
+	ED_SRC_LAYERBLEND3 = ID_LAYERBLEND3,
+	ED_SRC_LAYERBLEND2 = ID_LAYERBLEND2,
+	ED_SRC_LAYERBLEND1 = ID_LAYERBLEND1,
+	ED_SRC_LAYERBLEND0 = ID_LAYERBLEND0,
+} extdst_src_sel_t;
+
+/* ExtDst pipeline reconfiguration trigger mode. */
+typedef enum {
+	SINGLE, /* Reconfig pipeline after explicit trigger */
+	AUTO, /* Reconfig pipeline after every kick when idle */
+} ed_sync_mode_t;
+
+/* Status of the ExtDst reconfiguration pipeline. */
+typedef enum {
+	PSTATUS_EMPTY,
+	PSTATUS_RUNNING,
+	PSTATUS_RUNNING_RETRIGGERED,
+	PSTATUS_RESERVED
+} ed_pipeline_status_t;
+
+/* How an ExtDst kick may be generated. */
+typedef enum {
+	SOFTWARE = 0, /* kick generation by KICK field only */
+	EXTERNAL = BIT(8), /* kick signal from external allowed */
+} ed_kick_mode_t;
+
+/* Valid FetchDecode dynamic input sources; values are dpu_block_id_t ids. */
+typedef enum {
+	FD_SRC_DISABLE = ID_NONE,
+	FD_SRC_FETCHECO0 = ID_FETCHECO0,
+	FD_SRC_FETCHECO1 = ID_FETCHECO1,
+	FD_SRC_FETCHECO2 = ID_FETCHECO2,
+	FD_SRC_FETCHDECODE0 = ID_FETCHDECODE0,
+	FD_SRC_FETCHDECODE1 = ID_FETCHDECODE1,
+	FD_SRC_FETCHWARP2 = ID_FETCHWARP2,
+} fd_dynamic_src_sel_t;
+
+/* Fetch unit hardware capability type, as reported by *_fetchtype(). */
+typedef enum {
+	/* RL and RLAD decoder */
+	FETCHTYPE__DECODE,
+	/* fractional plane(8 layers) */
+	FETCHTYPE__LAYER,
+	/* arbitrary warping and fractional plane(8 layers) */
+	FETCHTYPE__WARP,
+	/* minimum feature set for alpha, chroma and coordinate planes */
+	FETCHTYPE__ECO,
+	/* affine, perspective and arbitrary warping */
+	FETCHTYPE__PERSP,
+	/* affine and arbitrary warping */
+	FETCHTYPE__ROT,
+	/* RL and RLAD decoder, reduced feature set */
+	FETCHTYPE__DECODEL,
+	/* fractional plane(8 layers), reduced feature set */
+	FETCHTYPE__LAYERL,
+	/* affine and arbitrary warping, reduced feature set */
+	FETCHTYPE__ROTL,
+} fetchtype_t;
+
+/* Framegen side-by-side synchronization mode (register field, bit 1..). */
+typedef enum {
+	/* No side-by-side synchronization. */
+	FGSYNCMODE__OFF = 0,
+	/* Framegen is master. */
+	FGSYNCMODE__MASTER = 1 << 1,
+	/* Runs in cyclic synchronization mode. */
+	FGSYNCMODE__SLAVE_CYC = 2 << 1,
+	/* Runs in one time synchronization mode. */
+	FGSYNCMODE__SLAVE_ONCE = 3 << 1,
+} fgsyncmode_t;
+
+/* Framegen display mode. */
+typedef enum {
+	FGDM__BLACK,
+	/* Constant Color Background is shown. */
+	FGDM__CONSTCOL,
+	FGDM__PRIM,
+	FGDM__SEC,
+	FGDM__PRIM_ON_TOP,
+	FGDM__SEC_ON_TOP,
+	/* White color background with test pattern is shown. */
+	FGDM__TEST,
+} fgdm_t;
+
+/* Valid HScaler input sources; values are dpu_block_id_t ids. */
+typedef enum {
+	HS_SRC_SEL__DISABLE = ID_NONE,
+	HS_SRC_SEL__MATRIX9 = ID_MATRIX9,
+	HS_SRC_SEL__VSCALER9 = ID_VSCALER9,
+	HS_SRC_SEL__FILTER9 = ID_FILTER9,
+	HS_SRC_SEL__FETCHDECODE0 = ID_FETCHDECODE0,
+	HS_SRC_SEL__FETCHDECODE1 = ID_FETCHDECODE1,
+	HS_SRC_SEL__MATRIX4 = ID_MATRIX4,
+	HS_SRC_SEL__VSCALER4 = ID_VSCALER4,
+	HS_SRC_SEL__MATRIX5 = ID_MATRIX5,
+	HS_SRC_SEL__VSCALER5 = ID_VSCALER5,
+} hs_src_sel_t;
+
+/* Valid LayerBlend primary input sources; values are dpu_block_id_t ids. */
+typedef enum {
+	/* common options */
+	LB_PRIM_SEL__DISABLE = ID_NONE,
+	LB_PRIM_SEL__BLITBLEND9 = ID_BLITBLEND9,
+	LB_PRIM_SEL__CONSTFRAME0 = ID_CONSTFRAME0,
+	LB_PRIM_SEL__CONSTFRAME1 = ID_CONSTFRAME1,
+	LB_PRIM_SEL__CONSTFRAME4 = ID_CONSTFRAME4,
+	LB_PRIM_SEL__CONSTFRAME5 = ID_CONSTFRAME5,
+	LB_PRIM_SEL__MATRIX4 = ID_MATRIX4,
+	LB_PRIM_SEL__HSCALER4 = ID_HSCALER4,
+	LB_PRIM_SEL__VSCALER4 = ID_VSCALER4,
+	LB_PRIM_SEL__MATRIX5 = ID_MATRIX5,
+	LB_PRIM_SEL__HSCALER5 = ID_HSCALER5,
+	LB_PRIM_SEL__VSCALER5 = ID_VSCALER5,
+	/*
+	 * special options:
+	 * layerblend(n) has n special options,
+	 * from layerblend0 to layerblend(n - 1), e.g.,
+	 * layerblend3 has 3 special options -
+	 * layerblend0/1/2.
+	 */
+	LB_PRIM_SEL__LAYERBLEND3 = ID_LAYERBLEND3,
+	LB_PRIM_SEL__LAYERBLEND2 = ID_LAYERBLEND2,
+	LB_PRIM_SEL__LAYERBLEND1 = ID_LAYERBLEND1,
+	LB_PRIM_SEL__LAYERBLEND0 = ID_LAYERBLEND0,
+} lb_prim_sel_t;
+
+/* Valid LayerBlend secondary input sources; values are dpu_block_id_t ids. */
+typedef enum {
+	LB_SEC_SEL__DISABLE = ID_NONE,
+	LB_SEC_SEL__FETCHWARP2 = ID_FETCHWARP2,
+	LB_SEC_SEL__FETCHDECODE0 = ID_FETCHDECODE0,
+	LB_SEC_SEL__FETCHDECODE1 = ID_FETCHDECODE1,
+	LB_SEC_SEL__MATRIX4 = ID_MATRIX4,
+	LB_SEC_SEL__HSCALER4 = ID_HSCALER4,
+	LB_SEC_SEL__VSCALER4 = ID_VSCALER4,
+	LB_SEC_SEL__MATRIX5 = ID_MATRIX5,
+	LB_SEC_SEL__HSCALER5 = ID_HSCALER5,
+	LB_SEC_SEL__VSCALER5 = ID_VSCALER5,
+	LB_SEC_SEL__FETCHLAYER0 = ID_FETCHLAYER0,
+} lb_sec_sel_t;
+
+/* Which LayerBlend shadow plane(s) a shadow operation targets. */
+typedef enum {
+	PRIMARY, /* background plane */
+	SECONDARY, /* foreground plane */
+	BOTH,
+} lb_shadow_sel_t;
+
+/* LayerBlend operating mode. */
+typedef enum {
+	LB_NEUTRAL, /* Output is same as primary input. */
+	LB_BLEND,
+} lb_mode_t;
+
+/* Scaler output field polarity source (register field at bit 12). */
+typedef enum {
+	/* Constant 0 indicates frame or top field. */
+	SCALER_ALWAYS0 = 0x0,
+	/* Constant 1 indicates bottom field. */
+	SCALER_ALWAYS1 = 0x1 << 12,
+	/* Output field polarity is taken from input field polarity. */
+	SCALER_INPUT = 0x2 << 12,
+	/* Output field polarity toggles, starting with 0 after reset. */
+	SCALER_TOGGLE = 0x3 << 12,
+} scaler_field_mode_t;
+
+/* Scaler filter selection. */
+typedef enum {
+	/* pointer-sampling */
+	SCALER_NEAREST = 0x0,
+	/* box filter */
+	SCALER_LINEAR = 0x100,
+} scaler_filter_mode_t;
+
+/* Scaler direction. */
+typedef enum {
+	SCALER_DOWNSCALE = 0x0,
+	SCALER_UPSCALE = 0x10,
+} scaler_scale_mode_t;
+
+/* Scaler enable/bypass control. */
+typedef enum {
+	/* Pixel by-pass the scaler, all other settings are ignored. */
+	SCALER_NEUTRAL = 0x0,
+	/* Scaler is active. */
+	SCALER_ACTIVE = 0x1,
+} scaler_mode_t;
+
+/* Valid VScaler input sources; values are dpu_block_id_t ids. */
+typedef enum {
+	VS_SRC_SEL__DISABLE = ID_NONE,
+	VS_SRC_SEL__MATRIX9 = ID_MATRIX9,
+	VS_SRC_SEL__HSCALER9 = ID_HSCALER9,
+	VS_SRC_SEL__FETCHDECODE0 = ID_FETCHDECODE0,
+	VS_SRC_SEL__FETCHDECODE1 = ID_FETCHDECODE1,
+	VS_SRC_SEL__MATRIX4 = ID_MATRIX4,
+	VS_SRC_SEL__HSCALER4 = ID_HSCALER4,
+	VS_SRC_SEL__MATRIX5 = ID_MATRIX5,
+	VS_SRC_SEL__HSCALER5 = ID_HSCALER5,
+} vs_src_sel_t;
+
+/* Pixengcfg clock enable field (2 bits at bit 24). */
+#define CLKEN_MASK (0x3 << 24)
+#define CLKEN_MASK_SHIFT 24
+typedef enum {
+	CLKEN__DISABLE = 0x0,
+	CLKEN__AUTOMATIC = 0x1,
+	CLKEN__FULL = 0x3,
+} pixengcfg_clken_t;
+
+/* fetch unit types */
+enum {
+	FU_T_NA,
+	FU_T_FD,	/* fetchdecode */
+	FU_T_FE,	/* fetcheco */
+	FU_T_FL,	/* fetchlayer */
+	FU_T_FW,	/* fetchwarp */
+};
+
+struct dpu_fetchunit;
+
+/*
+ * Per-type operation table shared by all fetch unit variants; each
+ * fetch unit driver provides its own implementation.
+ */
+struct dpu_fetchunit_ops {
+	void (*set_burstlength)(struct dpu_fetchunit *fu);
+
+	void (*set_baseaddress)(struct dpu_fetchunit *fu, dma_addr_t baddr);
+
+	void (*set_src_bpp)(struct dpu_fetchunit *fu, int bpp);
+
+	void (*set_src_stride)(struct dpu_fetchunit *fu, unsigned int stride);
+
+	void (*set_src_buf_dimensions)(struct dpu_fetchunit *fu,
+				       unsigned int w, unsigned int h, u32 fmt,
+				       bool deinterlace);
+
+	void (*set_fmt)(struct dpu_fetchunit *fu, u32 fmt, bool deinterlace);
+
+	void (*enable_src_buf)(struct dpu_fetchunit *fu);
+	void (*disable_src_buf)(struct dpu_fetchunit *fu);
+	bool (*is_enabled)(struct dpu_fetchunit *fu);
+
+	void (*set_framedimensions)(struct dpu_fetchunit *fu,
+				    unsigned int w, unsigned int h,
+				    bool deinterlace);
+
+	void (*set_controltrigger)(struct dpu_fetchunit *fu);
+
+	unsigned int (*get_stream_id)(struct dpu_fetchunit *fu);
+	void (*set_stream_id)(struct dpu_fetchunit *fu, unsigned int id);
+};
+
+/* Common state of one fetch unit instance. */
+struct dpu_fetchunit {
+	void __iomem *pec_base;	/* mapped pixengcfg registers */
+	void __iomem *base;	/* mapped unit registers */
+	char *name;
+	struct mutex mutex;	/* protects the inuse flag */
+	int id;
+	int sub_id;	/* for fractional fetch units */
+	int type;	/* one of FU_T_* */
+	bool inuse;
+	struct dpu_soc *dpu;
+	/* see DPU_PLANE_SRC_xxx */
+	unsigned int stream_id;
+	const struct dpu_fetchunit_ops *ops;
+};
+
+int dpu_map_irq(struct dpu_soc *dpu, int irq);
+
+/*
+ * Constant Frame Unit
+ * NOTE(review): dpu_cf_get()/dpu_cf_put() presumably follow the same
+ * reserve/release pattern as dpu_vs_get()/dpu_vs_put() — verify in
+ * dpu-constframe.c.
+ */
+struct dpu_constframe;
+void constframe_shden(struct dpu_constframe *cf, bool enable);
+void constframe_framedimensions(struct dpu_constframe *cf, unsigned int w,
+				unsigned int h);
+void constframe_framedimensions_copy_prim(struct dpu_constframe *cf);
+void constframe_constantcolor(struct dpu_constframe *cf, unsigned int r,
+			      unsigned int g, unsigned int b, unsigned int a);
+void constframe_controltrigger(struct dpu_constframe *cf, bool trigger);
+struct dpu_constframe *dpu_cf_get(struct dpu_soc *dpu, int id);
+void dpu_cf_put(struct dpu_constframe *cf);
+
+/* Display Engine Configuration Unit */
+struct dpu_disengcfg;
+struct dpu_disengcfg *dpu_dec_get(struct dpu_soc *dpu, int id);
+void dpu_dec_put(struct dpu_disengcfg *dec);
+
+/* External Destination Unit */
+struct dpu_extdst;
+void extdst_pixengcfg_shden(struct dpu_extdst *ed, bool enable);
+void extdst_pixengcfg_powerdown(struct dpu_extdst *ed, bool powerdown);
+void extdst_pixengcfg_sync_mode(struct dpu_extdst *ed, ed_sync_mode_t mode);
+void extdst_pixengcfg_reset(struct dpu_extdst *ed, bool reset);
+void extdst_pixengcfg_div(struct dpu_extdst *ed, u16 div);
+int extdst_pixengcfg_src_sel(struct dpu_extdst *ed, extdst_src_sel_t src);
+void extdst_pixengcfg_sel_shdldreq(struct dpu_extdst *ed);
+void extdst_pixengcfg_shdldreq(struct dpu_extdst *ed, u32 req_mask);
+void extdst_pixengcfg_sync_trigger(struct dpu_extdst *ed);
+void extdst_pixengcfg_trigger_sequence_complete(struct dpu_extdst *ed);
+bool extdst_pixengcfg_is_sync_busy(struct dpu_extdst *ed);
+ed_pipeline_status_t extdst_pixengcfg_pipeline_status(struct dpu_extdst *ed);
+void extdst_shden(struct dpu_extdst *ed, bool enable);
+void extdst_kick_mode(struct dpu_extdst *ed, ed_kick_mode_t mode);
+void extdst_perfcountmode(struct dpu_extdst *ed, bool enable);
+void extdst_gamma_apply_enable(struct dpu_extdst *ed, bool enable);
+void extdst_kick(struct dpu_extdst *ed);
+void extdst_cnt_err_clear(struct dpu_extdst *ed);
+bool extdst_cnt_err_status(struct dpu_extdst *ed);
+u32 extdst_last_control_word(struct dpu_extdst *ed);
+void extdst_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y);
+void extdst_last_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y);
+u32 extdst_perfresult(struct dpu_extdst *ed);
+struct dpu_extdst *dpu_ed_get(struct dpu_soc *dpu, int id);
+void dpu_ed_put(struct dpu_extdst *ed);
+
+/* Fetch Decode Unit */
+int fetchdecode_pixengcfg_dynamic_src_sel(struct dpu_fetchunit *fu,
+					  fd_dynamic_src_sel_t src);
+void fetchdecode_layeroffset(struct dpu_fetchunit *fd, unsigned int x,
+			     unsigned int y);
+void fetchdecode_clipoffset(struct dpu_fetchunit *fd, unsigned int x,
+			    unsigned int y);
+void fetchdecode_clipdimensions(struct dpu_fetchunit *fd, unsigned int w,
+				unsigned int h);
+void fetchdecode_rgb_constantcolor(struct dpu_fetchunit *fd,
+				   u8 r, u8 g, u8 b, u8 a);
+void fetchdecode_yuv_constantcolor(struct dpu_fetchunit *fd,
+				   u8 y, u8 u, u8 v);
+int fetchdecode_fetchtype(struct dpu_fetchunit *fd, fetchtype_t *type);
+u32 fetchdecode_get_vproc_mask(struct dpu_fetchunit *fd);
+bool fetchdecode_need_fetcheco(struct dpu_fetchunit *fd, u32 fmt);
+struct dpu_fetchunit *dpu_fd_get(struct dpu_soc *dpu, int id);
+void dpu_fd_put(struct dpu_fetchunit *fu);
+
+/* Fetch ECO Unit */
+void fetcheco_layeroffset(struct dpu_fetchunit *fu, unsigned int x,
+			  unsigned int y);
+void fetcheco_clipoffset(struct dpu_fetchunit *fu, unsigned int x,
+			 unsigned int y);
+void fetcheco_clipdimensions(struct dpu_fetchunit *fu, unsigned int w,
+			     unsigned int h);
+void fetcheco_frameresampling(struct dpu_fetchunit *fu, unsigned int x,
+			      unsigned int y);
+int fetcheco_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
+dpu_block_id_t fetcheco_get_block_id(struct dpu_fetchunit *fu);
+struct dpu_fetchunit *dpu_fe_get(struct dpu_soc *dpu, int id);
+void dpu_fe_put(struct dpu_fetchunit *fu);
+
+/* Fetch Layer Unit */
+void fetchlayer_rgb_constantcolor(struct dpu_fetchunit *fu,
+				  u8 r, u8 g, u8 b, u8 a);
+void fetchlayer_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v);
+int fetchlayer_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
+struct dpu_fetchunit *dpu_fl_get(struct dpu_soc *dpu, int id);
+void dpu_fl_put(struct dpu_fetchunit *fu);
+
+/* Fetch Warp Unit */
+void fetchwarp_rgb_constantcolor(struct dpu_fetchunit *fu,
+				 u8 r, u8 g, u8 b, u8 a);
+void fetchwarp_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v);
+int fetchwarp_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
+struct dpu_fetchunit *dpu_fw_get(struct dpu_soc *dpu, int id);
+void dpu_fw_put(struct dpu_fetchunit *fu);
+
+/* Frame Generator Unit */
+struct dpu_framegen;
+void framegen_enable(struct dpu_framegen *fg);
+void framegen_disable(struct dpu_framegen *fg);
+void framegen_shdtokgen(struct dpu_framegen *fg);
+void framegen_syncmode(struct dpu_framegen *fg, fgsyncmode_t mode);
+void
+framegen_cfg_videomode(struct dpu_framegen *fg, struct drm_display_mode *m);
+void framegen_pkickconfig(struct dpu_framegen *fg, bool enable);
+void framegen_sacfg(struct dpu_framegen *fg, unsigned int x, unsigned int y);
+void framegen_displaymode(struct dpu_framegen *fg, fgdm_t mode);
+void framegen_panic_displaymode(struct dpu_framegen *fg, fgdm_t mode);
+void framegen_wait_done(struct dpu_framegen *fg, struct drm_display_mode *m);
+void framegen_read_timestamp(struct dpu_framegen *fg,
+			     u32 *frame_index, u32 *line_index);
+void framegen_wait_for_frame_counter_moving(struct dpu_framegen *fg);
+bool framegen_secondary_is_syncup(struct dpu_framegen *fg);
+void framegen_wait_for_secondary_syncup(struct dpu_framegen *fg);
+void framegen_enable_clock(struct dpu_framegen *fg);
+void framegen_disable_clock(struct dpu_framegen *fg);
+struct dpu_framegen *dpu_fg_get(struct dpu_soc *dpu, int id);
+void dpu_fg_put(struct dpu_framegen *fg);
+
+/* Horizontal Scaler Unit */
+struct dpu_hscaler;
+int hscaler_pixengcfg_dynamic_src_sel(struct dpu_hscaler *hs, hs_src_sel_t src);
+void hscaler_pixengcfg_clken(struct dpu_hscaler *hs, pixengcfg_clken_t clken);
+void hscaler_shden(struct dpu_hscaler *hs, bool enable);
+void hscaler_setup1(struct dpu_hscaler *hs, unsigned int src, unsigned int dst);
+void hscaler_setup2(struct dpu_hscaler *hs, u32 phase_offset);
+void hscaler_output_size(struct dpu_hscaler *hs, u32 line_num);
+void hscaler_filter_mode(struct dpu_hscaler *hs, scaler_filter_mode_t m);
+void hscaler_scale_mode(struct dpu_hscaler *hs, scaler_scale_mode_t m);
+void hscaler_mode(struct dpu_hscaler *hs, scaler_mode_t m);
+bool hscaler_is_enabled(struct dpu_hscaler *hs);
+dpu_block_id_t hscaler_get_block_id(struct dpu_hscaler *hs);
+unsigned int hscaler_get_stream_id(struct dpu_hscaler *hs);
+void hscaler_set_stream_id(struct dpu_hscaler *hs, unsigned int id);
+struct dpu_hscaler *dpu_hs_get(struct dpu_soc *dpu, int id);
+void dpu_hs_put(struct dpu_hscaler *hs);
+
+/* Layer Blend Unit */
+struct dpu_layerblend;
+int layerblend_pixengcfg_dynamic_prim_sel(struct dpu_layerblend *lb,
+					  lb_prim_sel_t prim);
+void layerblend_pixengcfg_dynamic_sec_sel(struct dpu_layerblend *lb,
+					  lb_sec_sel_t sec);
+void layerblend_pixengcfg_clken(struct dpu_layerblend *lb,
+				pixengcfg_clken_t clken);
+void layerblend_shden(struct dpu_layerblend *lb, bool enable);
+void layerblend_shdtoksel(struct dpu_layerblend *lb, lb_shadow_sel_t sel);
+void layerblend_shdldsel(struct dpu_layerblend *lb, lb_shadow_sel_t sel);
+void layerblend_control(struct dpu_layerblend *lb, lb_mode_t mode);
+void layerblend_blendcontrol(struct dpu_layerblend *lb, bool sec_from_scaler);
+void layerblend_position(struct dpu_layerblend *lb, int x, int y);
+struct dpu_layerblend *dpu_lb_get(struct dpu_soc *dpu, int id);
+void dpu_lb_put(struct dpu_layerblend *lb);
+
+/* Timing Controller Unit */
+struct dpu_tcon;
+int tcon_set_fmt(struct dpu_tcon *tcon, u32 bus_format);
+void tcon_set_operation_mode(struct dpu_tcon *tcon);
+void tcon_cfg_videomode(struct dpu_tcon *tcon, struct drm_display_mode *m);
+struct dpu_tcon *dpu_tcon_get(struct dpu_soc *dpu, int id);
+void dpu_tcon_put(struct dpu_tcon *tcon);
+
+/*
+ * Vertical Scaler Unit
+ * dpu_vs_get() reserves a unit (ERR_PTR on invalid id or busy);
+ * dpu_vs_put() releases it.
+ */
+struct dpu_vscaler;
+int vscaler_pixengcfg_dynamic_src_sel(struct dpu_vscaler *vs, vs_src_sel_t src);
+void vscaler_pixengcfg_clken(struct dpu_vscaler *vs, pixengcfg_clken_t clken);
+void vscaler_shden(struct dpu_vscaler *vs, bool enable);
+void vscaler_setup1(struct dpu_vscaler *vs, u32 src, u32 dst, bool deinterlace);
+void vscaler_setup2(struct dpu_vscaler *vs, bool deinterlace);
+void vscaler_setup3(struct dpu_vscaler *vs, bool deinterlace);
+void vscaler_setup4(struct dpu_vscaler *vs, u32 phase_offset);
+void vscaler_setup5(struct dpu_vscaler *vs, u32 phase_offset);
+void vscaler_output_size(struct dpu_vscaler *vs, u32 line_num);
+void vscaler_field_mode(struct dpu_vscaler *vs, scaler_field_mode_t m);
+void vscaler_filter_mode(struct dpu_vscaler *vs, scaler_filter_mode_t m);
+void vscaler_scale_mode(struct dpu_vscaler *vs, scaler_scale_mode_t m);
+void vscaler_mode(struct dpu_vscaler *vs, scaler_mode_t m);
+bool vscaler_is_enabled(struct dpu_vscaler *vs);
+dpu_block_id_t vscaler_get_block_id(struct dpu_vscaler *vs);
+unsigned int vscaler_get_stream_id(struct dpu_vscaler *vs);
+void vscaler_set_stream_id(struct dpu_vscaler *vs, unsigned int id);
+struct dpu_vscaler *dpu_vs_get(struct dpu_soc *dpu, int id);
+void dpu_vs_put(struct dpu_vscaler *vs);
+
+/* Video-processing units attached to a fetchdecode unit. */
+struct dpu_fetchunit *fetchdecode_get_fetcheco(struct dpu_fetchunit *fu);
+struct dpu_hscaler *fetchdecode_get_hscaler(struct dpu_fetchunit *fu);
+struct dpu_vscaler *fetchdecode_get_vscaler(struct dpu_fetchunit *fu);
+
+/* Capability checks on a vproc mask (see fetchdecode_get_vproc_mask()). */
+bool dpu_vproc_has_fetcheco_cap(u32 cap_mask);
+bool dpu_vproc_has_hscale_cap(u32 cap_mask);
+bool dpu_vproc_has_vscale_cap(u32 cap_mask);
+
+u32 dpu_vproc_get_fetcheco_cap(u32 cap_mask);
+u32 dpu_vproc_get_hscale_cap(u32 cap_mask);
+u32 dpu_vproc_get_vscale_cap(u32 cap_mask);
+
+/* Helpers shared by all fetch unit types (dpu-fetchunit.c). */
+void fetchunit_shden(struct dpu_fetchunit *fu, bool enable);
+void fetchunit_baddr_autoupdate(struct dpu_fetchunit *fu, u8 layer_mask);
+void fetchunit_shdldreq_sticky(struct dpu_fetchunit *fu, u8 layer_mask);
+void fetchunit_set_burstlength(struct dpu_fetchunit *fu);
+void fetchunit_set_baseaddress(struct dpu_fetchunit *fu, dma_addr_t baddr);
+void fetchunit_set_src_bpp(struct dpu_fetchunit *fu, int bpp);
+void fetchunit_set_src_stride(struct dpu_fetchunit *fu, unsigned int stride);
+void fetchunit_enable_src_buf(struct dpu_fetchunit *fu);
+void fetchunit_disable_src_buf(struct dpu_fetchunit *fu);
+bool fetchunit_is_enabled(struct dpu_fetchunit *fu);
+unsigned int fetchunit_get_stream_id(struct dpu_fetchunit *fu);
+void fetchunit_set_stream_id(struct dpu_fetchunit *fu, unsigned int id);
+bool fetchunit_is_fetchdecode(struct dpu_fetchunit *fu);
+bool fetchunit_is_fetcheco(struct dpu_fetchunit *fu);
+bool fetchunit_is_fetchlayer(struct dpu_fetchunit *fu);
+bool fetchunit_is_fetchwarp(struct dpu_fetchunit *fu);
+
+/*
+ * to avoid on-the-fly/hot plane resource migration
+ * between two display interfaces
+ */
+#define DPU_PLANE_SRC_TO_DISP_STREAM0 BIT(0)
+#define DPU_PLANE_SRC_TO_DISP_STREAM1 BIT(1)
+#define DPU_PLANE_SRC_DISABLED 0
+
+/* Pools of hardware units available to the planes of one DPU. */
+struct dpu_plane_res {
+	struct dpu_constframe *cf[2];
+	struct dpu_extdst *ed[2];
+	struct dpu_fetchunit *fd[2];
+	struct dpu_fetchunit *fe[2];
+	struct dpu_fetchunit *fl[1];
+	struct dpu_fetchunit *fw[1];
+	struct dpu_framegen *fg[2];
+	struct dpu_hscaler *hs[2];
+	struct dpu_layerblend *lb[4];
+	struct dpu_vscaler *vs[2];
+};
+
+/*
+ * Each DPU plane can be a primary plane or an overlay plane
+ * of one of the DPU's two CRTCs.
+ */
+#define DPU_PLANE_SRC_FL0_ID BIT(0)
+#define DPU_PLANE_SRC_FW2_ID BIT(1)
+#define DPU_PLANE_SRC_FD0_ID BIT(2)
+#define DPU_PLANE_SRC_FD1_ID BIT(3)
+
+/* A group of plane resources shared by the two display streams. */
+struct dpu_plane_grp {
+	struct dpu_plane_res res;
+	unsigned int hw_plane_num;
+	unsigned int hw_plane_fetcheco_num;
+	unsigned int hw_plane_hscaler_num;
+	unsigned int hw_plane_vscaler_num;
+	unsigned int id;
+	bool has_vproc;
+
+	/* used when assigning plane source */
+	struct mutex mutex;
+	u32 src_mask;		/* DPU_PLANE_SRC_*_ID bits */
+	u32 src_a_mask;
+	u32 src_use_vproc_mask;
+};
+
+/* Map an embedded dpu_plane_res back to its enclosing dpu_plane_grp. */
+static inline struct dpu_plane_grp *plane_res_to_grp(struct dpu_plane_res *res)
+{
+	return container_of(res, struct dpu_plane_grp, res);
+}
+
+/* Platform data handed to DPU client (display stream) devices. */
+struct dpu_client_platformdata {
+	const unsigned int stream_id;	/* see DPU_PLANE_SRC_TO_DISP_STREAMn */
+	struct dpu_plane_grp *plane_grp;
+
+	struct device_node *of_node;
+};
+#endif /* __DRM_DPU_H__ */