diff options
54 files changed, 11226 insertions, 24 deletions
diff --git a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt index 5bf77f6dd19d..a374d53de755 100644 --- a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt +++ b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt @@ -110,6 +110,218 @@ prg@21cc000 { fsl,pres = <&pre1>, <&pre2>, <&pre3>; }; +Freescale i.MX DPU +==================== + +Required properties: +- compatible: Should be "fsl,<chip>-dpu" +- reg: should be register base and length as documented in the + datasheet +- interrupt-parent: phandle pointing to the parent interrupt controller. +- interrupts, interrupt-names: Should contain interrupts and names as + documented in the datasheet. +- clocks, clock-names: phandles to the DPU clocks described in + Documentation/devicetree/bindings/clock/clock-bindings.txt + The following clocks are expected on i.MX8qxp: + "pll0" - PLL clock for display interface 0 + "pll1" - PLL clock for display interface 1 + "disp0" - pixel clock for display interface 0 + "disp1" - pixel clock for display interface 1 + The needed clock numbers for each are documented in + Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt. +- power-domains: phandles pointing to power domain. +- power-domain-names: power domain names relevant to power-domains phandles. +- fsl,dpr-channels: phandles to the DPR channels attached to this DPU, + sorted by memory map addresses. +- fsl,pixel-combiner: phandle to the pixel combiner unit attached to this DPU. +Optional properties: +- port@[0-1]: Port nodes with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + ports 0 and 1 should correspond to display interface 0 and + display interface 1, respectively. 
+ +example: + +dpu: dpu@56180000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,imx8qxp-dpu"; + reg = <0x56180000 0x40000>; + interrupt-parent = <&irqsteer_dpu>; + interrupts = <448>, <449>, <450>, <64>, + <65>, <66>, <67>, <68>, + <69>, <70>, <193>, <194>, + <195>, <196>, <197>, <72>, + <73>, <74>, <75>, <76>, + <77>, <78>, <79>, <80>, + <81>, <199>, <200>, <201>, + <202>, <203>, <204>, <205>, + <206>, <207>, <208>, <0>, + <1>, <2>, <3>, <4>, + <82>, <83>, <84>, <85>, + <209>, <210>, <211>, <212>; + interrupt-names = "store9_shdload", + "store9_framecomplete", + "store9_seqcomplete", + "extdst0_shdload", + "extdst0_framecomplete", + "extdst0_seqcomplete", + "extdst4_shdload", + "extdst4_framecomplete", + "extdst4_seqcomplete", + "extdst1_shdload", + "extdst1_framecomplete", + "extdst1_seqcomplete", + "extdst5_shdload", + "extdst5_framecomplete", + "extdst5_seqcomplete", + "disengcfg_shdload0", + "disengcfg_framecomplete0", + "disengcfg_seqcomplete0", + "framegen0_int0", + "framegen0_int1", + "framegen0_int2", + "framegen0_int3", + "sig0_shdload", + "sig0_valid", + "sig0_error", + "disengcfg_shdload1", + "disengcfg_framecomplete1", + "disengcfg_seqcomplete1", + "framegen1_int0", + "framegen1_int1", + "framegen1_int2", + "framegen1_int3", + "sig1_shdload", + "sig1_valid", + "sig1_error", + "cmdseq_error", + "comctrl_sw0", + "comctrl_sw1", + "comctrl_sw2", + "comctrl_sw3", + "framegen0_primsync_on", + "framegen0_primsync_off", + "framegen0_secsync_on", + "framegen0_secsync_off", + "framegen1_primsync_on", + "framegen1_primsync_off", + "framegen1_secsync_on", + "framegen1_secsync_off"; + clocks = <&dc_lpcg IMX_DC0_PLL0_CLK>, + <&dc_lpcg IMX_DC0_PLL1_CLK>, + <&dc_lpcg IMX_DC0_DISP0_CLK>, + <&dc_lpcg IMX_DC0_DISP1_CLK>; + clock-names = "pll0", "pll1", "disp0", "disp1"; + power-domains = <&pd IMX_SC_R_DC_0>, + <&pd IMX_SC_R_DC_0_PLL_0>, + <&pd IMX_SC_R_DC_0_PLL_1>; + power-domain-names = "dc", "pll0", "pll1"; + fsl,dpr-channels = <&dc0_dpr1_channel1>, 
<&dc0_dpr1_channel2>, + <&dc0_dpr1_channel3>, <&dc0_dpr2_channel1>, + <&dc0_dpr2_channel2>, <&dc0_dpr2_channel3>; + fsl,pixel-combiner = <&dc0_pc>; + + dpu_disp0: port@0 { + reg = <0>; + + dpu_disp0_lvds0_ch0: endpoint@0 { + remote-endpoint = <&ldb1_ch0>; + }; + + dpu_disp0_lvds0_ch1: endpoint@1 { + remote-endpoint = <&ldb1_ch1>; + }; + + dpu_disp0_mipi_dsi: endpoint@2 { + }; + }; + + dpu_disp1: port@1 { + reg = <1>; + + dpu_disp1_lvds1_ch0: endpoint@0 { + remote-endpoint = <&ldb2_ch0>; + }; + + dpu_disp1_lvds1_ch1: endpoint@1 { + remote-endpoint = <&ldb2_ch1>; + }; + + dpu_disp1_mipi_dsi: endpoint@2 { + }; + }; +}; + +Freescale i.MX8 PC (Pixel Combiner) +============================================= +Required properties: +- compatible: should be "fsl,<chip>-pixel-combiner" +- reg: should be register base and length as documented in the + datasheet +- power-domains: phandle pointing to power domain + +example: + +pixel-combiner@56020000 { + compatible = "fsl,imx8qm-pixel-combiner"; + reg = <0x56020000 0x10000>; + power-domains = <&pd IMX_SC_R_DC_0>; +}; + +Freescale i.MX8 PRG (Prefetch Resolve Gasket) +============================================= +Required properties: +- compatible: should be "fsl,<chip>-prg" +- reg: should be register base and length as documented in the + datasheet +- clocks: phandles to the PRG apb and rtram clocks, as described in + Documentation/devicetree/bindings/clock/clock-bindings.txt and + Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt. 
+- clock-names: should be "apb" and "rtram" +- power-domains: phandle pointing to power domain + +example: + +prg@56040000 { + compatible = "fsl,imx8qm-prg"; + reg = <0x56040000 0x10000>; + clocks = <&dc0_prg0_lpcg 0>, <&dc0_prg0_lpcg 1>; + clock-names = "apb", "rtram"; + power-domains = <&pd IMX_SC_R_DC_0>; +}; + +Freescale i.MX8 DPRC (Display Prefetch Resolve Channel) +======================================================= +Required properties: +- compatible: should be "fsl,<chip>-dpr-channel" +- reg: should be register base and length as documented in the + datasheet +- fsl,sc-resource: SCU resource number as defined in + include/dt-bindings/firmware/imx/rsrc.h +- fsl,prgs: phandles to the PRG unit(s) attached to this DPRC, the first one + is the primary PRG and the second one (if available) is the auxiliary PRG + which is used to fetch luma chunk of a YUV frame with 2 planes. +- clocks: phandles to the DPRC apb, b and rtram clocks, as described in + Documentation/devicetree/bindings/clock/clock-bindings.txt and + Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt. +- clock-names: should be "apb", "b" and "rtram" +- power-domains: phandle pointing to power domain + +example: + +dpr-channel@560e0000 { + compatible = "fsl,imx8qm-dpr-channel"; + reg = <0x560e0000 0x10000>; + fsl,sc-resource = <IMX_SC_R_DC_0_BLIT1>; + fsl,prgs = <&dc0_prg2>, <&dc0_prg1>; + clocks = <&dc0_dpr0_lpcg 0>, + <&dc0_dpr0_lpcg 1>, + <&dc0_rtram0_lpcg 0>; + clock-names = "apb", "b", "rtram"; + power-domains = <&pd IMX_SC_R_DC_0>; +}; + Parallel display support ======================== diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index f17d01f076c7..a6ac74a9f4b1 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -3,5 +3,5 @@ # taken to initialize them in the correct order. Link order is the only way # to ensure this currently. 
obj-$(CONFIG_TEGRA_HOST1X) += host1x/ +obj-y += imx/ obj-y += drm/ vga/ -obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 6231048aa5aa..30acf9be20c6 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -6,7 +6,7 @@ config DRM_IMX select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST) - depends on IMX_IPUV3_CORE + depends on IMX_IPUV3_CORE || IMX_DPU_CORE help enable i.MX graphics support @@ -33,6 +33,13 @@ config DRM_IMX_LDB Choose this to enable the internal LVDS Display Bridge (LDB) found on i.MX53 and i.MX6 processors. +config DRM_IMX_IPUV3 + tristate + depends on DRM_IMX + depends on IMX_IPUV3_CORE + default y if DRM_IMX=y + default m if DRM_IMX=m + config DRM_IMX_HDMI tristate "Freescale i.MX DRM HDMI" select DRM_DW_HDMI @@ -40,4 +47,5 @@ config DRM_IMX_HDMI help Choose this if you want to use HDMI on i.MX6. +source "drivers/gpu/drm/imx/dpu/Kconfig" source "drivers/gpu/drm/imx/dcss/Kconfig" diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile index b644deffe948..bd07c635ee95 100644 --- a/drivers/gpu/drm/imx/Makefile +++ b/drivers/gpu/drm/imx/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o +imxdrm-objs := imx-drm-core.o obj-$(CONFIG_DRM_IMX) += imxdrm.o @@ -8,5 +8,8 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o +imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o +obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o +obj-$(CONFIG_DRM_IMX_DPU) += dpu/ obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o obj-$(CONFIG_DRM_IMX_DCSS) += dcss/ diff --git a/drivers/gpu/drm/imx/dpu/Kconfig b/drivers/gpu/drm/imx/dpu/Kconfig new file mode 100644 index 000000000000..c5bd97f4f95b --- /dev/null +++ b/drivers/gpu/drm/imx/dpu/Kconfig @@ -0,0 +1,6 @@ 
+config DRM_IMX_DPU + tristate + depends on DRM_IMX + depends on IMX_DPU_CORE + default y if DRM_IMX=y + default m if DRM_IMX=m diff --git a/drivers/gpu/drm/imx/dpu/Makefile b/drivers/gpu/drm/imx/dpu/Makefile new file mode 100644 index 000000000000..89c22ffe3c1a --- /dev/null +++ b/drivers/gpu/drm/imx/dpu/Makefile @@ -0,0 +1,4 @@ +ccflags-y += -I $(srctree)/$(src)/../ + +imx-dpu-crtc-objs := dpu-crtc.o dpu-kms.o dpu-plane.o +obj-$(CONFIG_DRM_IMX_DPU) += imx-dpu-crtc.o diff --git a/drivers/gpu/drm/imx/dpu/dpu-crtc.c b/drivers/gpu/drm/imx/dpu/dpu-crtc.c new file mode 100644 index 000000000000..0d6b33f2934b --- /dev/null +++ b/drivers/gpu/drm/imx/dpu/dpu-crtc.c @@ -0,0 +1,1212 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <linux/component.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <video/dpu.h> +#include <video/imx8-pc.h> +#include "dpu-crtc.h" +#include "dpu-kms.h" +#include "dpu-plane.h" +#include "../imx-drm.h" + +static inline struct dpu_plane_state ** +alloc_dpu_plane_states(struct dpu_crtc *dpu_crtc) +{ + struct dpu_plane_state **states; + + states = kcalloc(dpu_crtc->hw_plane_num, sizeof(*states), GFP_KERNEL); + if (!states) + return ERR_PTR(-ENOMEM); + + return states; +} + +static void dpu_crtc_queue_state_event(struct drm_crtc *crtc) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + WARN_ON(drm_crtc_vblank_get(crtc)); + WARN_ON(dpu_crtc->event); + dpu_crtc->event = crtc->state->event; + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); +} + +struct dpu_plane_state ** +crtc_state_get_dpu_plane_states(struct drm_crtc_state *state) +{ + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(state); + struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state); + + return dcstate->dpu_plane_states; +} + +struct dpu_crtc *dpu_crtc_get_aux_dpu_crtc(struct dpu_crtc *dpu_crtc) +{ + struct drm_crtc *crtc = &dpu_crtc->base, *tmp_crtc; + struct drm_device *dev = crtc->dev; + struct dpu_crtc *aux_dpu_crtc = NULL; + + drm_for_each_crtc(tmp_crtc, dev) { + if (tmp_crtc == crtc) + continue; + + aux_dpu_crtc = to_dpu_crtc(tmp_crtc); + + if (dpu_crtc->crtc_grp_id == aux_dpu_crtc->crtc_grp_id) + break; + } + + BUG_ON(!aux_dpu_crtc); + + return aux_dpu_crtc; +} + +static void dpu_crtc_atomic_enable(struct drm_crtc *crtc, + struct 
drm_crtc_state *old_crtc_state) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc); + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state); + struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state); + struct dpu_plane *dplane = to_dpu_plane(crtc->primary); + struct dpu_plane_res *res = &dplane->grp->res; + struct dpu_extdst *plane_ed = res->ed[dplane->stream_id]; + struct dpu_extdst *aux_plane_ed = dpu_aux_ed_peek(plane_ed); + struct dpu_extdst *m_plane_ed = NULL, *s_plane_ed; + struct completion *shdld_done; + struct completion *m_safety_shdld_done, *s_safety_shdld_done; + struct completion *m_content_shdld_done, *s_content_shdld_done; + struct completion *m_dec_shdld_done, *s_dec_shdld_done; + unsigned long ret, flags; + + drm_crtc_vblank_on(crtc); + + if (dcstate->use_pc) { + tcon_enable_pc(dpu_crtc->tcon); + + if (extdst_is_master(plane_ed)) { + m_plane_ed = plane_ed; + s_plane_ed = aux_plane_ed; + } else { + m_plane_ed = aux_plane_ed; + s_plane_ed = plane_ed; + } + extdst_pixengcfg_syncmode_master(m_plane_ed, true); + extdst_pixengcfg_syncmode_master(s_plane_ed, false); + } else { + extdst_pixengcfg_syncmode_master(plane_ed, false); + } + + enable_irq(dpu_crtc->safety_shdld_irq); + enable_irq(dpu_crtc->content_shdld_irq); + enable_irq(dpu_crtc->dec_shdld_irq); + if (dcstate->use_pc) { + enable_irq(aux_dpu_crtc->safety_shdld_irq); + enable_irq(aux_dpu_crtc->content_shdld_irq); + enable_irq(aux_dpu_crtc->dec_shdld_irq); + } + + if (dcstate->use_pc) { + framegen_enable_clock(dpu_crtc->stream_id ? + dpu_crtc->aux_fg : dpu_crtc->fg); + extdst_pixengcfg_sync_trigger(m_plane_ed); + framegen_shdtokgen(dpu_crtc->m_fg); + + /* don't relinquish CPU until TCONs are set to operation mode */ + local_irq_save(flags); + preempt_disable(); + /* First turn on the slave stream, second the master stream. 
*/ + framegen_enable(dpu_crtc->s_fg); + framegen_enable(dpu_crtc->m_fg); + /* + * TKT320590: + * Turn TCONs into operation mode as soon as the first dumb + * frame is generated by DPU from the master stream(we don't + * relinquish CPU to ensure this). This makes DPRs/PRGs of + * the dual stream be able to evade the dumb frames of the + * dual stream respectively. + */ + framegen_wait_for_frame_counter_moving(dpu_crtc->m_fg); + /* again, slave first, then master */ + tcon_set_operation_mode(dpu_crtc->s_tcon); + tcon_set_operation_mode(dpu_crtc->m_tcon); + local_irq_restore(flags); + preempt_enable(); + + framegen_enable_pixel_link(dpu_crtc->s_fg); + framegen_enable_pixel_link(dpu_crtc->m_fg); + + if (dpu_crtc->aux_is_master) { + m_safety_shdld_done = &aux_dpu_crtc->safety_shdld_done; + m_content_shdld_done = &aux_dpu_crtc->content_shdld_done; + m_dec_shdld_done = &aux_dpu_crtc->dec_shdld_done; + s_safety_shdld_done = &dpu_crtc->safety_shdld_done; + s_content_shdld_done = &dpu_crtc->content_shdld_done; + s_dec_shdld_done = &dpu_crtc->dec_shdld_done; + } else { + m_safety_shdld_done = &dpu_crtc->safety_shdld_done; + m_content_shdld_done = &dpu_crtc->content_shdld_done; + m_dec_shdld_done = &dpu_crtc->dec_shdld_done; + s_safety_shdld_done = &aux_dpu_crtc->safety_shdld_done; + s_content_shdld_done = &aux_dpu_crtc->content_shdld_done; + s_dec_shdld_done = &aux_dpu_crtc->dec_shdld_done; + } + + ret = wait_for_completion_timeout(m_safety_shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for master safety shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + ret = wait_for_completion_timeout(m_content_shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for master content shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + ret = wait_for_completion_timeout(m_dec_shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for master DEC shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + + ret = 
wait_for_completion_timeout(s_safety_shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for slave safety shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + ret = wait_for_completion_timeout(s_content_shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for slave content shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + ret = wait_for_completion_timeout(s_dec_shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for slave DEC shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + } else { + framegen_enable_clock(dpu_crtc->fg); + extdst_pixengcfg_sync_trigger(plane_ed); + extdst_pixengcfg_sync_trigger(dpu_crtc->ed); + framegen_shdtokgen(dpu_crtc->fg); + + /* don't relinquish CPU until TCON is set to operation mode */ + local_irq_save(flags); + preempt_disable(); + framegen_enable(dpu_crtc->fg); + /* + * TKT320590: + * Turn TCON into operation mode as soon as the first dumb + * frame is generated by DPU(we don't relinquish CPU to ensure + * this). This makes DPR/PRG be able to evade the frame. 
+ */ + framegen_wait_for_frame_counter_moving(dpu_crtc->fg); + tcon_set_operation_mode(dpu_crtc->tcon); + local_irq_restore(flags); + preempt_enable(); + + framegen_enable_pixel_link(dpu_crtc->fg); + + shdld_done = &dpu_crtc->safety_shdld_done; + ret = wait_for_completion_timeout(shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for safety shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + shdld_done = &dpu_crtc->content_shdld_done; + ret = wait_for_completion_timeout(shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for content shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + shdld_done = &dpu_crtc->dec_shdld_done; + ret = wait_for_completion_timeout(shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for DEC shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + } + + disable_irq(dpu_crtc->safety_shdld_irq); + disable_irq(dpu_crtc->content_shdld_irq); + disable_irq(dpu_crtc->dec_shdld_irq); + if (dcstate->use_pc) { + disable_irq(aux_dpu_crtc->safety_shdld_irq); + disable_irq(aux_dpu_crtc->content_shdld_irq); + disable_irq(aux_dpu_crtc->dec_shdld_irq); + } + + dpu_crtc_queue_state_event(crtc); + + if (dcstate->use_pc) { + framegen_wait_for_secondary_syncup(dpu_crtc->m_fg); + framegen_wait_for_secondary_syncup(dpu_crtc->s_fg); + + if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->m_fg)) { + framegen_secondary_clear_channel_status(dpu_crtc->m_fg); + DRM_WARN("[CRTC:%d:%s] %s: master FrameGen requests to read empty FIFO\n", + crtc->base.id, crtc->name, __func__); + } + if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->s_fg)) { + framegen_secondary_clear_channel_status(dpu_crtc->s_fg); + DRM_WARN("[CRTC:%d:%s] %s: slave FrameGen requests to read empty FIFO\n", + crtc->base.id, crtc->name, __func__); + } + } else { + framegen_wait_for_secondary_syncup(dpu_crtc->fg); + + if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->fg)) { + 
framegen_secondary_clear_channel_status(dpu_crtc->fg); + DRM_WARN("[CRTC:%d:%s] %s: FrameGen requests to read empty FIFO\n", + crtc->base.id, crtc->name, __func__); + } + } +} + +static void dpu_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct imx_crtc_state *imx_crtc_state = + to_imx_crtc_state(old_crtc_state); + struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state); + struct drm_display_mode *adjusted_mode = &old_crtc_state->adjusted_mode; + + if (dcstate->use_pc) { + tcon_disable_pc(dpu_crtc->tcon); + + framegen_disable_pixel_link(dpu_crtc->m_fg); + framegen_disable_pixel_link(dpu_crtc->s_fg); + + /* First turn off the master stream, second the slave stream. */ + framegen_disable(dpu_crtc->m_fg); + framegen_disable(dpu_crtc->s_fg); + + framegen_wait_done(dpu_crtc->m_fg, adjusted_mode); + framegen_wait_done(dpu_crtc->s_fg, adjusted_mode); + + framegen_disable_clock(dpu_crtc->stream_id ? 
+ dpu_crtc->aux_fg : dpu_crtc->fg); + } else { + framegen_disable_pixel_link(dpu_crtc->fg); + framegen_disable(dpu_crtc->fg); + framegen_wait_done(dpu_crtc->fg, adjusted_mode); + framegen_disable_clock(dpu_crtc->fg); + } + + drm_crtc_vblank_off(crtc); + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event && !crtc->state->active) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); +} + +static void dpu_drm_crtc_reset(struct drm_crtc *crtc) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct imx_crtc_state *imx_crtc_state; + struct dpu_crtc_state *state; + + if (crtc->state) { + __drm_atomic_helper_crtc_destroy_state(crtc->state); + + imx_crtc_state = to_imx_crtc_state(crtc->state); + state = to_dpu_crtc_state(imx_crtc_state); + kfree(state->dpu_plane_states); + kfree(state); + crtc->state = NULL; + } + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) { + crtc->state = &state->imx_crtc_state.base; + crtc->state->crtc = crtc; + + state->dpu_plane_states = alloc_dpu_plane_states(dpu_crtc); + if (IS_ERR(state->dpu_plane_states)) + kfree(state); + } +} + +static struct drm_crtc_state * +dpu_drm_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct imx_crtc_state *imx_crtc_state; + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc_state *state, *copy; + + if (WARN_ON(!crtc->state)) + return NULL; + + copy = kzalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) + return NULL; + + copy->dpu_plane_states = alloc_dpu_plane_states(dpu_crtc); + if (IS_ERR(copy->dpu_plane_states)) { + kfree(copy); + return NULL; + } + + __drm_atomic_helper_crtc_duplicate_state(crtc, + ©->imx_crtc_state.base); + imx_crtc_state = to_imx_crtc_state(crtc->state); + state = to_dpu_crtc_state(imx_crtc_state); + copy->use_pc = state->use_pc; + + return ©->imx_crtc_state.base; +} + +static void dpu_drm_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ 
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(state); + struct dpu_crtc_state *dcstate; + + if (state) { + __drm_atomic_helper_crtc_destroy_state(state); + dcstate = to_dpu_crtc_state(imx_crtc_state); + kfree(dcstate->dpu_plane_states); + kfree(dcstate); + } +} + +static int dpu_enable_vblank(struct drm_crtc *crtc) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + + enable_irq(dpu_crtc->vbl_irq); + + return 0; +} + +static void dpu_disable_vblank(struct drm_crtc *crtc) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + + disable_irq_nosync(dpu_crtc->vbl_irq); +} + +static const struct drm_crtc_funcs dpu_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .page_flip = drm_atomic_helper_page_flip, + .reset = dpu_drm_crtc_reset, + .atomic_duplicate_state = dpu_drm_crtc_duplicate_state, + .atomic_destroy_state = dpu_drm_crtc_destroy_state, + .enable_vblank = dpu_enable_vblank, + .disable_vblank = dpu_disable_vblank, +}; + +static irqreturn_t dpu_vbl_irq_handler(int irq, void *dev_id) +{ + struct dpu_crtc *dpu_crtc = dev_id; + struct drm_crtc *crtc = &dpu_crtc->base; + unsigned long flags; + + drm_crtc_handle_vblank(crtc); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (dpu_crtc->event) { + drm_crtc_send_vblank_event(crtc, dpu_crtc->event); + dpu_crtc->event = NULL; + drm_crtc_vblank_put(crtc); + } + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t dpu_safety_shdld_irq_handler(int irq, void *dev_id) +{ + struct dpu_crtc *dpu_crtc = dev_id; + + complete(&dpu_crtc->safety_shdld_done); + + return IRQ_HANDLED; +} + +static irqreturn_t dpu_content_shdld_irq_handler(int irq, void *dev_id) +{ + struct dpu_crtc *dpu_crtc = dev_id; + + complete(&dpu_crtc->content_shdld_done); + + return IRQ_HANDLED; +} + +static irqreturn_t dpu_dec_shdld_irq_handler(int irq, void *dev_id) +{ + struct dpu_crtc *dpu_crtc = dev_id; + + 
complete(&dpu_crtc->dec_shdld_done); + + return IRQ_HANDLED; +} + +static int dpu_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +{ + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct dpu_plane_state *dpstate; + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); + struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct videomode vm; + unsigned long encoder_type = DRM_MODE_ENCODER_NONE; + u32 encoder_mask; + int i = 0; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + encoder_mask = 1 << drm_encoder_index(encoder); + + if (!(crtc_state->encoder_mask & encoder_mask)) + continue; + + encoder_type = encoder->encoder_type; + } + + if (crtc_state->enable && dcstate->use_pc) { + if (encoder_type != DRM_MODE_ENCODER_TMDS) { + DRM_DEBUG_KMS("[CRTC:%d:%s] enc type %lu doesn't support pc\n", + crtc->base.id, crtc->name, encoder_type); + return -EINVAL; + } + + drm_display_mode_to_videomode(mode, &vm); + if ((vm.hactive % 2) || (vm.hfront_porch % 2) || + (vm.hsync_len % 2) || (vm.hback_porch % 2)) { + DRM_DEBUG_KMS("[CRTC:%d:%s] video mode is invalid\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + } + + /* + * cache the plane states so that the planes can be disabled in + * ->atomic_begin. 
+ */ + drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { + plane_state = + drm_atomic_get_plane_state(crtc_state->state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + dpstate = to_dpu_plane_state(plane_state); + dcstate->dpu_plane_states[i++] = dpstate; + } + + return 0; +} + +static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct imx_crtc_state *old_imx_crtc_state = + to_imx_crtc_state(old_crtc_state); + struct dpu_crtc_state *old_dcstate = + to_dpu_crtc_state(old_imx_crtc_state); + int i; + + /* + * Disable all planes' resources in SHADOW only. + * Whether any of them would be disabled or kept running depends + * on new plane states' commit. + */ + for (i = 0; i < dpu_crtc->hw_plane_num; i++) { + struct dpu_plane_state *old_dpstate; + struct drm_plane_state *plane_state; + struct dpu_plane *dplane; + struct drm_plane *plane; + struct dpu_plane_res *res; + struct dpu_fetchunit *fu; + struct dpu_fetchunit *fe = NULL; + struct dpu_hscaler *hs = NULL; + struct dpu_vscaler *vs = NULL; + struct dpu_layerblend *lb; + struct dpu_extdst *ed; + extdst_src_sel_t ed_src; + dpu_block_id_t blend, source; + unsigned int stream_id; + int lb_id; + bool crtc_disabling_on_primary; + bool release_aux_source; + + old_dpstate = old_dcstate->dpu_plane_states[i]; + if (!old_dpstate) + continue; + + plane_state = &old_dpstate->base; + dplane = to_dpu_plane(plane_state->plane); + res = &dplane->grp->res; + + release_aux_source = false; +again: + crtc_disabling_on_primary = false; + + if (old_dcstate->use_pc) { + if (release_aux_source) { + source = old_dpstate->aux_source; + blend = old_dpstate->aux_blend; + stream_id = 1; + } else { + source = old_dpstate->source; + blend = old_dpstate->blend; + stream_id = old_dpstate->left_src_w ? 
0 : 1; + } + } else { + source = old_dpstate->source; + blend = old_dpstate->blend; + stream_id = dplane->stream_id; + } + + fu = source_to_fu(res, source); + if (!fu) + return; + + lb_id = blend_to_id(blend); + if (lb_id < 0) + return; + + lb = res->lb[lb_id]; + + layerblend_pixengcfg_clken(lb, CLKEN__DISABLE); + if (fetchunit_is_fetchdecode(fu)) { + fe = fetchdecode_get_fetcheco(fu); + hs = fetchdecode_get_hscaler(fu); + vs = fetchdecode_get_vscaler(fu); + hscaler_pixengcfg_clken(hs, CLKEN__DISABLE); + vscaler_pixengcfg_clken(vs, CLKEN__DISABLE); + hscaler_mode(hs, SCALER_NEUTRAL); + vscaler_mode(vs, SCALER_NEUTRAL); + } + if ((!old_dcstate->use_pc && old_dpstate->is_top) || + (old_dcstate->use_pc && + ((!stream_id && old_dpstate->is_left_top) || + (stream_id && old_dpstate->is_right_top)))) { + ed = res->ed[stream_id]; + ed_src = stream_id ? + ED_SRC_CONSTFRAME1 : ED_SRC_CONSTFRAME0; + extdst_pixengcfg_src_sel(ed, ed_src); + } + + plane = old_dpstate->base.plane; + if (!crtc->state->enable && + plane->type == DRM_PLANE_TYPE_PRIMARY) + crtc_disabling_on_primary = true; + + if (crtc_disabling_on_primary && old_dpstate->use_prefetch) { + fu->ops->pin_off(fu); + if (fetchunit_is_fetchdecode(fu) && + fe->ops->is_enabled(fe)) + fe->ops->pin_off(fe); + } else { + fu->ops->disable_src_buf(fu); + fu->ops->unpin_off(fu); + if (fetchunit_is_fetchdecode(fu)) { + fetchdecode_pixengcfg_dynamic_src_sel(fu, + FD_SRC_DISABLE); + fe->ops->disable_src_buf(fe); + fe->ops->unpin_off(fe); + } + } + + if (old_dpstate->need_aux_source && !release_aux_source) { + release_aux_source = true; + goto again; + } + } +} + +static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc), *aux_dpu_crtc = NULL; + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state); + struct imx_crtc_state *old_imx_crtc_state = + to_imx_crtc_state(old_crtc_state); + struct dpu_crtc_state *dcstate = 
to_dpu_crtc_state(imx_crtc_state); + struct dpu_crtc_state *old_dcstate = + to_dpu_crtc_state(old_imx_crtc_state); + struct dpu_plane *dplane = to_dpu_plane(crtc->primary); + struct dpu_plane_res *res = &dplane->grp->res; + struct dpu_extdst *ed = res->ed[dplane->stream_id], *aux_ed; + struct completion *shdld_done; + struct completion *m_content_shdld_done = NULL; + struct completion *s_content_shdld_done = NULL; + unsigned long ret; + bool need_modeset = drm_atomic_crtc_needs_modeset(crtc->state); + int i; + + if (!crtc->state->active && !old_crtc_state->active) + return; + + if (dcstate->use_pc) { + aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc); + + if (dpu_crtc->aux_is_master) { + m_content_shdld_done = &aux_dpu_crtc->content_shdld_done; + s_content_shdld_done = &dpu_crtc->content_shdld_done; + } else { + m_content_shdld_done = &dpu_crtc->content_shdld_done; + s_content_shdld_done = &aux_dpu_crtc->content_shdld_done; + } + } + + if (!need_modeset) { + enable_irq(dpu_crtc->content_shdld_irq); + if (dcstate->use_pc) + enable_irq(aux_dpu_crtc->content_shdld_irq); + + if (dcstate->use_pc) { + if (extdst_is_master(ed)) { + extdst_pixengcfg_sync_trigger(ed); + } else { + aux_ed = dpu_aux_ed_peek(ed); + extdst_pixengcfg_sync_trigger(aux_ed); + } + } else { + extdst_pixengcfg_sync_trigger(ed); + } + + if (dcstate->use_pc) { + shdld_done = m_content_shdld_done; + ret = wait_for_completion_timeout(shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for master content shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + + shdld_done = s_content_shdld_done; + ret = wait_for_completion_timeout(shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for slave content shdld done timeout\n", + crtc->base.id, crtc->name, __func__); + } else { + shdld_done = &dpu_crtc->content_shdld_done; + ret = wait_for_completion_timeout(shdld_done, HZ); + if (ret == 0) + DRM_WARN("[CRTC:%d:%s] %s: wait for content shdld done timeout\n", + 
crtc->base.id, crtc->name, __func__); + } + + disable_irq(dpu_crtc->content_shdld_irq); + if (dcstate->use_pc) + disable_irq(aux_dpu_crtc->content_shdld_irq); + + if (dcstate->use_pc) { + if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->m_fg)) { + framegen_secondary_clear_channel_status(dpu_crtc->m_fg); + DRM_WARN("[CRTC:%d:%s] %s: master FrameGen requests to read empty FIFO\n", + crtc->base.id, crtc->name, __func__); + } + if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->s_fg)) { + framegen_secondary_clear_channel_status(dpu_crtc->s_fg); + DRM_WARN("[CRTC:%d:%s] %s: slave FrameGen requests to read empty FIFO\n", + crtc->base.id, crtc->name, __func__); + } + } else { + if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->fg)) { + framegen_secondary_clear_channel_status(dpu_crtc->fg); + DRM_WARN("[CRTC:%d:%s] %s: FrameGen requests to read empty FIFO\n", + crtc->base.id, crtc->name, __func__); + } + } + + dpu_crtc_queue_state_event(crtc); + } else if (!crtc->state->active) { + if (old_dcstate->use_pc) { + if (extdst_is_master(ed)) { + extdst_pixengcfg_sync_trigger(ed); + } else { + aux_ed = dpu_aux_ed_peek(ed); + extdst_pixengcfg_sync_trigger(aux_ed); + } + } else { + extdst_pixengcfg_sync_trigger(ed); + } + } + + for (i = 0; i < dpu_crtc->hw_plane_num; i++) { + struct dpu_plane_state *old_dpstate; + struct dpu_fetchunit *fu; + struct dpu_fetchunit *fe; + struct dpu_hscaler *hs; + struct dpu_vscaler *vs; + dpu_block_id_t source; + bool aux_source_disable; + + old_dpstate = old_dcstate->dpu_plane_states[i]; + if (!old_dpstate) + continue; + + aux_source_disable = false; +again: + source = aux_source_disable ? 
+ old_dpstate->aux_source : old_dpstate->source; + fu = source_to_fu(res, old_dpstate->source); + if (!fu) + return; + + if (!fu->ops->is_enabled(fu) || fu->ops->is_pinned_off(fu)) + fu->ops->set_stream_id(fu, DPU_PLANE_SRC_DISABLED); + + if (!fetchunit_is_fetchdecode(fu)) + continue; + + fe = fetchdecode_get_fetcheco(fu); + if (!fe->ops->is_enabled(fe) || fe->ops->is_pinned_off(fe)) + fe->ops->set_stream_id(fe, DPU_PLANE_SRC_DISABLED); + + hs = fetchdecode_get_hscaler(fu); + if (!hscaler_is_enabled(hs)) + hscaler_set_stream_id(hs, DPU_PLANE_SRC_DISABLED); + + vs = fetchdecode_get_vscaler(fu); + if (!vscaler_is_enabled(vs)) + vscaler_set_stream_id(vs, DPU_PLANE_SRC_DISABLED); + + if (old_dpstate->need_aux_source && !aux_source_disable) { + aux_source_disable = true; + goto again; + } + } +} + +static void dpu_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc); + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state); + struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state); + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct dpu_plane *dplane = to_dpu_plane(crtc->primary); + struct dpu_plane_res *res = &dplane->grp->res; + struct dpu_constframe *pa_cf, *sa_cf; + struct dpu_disengcfg *dec; + struct dpu_extdst *ed, *plane_ed; + struct dpu_framegen *fg; + struct dpu_tcon *tcon; + struct dpu_store *st; + struct drm_encoder *encoder; + unsigned long encoder_type = DRM_MODE_ENCODER_NONE; + unsigned int stream_id; + int crtc_hdisplay = dcstate->use_pc ? 
+ (mode->crtc_hdisplay >> 1) : mode->crtc_hdisplay; + extdst_src_sel_t ed_src; + bool cfg_aux_pipe = false; + + DRM_DEBUG_KMS("[CRTC:%d:%s] %s: mode->hdisplay: %d\n", + crtc->base.id, crtc->name, __func__, mode->hdisplay); + DRM_DEBUG_KMS("[CRTC:%d:%s] %s: mode->vdisplay: %d\n", + crtc->base.id, crtc->name, __func__, mode->vdisplay); + DRM_DEBUG_KMS("[CRTC:%d:%s] %s: mode->clock: %dKHz\n", + crtc->base.id, crtc->name, __func__, mode->clock); + DRM_DEBUG_KMS("[CRTC:%d:%s] %s: mode->vrefresh: %dHz\n", + crtc->base.id, crtc->name, __func__, + drm_mode_vrefresh(mode)); + if (dcstate->use_pc) + DRM_DEBUG_KMS("[CRTC:%d:%s] %s: use pixel combiner\n", + crtc->base.id, crtc->name, __func__); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + encoder_type = encoder->encoder_type; + break; + } + } + +again: + if (cfg_aux_pipe) { + pa_cf = dpu_crtc->aux_pa_cf; + sa_cf = dpu_crtc->aux_sa_cf; + dec = dpu_crtc->aux_dec; + ed = dpu_crtc->aux_ed; + fg = dpu_crtc->aux_fg; + tcon = dpu_crtc->aux_tcon; + st = aux_dpu_crtc->st; + stream_id = dpu_crtc->stream_id ^ 1; + } else { + pa_cf = dpu_crtc->pa_cf; + sa_cf = dpu_crtc->sa_cf; + dec = dpu_crtc->dec; + ed = dpu_crtc->ed; + fg = dpu_crtc->fg; + tcon = dpu_crtc->tcon; + st = dpu_crtc->st; + stream_id = dpu_crtc->stream_id; + } + + if (dcstate->use_pc) { + store_pixengcfg_syncmode_fixup(st, true); + framegen_syncmode_fixup(fg, + framegen_is_master(fg) ? false : true); + framegen_syncmode(fg, framegen_is_master(fg) ? 
+ FGSYNCMODE__MASTER : FGSYNCMODE__SLAVE_ONCE); + } else { + store_pixengcfg_syncmode_fixup(st, false); + framegen_syncmode_fixup(fg, false); + framegen_syncmode(fg, FGSYNCMODE__OFF); + } + + framegen_cfg_videomode(fg, mode, dcstate->use_pc, encoder_type); + framegen_displaymode(fg, FGDM__SEC_ON_TOP); + + framegen_panic_displaymode(fg, FGDM__TEST); + + tcon_cfg_videomode(tcon, mode, dcstate->use_pc); + tcon_set_fmt(tcon, imx_crtc_state->bus_format); + tcon_configure_pc(tcon, stream_id, mode->crtc_hdisplay, + dcstate->use_pc ? PC_COMBINE : PC_BYPASS, 0); + + constframe_framedimensions(pa_cf, crtc_hdisplay, mode->crtc_vdisplay); + constframe_framedimensions(sa_cf, crtc_hdisplay, mode->crtc_vdisplay); + constframe_constantcolor(sa_cf, 0, 0, 0, 0); + + ed_src = stream_id ? ED_SRC_CONSTFRAME5 : ED_SRC_CONSTFRAME4; + extdst_pixengcfg_src_sel(ed, ed_src); + + plane_ed = res->ed[stream_id]; + ed_src = stream_id ? ED_SRC_CONSTFRAME1 : ED_SRC_CONSTFRAME0; + extdst_pixengcfg_src_sel(plane_ed, ed_src); + + if (dcstate->use_pc && !cfg_aux_pipe) { + cfg_aux_pipe = true; + goto again; + } +} + +static const struct drm_crtc_helper_funcs dpu_helper_funcs = { + .mode_set_nofb = dpu_crtc_mode_set_nofb, + .atomic_check = dpu_crtc_atomic_check, + .atomic_begin = dpu_crtc_atomic_begin, + .atomic_flush = dpu_crtc_atomic_flush, + .atomic_enable = dpu_crtc_atomic_enable, + .atomic_disable = dpu_crtc_atomic_disable, +}; + +static void dpu_crtc_put_resources(struct dpu_crtc *dpu_crtc) +{ + if (!IS_ERR_OR_NULL(dpu_crtc->pa_cf)) + dpu_cf_put(dpu_crtc->pa_cf); + if (!IS_ERR_OR_NULL(dpu_crtc->sa_cf)) + dpu_cf_put(dpu_crtc->sa_cf); + if (!IS_ERR_OR_NULL(dpu_crtc->dec)) + dpu_dec_put(dpu_crtc->dec); + if (!IS_ERR_OR_NULL(dpu_crtc->ed)) + dpu_ed_put(dpu_crtc->ed); + if (!IS_ERR_OR_NULL(dpu_crtc->fg)) + dpu_fg_put(dpu_crtc->fg); + if (!IS_ERR_OR_NULL(dpu_crtc->tcon)) + dpu_tcon_put(dpu_crtc->tcon); +} + +static int dpu_crtc_get_resources(struct dpu_crtc *dpu_crtc) +{ + struct dpu_soc *dpu = 
dev_get_drvdata(dpu_crtc->dev->parent); + unsigned int stream_id = dpu_crtc->stream_id; + int ret; + + dpu_crtc->pa_cf = dpu_cf_get(dpu, stream_id + 4); + if (IS_ERR(dpu_crtc->pa_cf)) { + ret = PTR_ERR(dpu_crtc->pa_cf); + goto err_out; + } + dpu_crtc->aux_pa_cf = dpu_aux_cf_peek(dpu_crtc->pa_cf); + + dpu_crtc->sa_cf = dpu_cf_get(dpu, stream_id); + if (IS_ERR(dpu_crtc->sa_cf)) { + ret = PTR_ERR(dpu_crtc->sa_cf); + goto err_out; + } + dpu_crtc->aux_sa_cf = dpu_aux_cf_peek(dpu_crtc->sa_cf); + + dpu_crtc->dec = dpu_dec_get(dpu, stream_id); + if (IS_ERR(dpu_crtc->dec)) { + ret = PTR_ERR(dpu_crtc->dec); + goto err_out; + } + dpu_crtc->aux_dec = dpu_aux_dec_peek(dpu_crtc->dec); + + dpu_crtc->ed = dpu_ed_get(dpu, stream_id + 4); + if (IS_ERR(dpu_crtc->ed)) { + ret = PTR_ERR(dpu_crtc->ed); + goto err_out; + } + dpu_crtc->aux_ed = dpu_aux_ed_peek(dpu_crtc->ed); + + dpu_crtc->fg = dpu_fg_get(dpu, stream_id); + if (IS_ERR(dpu_crtc->fg)) { + ret = PTR_ERR(dpu_crtc->fg); + goto err_out; + } + dpu_crtc->aux_fg = dpu_aux_fg_peek(dpu_crtc->fg); + + dpu_crtc->tcon = dpu_tcon_get(dpu, stream_id); + if (IS_ERR(dpu_crtc->tcon)) { + ret = PTR_ERR(dpu_crtc->tcon); + goto err_out; + } + dpu_crtc->aux_tcon = dpu_aux_tcon_peek(dpu_crtc->tcon); + + if (dpu_crtc->aux_is_master) { + dpu_crtc->m_pa_cf = dpu_crtc->aux_pa_cf; + dpu_crtc->m_sa_cf = dpu_crtc->aux_sa_cf; + dpu_crtc->m_dec = dpu_crtc->aux_dec; + dpu_crtc->m_ed = dpu_crtc->aux_ed; + dpu_crtc->m_fg = dpu_crtc->aux_fg; + dpu_crtc->m_tcon = dpu_crtc->aux_tcon; + + dpu_crtc->s_pa_cf = dpu_crtc->pa_cf; + dpu_crtc->s_sa_cf = dpu_crtc->sa_cf; + dpu_crtc->s_dec = dpu_crtc->dec; + dpu_crtc->s_ed = dpu_crtc->ed; + dpu_crtc->s_fg = dpu_crtc->fg; + dpu_crtc->s_tcon = dpu_crtc->tcon; + } else { + dpu_crtc->m_pa_cf = dpu_crtc->pa_cf; + dpu_crtc->m_sa_cf = dpu_crtc->sa_cf; + dpu_crtc->m_dec = dpu_crtc->dec; + dpu_crtc->m_ed = dpu_crtc->ed; + dpu_crtc->m_fg = dpu_crtc->fg; + dpu_crtc->m_tcon = dpu_crtc->tcon; + + dpu_crtc->s_pa_cf = 
dpu_crtc->aux_pa_cf; + dpu_crtc->s_sa_cf = dpu_crtc->aux_sa_cf; + dpu_crtc->s_dec = dpu_crtc->aux_dec; + dpu_crtc->s_ed = dpu_crtc->aux_ed; + dpu_crtc->s_fg = dpu_crtc->aux_fg; + dpu_crtc->s_tcon = dpu_crtc->aux_tcon; + } + + return 0; +err_out: + dpu_crtc_put_resources(dpu_crtc); + + return ret; +} + +static int dpu_crtc_init(struct dpu_crtc *dpu_crtc, + struct dpu_client_platformdata *pdata, struct drm_device *drm) +{ + struct dpu_soc *dpu = dev_get_drvdata(dpu_crtc->dev->parent); + struct device *dev = dpu_crtc->dev; + struct drm_crtc *crtc = &dpu_crtc->base; + struct dpu_plane_grp *plane_grp = pdata->plane_grp; + unsigned int stream_id = pdata->stream_id; + int i, ret; + + init_completion(&dpu_crtc->safety_shdld_done); + init_completion(&dpu_crtc->content_shdld_done); + init_completion(&dpu_crtc->dec_shdld_done); + + dpu_crtc->stream_id = stream_id; + dpu_crtc->crtc_grp_id = pdata->di_grp_id; + dpu_crtc->hw_plane_num = plane_grp->hw_plane_num; + dpu_crtc->syncmode_min_prate = dpu_get_syncmode_min_prate(dpu); + dpu_crtc->singlemode_max_width = dpu_get_singlemode_max_width(dpu); + dpu_crtc->master_stream_id = dpu_get_master_stream_id(dpu); + dpu_crtc->aux_is_master = !(dpu_crtc->master_stream_id == stream_id); + dpu_crtc->st = pdata->st9; + + dpu_crtc->plane = devm_kcalloc(dev, dpu_crtc->hw_plane_num, + sizeof(*dpu_crtc->plane), GFP_KERNEL); + if (!dpu_crtc->plane) + return -ENOMEM; + + ret = dpu_crtc_get_resources(dpu_crtc); + if (ret) { + DRM_DEV_ERROR(dev, "getting resources failed with %d.\n", ret); + return ret; + } + + plane_grp->res.fg[stream_id] = dpu_crtc->fg; + dpu_crtc->plane[0] = dpu_plane_create(drm, 0, stream_id, plane_grp, + DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(dpu_crtc->plane[0])) { + ret = PTR_ERR(dpu_crtc->plane[0]); + DRM_DEV_ERROR(dev, + "initializing plane0 failed with %d.\n", ret); + goto err_put_resources; + } + + crtc->port = pdata->of_node; + drm_crtc_helper_add(crtc, &dpu_helper_funcs); + ret = drm_crtc_init_with_planes(drm, crtc, 
&dpu_crtc->plane[0]->base, NULL, + &dpu_crtc_funcs, NULL); + if (ret) { + DRM_DEV_ERROR(dev, "adding crtc failed with %d.\n", ret); + goto err_put_resources; + } + + for (i = 1; i < dpu_crtc->hw_plane_num; i++) { + dpu_crtc->plane[i] = dpu_plane_create(drm, + drm_crtc_mask(&dpu_crtc->base), + stream_id, plane_grp, + DRM_PLANE_TYPE_OVERLAY); + if (IS_ERR(dpu_crtc->plane[i])) { + ret = PTR_ERR(dpu_crtc->plane[i]); + DRM_DEV_ERROR(dev, + "initializing plane%d failed with %d.\n", + i, ret); + goto err_put_resources; + } + } + + dpu_crtc->vbl_irq = dpu_map_irq(dpu, stream_id ? + IRQ_DISENGCFG_FRAMECOMPLETE1 : + IRQ_DISENGCFG_FRAMECOMPLETE0); + irq_set_status_flags(dpu_crtc->vbl_irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, dpu_crtc->vbl_irq, dpu_vbl_irq_handler, 0, + "imx_drm", dpu_crtc); + if (ret < 0) { + DRM_DEV_ERROR(dev, "vblank irq request failed with %d.\n", ret); + goto err_put_resources; + } + disable_irq(dpu_crtc->vbl_irq); + + dpu_crtc->safety_shdld_irq = dpu_map_irq(dpu, stream_id ? + IRQ_EXTDST5_SHDLOAD : IRQ_EXTDST4_SHDLOAD); + irq_set_status_flags(dpu_crtc->safety_shdld_irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, dpu_crtc->safety_shdld_irq, + dpu_safety_shdld_irq_handler, 0, "imx_drm", + dpu_crtc); + if (ret < 0) { + DRM_DEV_ERROR(dev, + "safety shadow load irq request failed with %d.\n", + ret); + goto err_put_resources; + } + disable_irq(dpu_crtc->safety_shdld_irq); + + dpu_crtc->content_shdld_irq = dpu_map_irq(dpu, stream_id ? + IRQ_EXTDST1_SHDLOAD : IRQ_EXTDST0_SHDLOAD); + irq_set_status_flags(dpu_crtc->content_shdld_irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, dpu_crtc->content_shdld_irq, + dpu_content_shdld_irq_handler, 0, "imx_drm", + dpu_crtc); + if (ret < 0) { + DRM_DEV_ERROR(dev, + "content shadow load irq request failed with %d.\n", + ret); + goto err_put_resources; + } + disable_irq(dpu_crtc->content_shdld_irq); + + dpu_crtc->dec_shdld_irq = dpu_map_irq(dpu, stream_id ? 
+ IRQ_DISENGCFG_SHDLOAD1 : IRQ_DISENGCFG_SHDLOAD0); + irq_set_status_flags(dpu_crtc->dec_shdld_irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, dpu_crtc->dec_shdld_irq, + dpu_dec_shdld_irq_handler, 0, "imx_drm", + dpu_crtc); + if (ret < 0) { + DRM_DEV_ERROR(dev, + "DEC shadow load irq request failed with %d.\n", + ret); + goto err_put_resources; + } + disable_irq(dpu_crtc->dec_shdld_irq); + + return 0; + +err_put_resources: + dpu_crtc_put_resources(dpu_crtc); + + return ret; +} + +static int dpu_crtc_bind(struct device *dev, struct device *master, void *data) +{ + struct dpu_client_platformdata *pdata = dev->platform_data; + struct drm_device *drm = data; + struct dpu_crtc *dpu_crtc; + int ret; + + dpu_crtc = devm_kzalloc(dev, sizeof(*dpu_crtc), GFP_KERNEL); + if (!dpu_crtc) + return -ENOMEM; + + dpu_crtc->dev = dev; + + ret = dpu_crtc_init(dpu_crtc, pdata, drm); + if (ret) + return ret; + + if (!drm->mode_config.funcs) + drm->mode_config.funcs = &dpu_drm_mode_config_funcs; + + dev_set_drvdata(dev, dpu_crtc); + + return 0; +} + +static void dpu_crtc_unbind(struct device *dev, struct device *master, + void *data) +{ + struct dpu_crtc *dpu_crtc = dev_get_drvdata(dev); + + dpu_crtc_put_resources(dpu_crtc); + + /* make sure the crtc exists, and then cleanup */ + if (dpu_crtc->base.dev) + drm_crtc_cleanup(&dpu_crtc->base); +} + +static const struct component_ops dpu_crtc_ops = { + .bind = dpu_crtc_bind, + .unbind = dpu_crtc_unbind, +}; + +static int dpu_crtc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + if (!dev->platform_data) + return -EINVAL; + + return component_add(dev, &dpu_crtc_ops); +} + +static int dpu_crtc_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dpu_crtc_ops); + return 0; +} + +static struct platform_driver dpu_crtc_driver = { + .driver = { + .name = "imx-dpu-crtc", + }, + .probe = dpu_crtc_probe, + .remove = dpu_crtc_remove, +}; +module_platform_driver(dpu_crtc_driver); + +MODULE_AUTHOR("NXP 
Semiconductor"); +MODULE_DESCRIPTION("i.MX DPU CRTC"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:imx-dpu-crtc"); diff --git a/drivers/gpu/drm/imx/dpu/dpu-crtc.h b/drivers/gpu/drm/imx/dpu/dpu-crtc.h new file mode 100644 index 000000000000..ee6dababa0d8 --- /dev/null +++ b/drivers/gpu/drm/imx/dpu/dpu-crtc.h @@ -0,0 +1,94 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _DPU_CRTC_H_ +#define _DPU_CRTC_H_ + +#include <drm/drm_vblank.h> +#include <video/dpu.h> +#include "dpu-plane.h" +#include "../imx-drm.h" + +struct dpu_crtc { + struct device *dev; + struct drm_crtc base; + struct imx_drm_crtc *imx_crtc; + struct dpu_constframe *pa_cf; + struct dpu_constframe *sa_cf; + struct dpu_disengcfg *dec; + struct dpu_extdst *ed; + struct dpu_framegen *fg; + struct dpu_tcon *tcon; + struct dpu_store *st; + struct dpu_constframe *aux_pa_cf; + struct dpu_constframe *aux_sa_cf; + struct dpu_disengcfg *aux_dec; + struct dpu_extdst *aux_ed; + struct dpu_framegen *aux_fg; + struct dpu_tcon *aux_tcon; + /* master */ + struct dpu_constframe *m_pa_cf; + struct dpu_constframe *m_sa_cf; + struct dpu_disengcfg *m_dec; + struct dpu_extdst *m_ed; + struct dpu_framegen *m_fg; + struct dpu_tcon *m_tcon; + /* slave */ + struct dpu_constframe *s_pa_cf; + struct dpu_constframe *s_sa_cf; + struct dpu_disengcfg *s_dec; + struct dpu_extdst *s_ed; + struct dpu_framegen *s_fg; + struct dpu_tcon *s_tcon; + struct dpu_plane **plane; + unsigned int hw_plane_num; + unsigned int stream_id; + 
unsigned int crtc_grp_id; + unsigned int syncmode_min_prate; + unsigned int singlemode_max_width; + unsigned int master_stream_id; + int vbl_irq; + int safety_shdld_irq; + int content_shdld_irq; + int dec_shdld_irq; + + bool aux_is_master; + + struct completion safety_shdld_done; + struct completion content_shdld_done; + struct completion dec_shdld_done; + + struct drm_pending_vblank_event *event; +}; + +struct dpu_crtc_state { + struct imx_crtc_state imx_crtc_state; + struct dpu_plane_state **dpu_plane_states; + bool use_pc; +}; + +static inline struct dpu_crtc_state *to_dpu_crtc_state(struct imx_crtc_state *s) +{ + return container_of(s, struct dpu_crtc_state, imx_crtc_state); +} + +static inline struct dpu_crtc *to_dpu_crtc(struct drm_crtc *crtc) +{ + return container_of(crtc, struct dpu_crtc, base); +} + +struct dpu_plane_state ** +crtc_state_get_dpu_plane_states(struct drm_crtc_state *state); + +#endif diff --git a/drivers/gpu/drm/imx/dpu/dpu-kms.c b/drivers/gpu/drm/imx/dpu/dpu-kms.c new file mode 100644 index 000000000000..ff0e94236553 --- /dev/null +++ b/drivers/gpu/drm/imx/dpu/dpu-kms.c @@ -0,0 +1,726 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <linux/sort.h> +#include <video/dpu.h> +#include "dpu-crtc.h" +#include "dpu-plane.h" +#include "../imx-drm.h" + +static struct drm_plane_state ** +dpu_atomic_alloc_tmp_planes_per_crtc(struct drm_device *dev) +{ + int total_planes = dev->mode_config.num_total_plane; + struct drm_plane_state **states; + + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); + if (!states) + return ERR_PTR(-ENOMEM); + + return states; +} + +static int zpos_cmp(const void *a, const void *b) +{ + const struct drm_plane_state *sa = *(struct drm_plane_state **)a; + const struct drm_plane_state *sb = *(struct drm_plane_state **)b; + + return sa->normalized_zpos - sb->normalized_zpos; +} + +static int dpu_atomic_sort_planes_per_crtc(struct drm_crtc_state *crtc_state, + struct drm_plane_state **states) +{ + struct drm_atomic_state *state = crtc_state->state; + struct drm_device *dev = state->dev; + struct drm_plane *plane; + int n = 0; + + drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { + struct drm_plane_state *plane_state = + drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + states[n++] = plane_state; + } + + sort(states, n, sizeof(*states), zpos_cmp, NULL); + + return n; +} + +static void +dpu_atomic_compute_plane_lrx_per_crtc(struct drm_crtc_state *crtc_state, + struct drm_plane_state **states, int n) +{ + struct dpu_plane_state *dpstate; + struct drm_plane_state *plane_state; + int i; + int half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1; + bool lo, ro, bo; + + /* compute left/right_crtc_x if pixel combiner is needed */ + for (i = 0; i < n; i++) { + plane_state = states[i]; + dpstate = to_dpu_plane_state(plane_state); + + lo = dpstate->left_src_w && !dpstate->right_src_w; + ro = !dpstate->left_src_w && dpstate->right_src_w; + bo = dpstate->left_src_w 
&& dpstate->right_src_w; + + if (lo || bo) { + dpstate->left_crtc_x = plane_state->crtc_x; + dpstate->right_crtc_x = 0; + } else if (ro) { + dpstate->left_crtc_x = 0; + dpstate->right_crtc_x = + plane_state->crtc_x - half_hdisplay; + } + } +} + +static void +dpu_atomic_set_top_plane_per_crtc(struct drm_plane_state **states, int n, + bool use_pc) +{ + struct dpu_plane_state *dpstate; + bool found_l_top = false, found_r_top = false; + int i; + + for (i = n - 1; i >= 0; i--) { + dpstate = to_dpu_plane_state(states[i]); + if (use_pc) { + if (dpstate->left_src_w && !found_l_top) { + dpstate->is_left_top = true; + found_l_top = true; + } else { + dpstate->is_left_top = false; + } + + if (dpstate->right_src_w && !found_r_top) { + dpstate->is_right_top = true; + found_r_top = true; + } else { + dpstate->is_right_top = false; + } + } else { + dpstate->is_top = (i == (n - 1)) ? true : false; + } + } +} + +static int +dpu_atomic_assign_plane_source_per_crtc(struct drm_plane_state **states, + int n, bool use_pc) +{ + struct dpu_plane_state *dpstate; + struct dpu_plane *dplane; + struct dpu_plane_grp *grp; + struct drm_framebuffer *fb; + struct dpu_fetchunit *fu; + struct dpu_fetchunit *fe; + struct dpu_hscaler *hs; + struct dpu_vscaler *vs; + lb_prim_sel_t stage; + dpu_block_id_t blend; + unsigned int sid, src_sid; + unsigned int num_planes; + int bit; + int i, j, k = 0, m; + int total_asrc_num; + int s0_layer_cnt = 0, s1_layer_cnt = 0; + int s0_n = 0, s1_n = 0; + u32 src_a_mask, cap_mask, fe_mask, hs_mask, vs_mask; + bool need_fetcheco, need_hscaler, need_vscaler; + bool fmt_is_yuv; + bool alloc_aux_source; + + if (use_pc) { + for (i = 0; i < n; i++) { + dpstate = to_dpu_plane_state(states[i]); + + if (dpstate->left_src_w) + s0_n++; + + if (dpstate->right_src_w) + s1_n++; + } + } else { + s0_n = n; + s1_n = n; + } + + /* for active planes only */ + for (i = 0; i < n; i++) { + dpstate = to_dpu_plane_state(states[i]); + dplane = to_dpu_plane(states[i]->plane); + fb = 
states[i]->fb; + num_planes = fb->format->num_planes; + fmt_is_yuv = drm_format_is_yuv(fb->format->format); + grp = dplane->grp; + alloc_aux_source = false; + + if (use_pc) + sid = dpstate->left_src_w ? 0 : 1; + else + sid = dplane->stream_id; + +again: + if (alloc_aux_source) + sid ^= 1; + + need_fetcheco = (num_planes > 1); + need_hscaler = (states[i]->src_w >> 16 != states[i]->crtc_w); + need_vscaler = (states[i]->src_h >> 16 != states[i]->crtc_h); + + total_asrc_num = 0; + src_a_mask = grp->src_a_mask; + fe_mask = 0; + hs_mask = 0; + vs_mask = 0; + + for_each_set_bit(bit, (unsigned long *)&src_a_mask, 32) + total_asrc_num++; + + /* assign source */ + mutex_lock(&grp->mutex); + for (j = 0; j < total_asrc_num; j++) { + k = ffs(src_a_mask) - 1; + if (k < 0) + return -EINVAL; + + fu = source_to_fu(&grp->res, sources[k]); + if (!fu) + return -EINVAL; + + /* avoid on-the-fly/hot migration */ + src_sid = fu->ops->get_stream_id(fu); + if (src_sid && src_sid != BIT(sid)) + goto next; + + if (fetchunit_is_fetchdecode(fu)) { + cap_mask = fetchdecode_get_vproc_mask(fu); + + if (need_fetcheco) { + fe = fetchdecode_get_fetcheco(fu); + + /* avoid on-the-fly/hot migration */ + src_sid = fu->ops->get_stream_id(fe); + if (src_sid && src_sid != BIT(sid)) + goto next; + + /* fetch unit has the fetcheco cap? */ + if (!dpu_vproc_has_fetcheco_cap(cap_mask)) + goto next; + + fe_mask = + dpu_vproc_get_fetcheco_cap(cap_mask); + + /* fetcheco available? */ + if (grp->src_use_vproc_mask & fe_mask) + goto next; + } + + if (need_hscaler) { + hs = fetchdecode_get_hscaler(fu); + + /* avoid on-the-fly/hot migration */ + src_sid = hscaler_get_stream_id(hs); + if (src_sid && src_sid != BIT(sid)) + goto next; + + /* fetch unit has the hscale cap */ + if (!dpu_vproc_has_hscale_cap(cap_mask)) + goto next; + + hs_mask = + dpu_vproc_get_hscale_cap(cap_mask); + + /* hscaler available? 
*/ + if (grp->src_use_vproc_mask & hs_mask) + goto next; + } + + if (need_vscaler) { + vs = fetchdecode_get_vscaler(fu); + + /* avoid on-the-fly/hot migration */ + src_sid = vscaler_get_stream_id(vs); + if (src_sid && src_sid != BIT(sid)) + goto next; + + /* fetch unit has the vscale cap? */ + if (!dpu_vproc_has_vscale_cap(cap_mask)) + goto next; + + vs_mask = + dpu_vproc_get_vscale_cap(cap_mask); + + /* vscaler available? */ + if (grp->src_use_vproc_mask & vs_mask) + goto next; + } + } else { + if (fmt_is_yuv || need_fetcheco || + need_hscaler || need_vscaler) + goto next; + } + + grp->src_a_mask &= ~BIT(k); + grp->src_use_vproc_mask |= fe_mask | hs_mask | vs_mask; + break; +next: + src_a_mask &= ~BIT(k); + fe_mask = 0; + hs_mask = 0; + vs_mask = 0; + } + mutex_unlock(&grp->mutex); + + if (j == total_asrc_num) + return -EINVAL; + + if (alloc_aux_source) + dpstate->aux_source = sources[k]; + else + dpstate->source = sources[k]; + + /* assign stage and blend */ + if (sid) { + m = grp->hw_plane_num - (s1_n - s1_layer_cnt); + stage = s1_layer_cnt ? stages[m - 1] : cf_stages[sid]; + blend = blends[m]; + + s1_layer_cnt++; + } else { + stage = s0_layer_cnt ? 
+ stages[s0_layer_cnt - 1] : cf_stages[sid]; + blend = blends[s0_layer_cnt]; + + s0_layer_cnt++; + } + + if (alloc_aux_source) { + dpstate->aux_stage = stage; + dpstate->aux_blend = blend; + } else { + dpstate->stage = stage; + dpstate->blend = blend; + } + + if (dpstate->need_aux_source && !alloc_aux_source) { + alloc_aux_source = true; + goto again; + } + } + + return 0; +} + +static void +dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(struct drm_crtc *crtc, + u32 crtc_mask, + struct drm_atomic_state *state, + bool *puts) +{ + struct drm_plane *plane; + struct drm_plane_state *plane_state; + bool found_pstate = false; + int i; + + if ((crtc_mask & drm_crtc_mask(crtc)) == 0) { + for_each_new_plane_in_state(state, plane, plane_state, i) { + if (plane->possible_crtcs & drm_crtc_mask(crtc)) { + found_pstate = true; + break; + } + } + + if (!found_pstate) + puts[drm_crtc_index(crtc)] = true; + } +} + +static void +dpu_atomic_put_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + int index = drm_plane_index(plane); + + plane->funcs->atomic_destroy_state(plane, state->planes[index].state); + state->planes[index].ptr = NULL; + state->planes[index].state = NULL; + + drm_modeset_unlock(&plane->mutex); +} + +static void +dpu_atomic_put_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + int index = drm_crtc_index(crtc); + + crtc->funcs->atomic_destroy_state(crtc, state->crtcs[index].state); + state->crtcs[index].ptr = NULL; + state->crtcs[index].state = NULL; + + drm_modeset_unlock(&crtc->mutex); +} + +static void +dpu_atomic_put_possible_states_per_crtc(struct drm_crtc_state *crtc_state) +{ + struct drm_atomic_state *state = crtc_state->state; + struct drm_crtc *crtc = crtc_state->crtc; + struct drm_crtc_state *old_crtc_state = crtc->state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct dpu_plane *dplane = to_dpu_plane(crtc->primary); + struct dpu_plane_state **old_dpstates; + struct 
dpu_plane_state *old_dpstate, *new_dpstate; + u32 active_mask = 0; + int i; + + old_dpstates = crtc_state_get_dpu_plane_states(old_crtc_state); + if (WARN_ON(!old_dpstates)) + return; + + for (i = 0; i < dplane->grp->hw_plane_num; i++) { + old_dpstate = old_dpstates[i]; + if (!old_dpstate) + continue; + + active_mask |= BIT(i); + + drm_atomic_crtc_state_for_each_plane(plane, crtc_state) { + if (drm_plane_index(plane) != + drm_plane_index(old_dpstate->base.plane)) + continue; + + plane_state = + drm_atomic_get_existing_plane_state(state, + plane); + if (WARN_ON(!plane_state)) + return; + + new_dpstate = to_dpu_plane_state(plane_state); + + active_mask &= ~BIT(i); + + /* + * Should be enough to check the below real HW plane + * resources only. + * Things like vproc resources should be fine. + */ + if (old_dpstate->stage != new_dpstate->stage || + old_dpstate->source != new_dpstate->source || + old_dpstate->blend != new_dpstate->blend || + old_dpstate->aux_stage != new_dpstate->aux_stage || + old_dpstate->aux_source != new_dpstate->aux_source || + old_dpstate->aux_blend != new_dpstate->aux_blend) + return; + } + } + + /* pure software check */ + if (WARN_ON(active_mask)) + return; + + drm_atomic_crtc_state_for_each_plane(plane, crtc_state) + dpu_atomic_put_plane_state(state, plane); + + dpu_atomic_put_crtc_state(state, crtc); +} + +static int dpu_drm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct dpu_plane *dpu_plane; + struct drm_plane_state *plane_state; + struct dpu_plane_state *dpstate; + struct drm_framebuffer *fb; + struct dpu_plane_grp *grp[MAX_DPU_PLANE_GRP]; + int ret, i, grp_id; + int active_plane[MAX_DPU_PLANE_GRP]; + int active_plane_fetcheco[MAX_DPU_PLANE_GRP]; + int active_plane_hscale[MAX_DPU_PLANE_GRP]; + int active_plane_vscale[MAX_DPU_PLANE_GRP]; + int half_hdisplay = 0; + bool pipe_states_prone_to_put[MAX_CRTC]; + bool 
use_pc[MAX_DPU_PLANE_GRP]; + u32 crtc_mask_in_state = 0; + + ret = drm_atomic_helper_check_modeset(dev, state); + if (ret) { + DRM_DEBUG_KMS("%s: failed to check modeset\n", __func__); + return ret; + } + + for (i = 0; i < MAX_CRTC; i++) + pipe_states_prone_to_put[i] = false; + + for (i = 0; i < MAX_DPU_PLANE_GRP; i++) { + active_plane[i] = 0; + active_plane_fetcheco[i] = 0; + active_plane_hscale[i] = 0; + active_plane_vscale[i] = 0; + use_pc[i] = false; + grp[i] = NULL; + } + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) + crtc_mask_in_state |= drm_crtc_mask(crtc); + + drm_for_each_crtc(crtc, dev) { + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct imx_crtc_state *imx_crtc_state; + struct dpu_crtc_state *dcstate; + bool need_left, need_right, need_aux_source, use_pc_per_crtc; + + use_pc_per_crtc = false; + + dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(crtc, + crtc_mask_in_state, state, + pipe_states_prone_to_put); + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (WARN_ON(IS_ERR(crtc_state))) + return PTR_ERR(crtc_state); + + imx_crtc_state = to_imx_crtc_state(crtc_state); + dcstate = to_dpu_crtc_state(imx_crtc_state); + + if (crtc_state->enable) { + if (use_pc[dpu_crtc->crtc_grp_id]) { + DRM_DEBUG_KMS("other crtc needs pixel combiner\n"); + return -EINVAL; + } + + if (crtc_state->adjusted_mode.clock > + dpu_crtc->syncmode_min_prate || + crtc_state->adjusted_mode.hdisplay > + dpu_crtc->singlemode_max_width) + use_pc_per_crtc = true; + } + + if (use_pc_per_crtc) { + use_pc[dpu_crtc->crtc_grp_id] = true; + half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1; + } + + dcstate->use_pc = use_pc_per_crtc; + + drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + DRM_DEBUG_KMS("failed to get plane state\n"); + return PTR_ERR(plane_state); + } + + dpstate = to_dpu_plane_state(plane_state); + fb = plane_state->fb; + dpu_plane = 
to_dpu_plane(plane); + grp_id = dpu_plane->grp->id; + active_plane[grp_id]++; + + need_left = false; + need_right = false; + need_aux_source = false; + + if (use_pc_per_crtc) { + if (plane_state->crtc_x < half_hdisplay) + need_left = true; + + if ((plane_state->crtc_w + + plane_state->crtc_x) > half_hdisplay) + need_right = true; + + if (need_left && need_right) { + need_aux_source = true; + active_plane[grp_id]++; + } + } + + if (need_left && need_right) { + dpstate->left_crtc_w = half_hdisplay; + dpstate->left_crtc_w -= plane_state->crtc_x; + + dpstate->left_src_w = dpstate->left_crtc_w; + } else if (need_left) { + dpstate->left_crtc_w = plane_state->crtc_w; + dpstate->left_src_w = plane_state->src_w >> 16; + } else { + dpstate->left_crtc_w = 0; + dpstate->left_src_w = 0; + } + + if (need_right && need_left) { + dpstate->right_crtc_w = plane_state->crtc_x + + plane_state->crtc_w; + dpstate->right_crtc_w -= half_hdisplay; + + dpstate->right_src_w = dpstate->right_crtc_w; + } else if (need_right) { + dpstate->right_crtc_w = plane_state->crtc_w; + dpstate->right_src_w = plane_state->src_w >> 16; + } else { + dpstate->right_crtc_w = 0; + dpstate->right_src_w = 0; + } + + if (fb->format->num_planes > 1) { + active_plane_fetcheco[grp_id]++; + if (need_aux_source) + active_plane_fetcheco[grp_id]++; + } + + if (plane_state->src_w >> 16 != plane_state->crtc_w) { + if (use_pc_per_crtc) + return -EINVAL; + + active_plane_hscale[grp_id]++; + } + + if (plane_state->src_h >> 16 != plane_state->crtc_h) { + if (use_pc_per_crtc) + return -EINVAL; + + active_plane_vscale[grp_id]++; + } + + if (grp[grp_id] == NULL) + grp[grp_id] = dpu_plane->grp; + + dpstate->need_aux_source = need_aux_source; + } + } + + /* enough resources? 
*/ + for (i = 0; i < MAX_DPU_PLANE_GRP; i++) { + if (!grp[i]) + continue; + + if (active_plane[i] > grp[i]->hw_plane_num) { + DRM_DEBUG_KMS("no enough fetch units\n"); + return -EINVAL; + } + + if (active_plane_fetcheco[i] > grp[i]->hw_plane_fetcheco_num) { + DRM_DEBUG_KMS("no enough FetchEcos\n"); + return -EINVAL; + } + + if (active_plane_hscale[i] > grp[i]->hw_plane_hscaler_num) { + DRM_DEBUG_KMS("no enough Hscalers\n"); + return -EINVAL; + } + + if (active_plane_vscale[i] > grp[i]->hw_plane_vscaler_num) { + DRM_DEBUG_KMS("no enough Vscalers\n"); + return -EINVAL; + } + } + + /* initialize resource mask */ + for (i = 0; i < MAX_DPU_PLANE_GRP; i++) { + if (!grp[i]) + continue; + + mutex_lock(&grp[i]->mutex); + grp[i]->src_a_mask = grp[i]->src_mask; + grp[i]->src_use_vproc_mask = 0; + mutex_unlock(&grp[i]->mutex); + } + + ret = drm_atomic_normalize_zpos(dev, state); + if (ret) + return ret; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct drm_plane_state **states; + int n; + + states = dpu_atomic_alloc_tmp_planes_per_crtc(dev); + if (IS_ERR(states)) { + DRM_DEBUG_KMS( + "[CRTC:%d:%s] cannot alloc plane state ptrs\n", + crtc->base.id, crtc->name); + return PTR_ERR(states); + } + + n = dpu_atomic_sort_planes_per_crtc(crtc_state, states); + if (n < 0) { + DRM_DEBUG_KMS("[CRTC:%d:%s] failed to sort planes\n", + crtc->base.id, crtc->name); + kfree(states); + return n; + } + + /* no active planes? 
*/ + if (n == 0) { + kfree(states); + continue; + } + + if (use_pc[dpu_crtc->crtc_grp_id]) + dpu_atomic_compute_plane_lrx_per_crtc(crtc_state, + states, n); + + dpu_atomic_set_top_plane_per_crtc(states, n, + use_pc[dpu_crtc->crtc_grp_id]); + + ret = dpu_atomic_assign_plane_source_per_crtc(states, n, + use_pc[dpu_crtc->crtc_grp_id]); + if (ret) { + DRM_DEBUG_KMS("[CRTC:%d:%s] cannot assign plane rscs\n", + crtc->base.id, crtc->name); + kfree(states); + return ret; + } + + kfree(states); + } + + drm_for_each_crtc(crtc, dev) { + if (pipe_states_prone_to_put[drm_crtc_index(crtc)]) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (WARN_ON(IS_ERR(crtc_state))) + return PTR_ERR(crtc_state); + + dpu_atomic_put_possible_states_per_crtc(crtc_state); + } + } + + ret = drm_atomic_helper_check_planes(dev, state); + if (ret) { + DRM_DEBUG_KMS("%s: failed to check planes\n", __func__); + return ret; + } + + return ret; +} + +const struct drm_mode_config_funcs dpu_drm_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = dpu_drm_atomic_check, + .atomic_commit = drm_atomic_helper_commit, +}; diff --git a/drivers/gpu/drm/imx/dpu/dpu-kms.h b/drivers/gpu/drm/imx/dpu/dpu-kms.h new file mode 100644 index 000000000000..73723e500239 --- /dev/null +++ b/drivers/gpu/drm/imx/dpu/dpu-kms.h @@ -0,0 +1,20 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#ifndef _DPU_KMS_H_ +#define _DPU_KMS_H_ + +extern const struct drm_mode_config_funcs dpu_drm_mode_config_funcs; + +#endif diff --git a/drivers/gpu/drm/imx/dpu/dpu-plane.c b/drivers/gpu/drm/imx/dpu/dpu-plane.c new file mode 100644 index 000000000000..c0237a268e11 --- /dev/null +++ b/drivers/gpu/drm/imx/dpu/dpu-plane.c @@ -0,0 +1,1012 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_blend.h> +#include <drm/drm_color_mgmt.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_plane_helper.h> +#include <video/dpu.h> +#include <video/imx8-prefetch.h> +#include "dpu-plane.h" +#include "../imx-drm.h" + +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) + +static const uint32_t dpu_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_RGB565, + + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, +}; + +static const uint64_t dpu_format_modifiers[] = { + DRM_FORMAT_MOD_VIVANTE_TILED, + DRM_FORMAT_MOD_VIVANTE_SUPER_TILED, + DRM_FORMAT_MOD_AMPHION_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + +static unsigned int dpu_plane_get_default_zpos(enum 
drm_plane_type type) +{ + if (type == DRM_PLANE_TYPE_PRIMARY) + return 0; + else if (type == DRM_PLANE_TYPE_OVERLAY) + return 1; + + return 0; +} + +static void dpu_plane_destroy(struct drm_plane *plane) +{ + struct dpu_plane *dpu_plane = to_dpu_plane(plane); + + drm_plane_cleanup(plane); + kfree(dpu_plane); +} + +static void dpu_plane_reset(struct drm_plane *plane) +{ + struct dpu_plane_state *state; + + if (plane->state) { + __drm_atomic_helper_plane_destroy_state(plane->state); + kfree(to_dpu_plane_state(plane->state)); + plane->state = NULL; + } + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return; + + __drm_atomic_helper_plane_reset(plane, &state->base); + + plane->state->zpos = dpu_plane_get_default_zpos(plane->type); + plane->state->color_encoding = DRM_COLOR_YCBCR_BT601; + plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE; +} + +static struct drm_plane_state * +dpu_drm_atomic_plane_duplicate_state(struct drm_plane *plane) +{ + struct dpu_plane_state *state, *copy; + + if (WARN_ON(!plane->state)) + return NULL; + + copy = kmalloc(sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, ©->base); + state = to_dpu_plane_state(plane->state); + copy->stage = state->stage; + copy->source = state->source; + copy->blend = state->blend; + copy->aux_stage = state->aux_stage; + copy->aux_source = state->aux_source; + copy->aux_blend = state->aux_blend; + copy->is_top = state->is_top; + copy->use_prefetch = state->use_prefetch; + copy->use_aux_prefetch = state->use_aux_prefetch; + copy->need_aux_source = state->need_aux_source; + copy->left_src_w = state->left_src_w; + copy->left_crtc_w = state->left_crtc_w; + copy->left_crtc_x = state->left_crtc_x; + copy->right_src_w = state->right_src_w; + copy->right_crtc_w = state->right_crtc_w; + copy->right_crtc_x = state->right_crtc_x; + copy->is_left_top = state->is_left_top; + copy->is_right_top = state->is_right_top; + + return ©->base; +} + +static 
bool dpu_drm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) + return false; + + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + return modifier == DRM_FORMAT_MOD_LINEAR; + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_RGB565: + return modifier == DRM_FORMAT_MOD_LINEAR || + modifier == DRM_FORMAT_MOD_VIVANTE_TILED || + modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return modifier == DRM_FORMAT_MOD_LINEAR || + modifier == DRM_FORMAT_MOD_AMPHION_TILED; + default: + return false; + } +} + +static void dpu_drm_atomic_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + __drm_atomic_helper_plane_destroy_state(state); + kfree(to_dpu_plane_state(state)); +} + +static const struct drm_plane_funcs dpu_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = dpu_plane_destroy, + .reset = dpu_plane_reset, + .atomic_duplicate_state = dpu_drm_atomic_plane_duplicate_state, + .atomic_destroy_state = dpu_drm_atomic_plane_destroy_state, + .format_mod_supported = dpu_drm_plane_format_mod_supported, +}; + +static inline dma_addr_t +drm_plane_state_to_baseaddr(struct drm_plane_state *state, bool aux_source) +{ + struct drm_framebuffer *fb = state->fb; + struct drm_gem_cma_object *cma_obj; + struct dpu_plane_state *dpstate = to_dpu_plane_state(state); + unsigned int x = (state->src.x1 >> 16) + + (aux_source ? 
dpstate->left_src_w : 0); + unsigned int y = state->src.y1 >> 16; + + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + BUG_ON(!cma_obj); + + if (fb->modifier) + return cma_obj->paddr + fb->offsets[0]; + + if (fb->flags & DRM_MODE_FB_INTERLACED) + y /= 2; + + return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y + + fb->format->cpp[0] * x; +} + +static inline dma_addr_t +drm_plane_state_to_uvbaseaddr(struct drm_plane_state *state, bool aux_source) +{ + struct drm_framebuffer *fb = state->fb; + struct drm_gem_cma_object *cma_obj; + struct dpu_plane_state *dpstate = to_dpu_plane_state(state); + int x = (state->src.x1 >> 16) + (aux_source ? dpstate->left_src_w : 0); + int y = state->src.y1 >> 16; + + cma_obj = drm_fb_cma_get_gem_obj(fb, 1); + BUG_ON(!cma_obj); + + if (fb->modifier) + return cma_obj->paddr + fb->offsets[1]; + + x /= fb->format->hsub; + y /= fb->format->vsub; + + if (fb->flags & DRM_MODE_FB_INTERLACED) + y /= 2; + + return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y + + fb->format->cpp[1] * x; +} + +static inline bool dpu_plane_fb_format_is_yuv(u32 fmt) +{ + return fmt == DRM_FORMAT_YUYV || fmt == DRM_FORMAT_UYVY || + fmt == DRM_FORMAT_NV12 || fmt == DRM_FORMAT_NV21; +} + +static int dpu_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct dpu_plane *dplane = to_dpu_plane(plane); + struct dpu_plane_state *dpstate = to_dpu_plane_state(state); + struct dpu_plane_res *res = &dplane->grp->res; + struct drm_crtc_state *crtc_state; + struct drm_framebuffer *fb = state->fb; + struct dpu_fetchunit *fu; + struct dprc *dprc; + dma_addr_t baseaddr, uv_baseaddr = 0; + u32 src_w, src_h, src_x, src_y; + unsigned int frame_width; + int min_scale, bpp, ret; + bool fb_is_interlaced; + bool check_aux_source = false; + + /* ok to disable */ + if (!fb) { + dpstate->stage = LB_PRIM_SEL__DISABLE; + dpstate->source = LB_SEC_SEL__DISABLE; + dpstate->blend = ID_NONE; + dpstate->aux_stage = LB_PRIM_SEL__DISABLE; + dpstate->aux_source = 
LB_SEC_SEL__DISABLE; + dpstate->aux_blend = ID_NONE; + dpstate->is_top = false; + dpstate->use_prefetch = false; + dpstate->use_aux_prefetch = false; + dpstate->need_aux_source = false; + dpstate->left_src_w = 0; + dpstate->left_crtc_w = 0; + dpstate->left_crtc_x = 0; + dpstate->right_src_w = 0; + dpstate->right_crtc_w = 0; + dpstate->right_crtc_x = 0; + dpstate->is_left_top = false; + dpstate->is_right_top = false; + return 0; + } + + if (!state->crtc) { + DRM_DEBUG_KMS("[PLANE:%d:%s] has no CRTC in plane state\n", + plane->base.id, plane->name); + return -EINVAL; + } + + src_w = drm_rect_width(&state->src) >> 16; + src_h = drm_rect_height(&state->src) >> 16; + src_x = state->src.x1 >> 16; + src_y = state->src.y1 >> 16; + + fb_is_interlaced = !!(fb->flags & DRM_MODE_FB_INTERLACED); + + if (fb->modifier && + fb->modifier != DRM_FORMAT_MOD_AMPHION_TILED && + fb->modifier != DRM_FORMAT_MOD_VIVANTE_TILED && + fb->modifier != DRM_FORMAT_MOD_VIVANTE_SUPER_TILED) { + DRM_DEBUG_KMS("[PLANE:%d:%s] unsupported fb modifier\n", + plane->base.id, plane->name); + return -EINVAL; + } + + crtc_state = + drm_atomic_get_existing_crtc_state(state->state, state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + min_scale = dplane->grp->has_vproc ? 
+ FRAC_16_16(min(src_w, src_h), 8192) : + DRM_PLANE_HELPER_NO_SCALING; + ret = drm_atomic_helper_check_plane_state(state, crtc_state, + min_scale, + DRM_PLANE_HELPER_NO_SCALING, + true, false); + if (ret) { + DRM_DEBUG_KMS("[PLANE:%d:%s] failed to check plane state\n", + plane->base.id, plane->name); + return ret; + } + + /* no off screen */ + if (state->dst.x1 < 0 || state->dst.y1 < 0 || + (state->dst.x2 > crtc_state->adjusted_mode.hdisplay) || + (state->dst.y2 > crtc_state->adjusted_mode.vdisplay)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] no off screen\n", + plane->base.id, plane->name); + return -EINVAL; + } + + /* pixel/line count and position parameters check */ + if (fb->format->hsub == 2) { + if (dpstate->left_src_w || dpstate->right_src_w) { + if ((dpstate->left_src_w % 2) || + (dpstate->right_src_w % 2) || (src_x % 2)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad left/right uv width or xoffset\n", + plane->base.id, plane->name); + return -EINVAL; + } + } else { + if ((src_w % 2) || (src_x % 2)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv width or xoffset\n", + plane->base.id, plane->name); + return -EINVAL; + } + } + } + if (fb->format->vsub == 2) { + if (src_h % (fb_is_interlaced ? 4 : 2)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv height\n", + plane->base.id, plane->name); + return -EINVAL; + } + if (src_y % (fb_is_interlaced ? 
4 : 2)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv yoffset\n", + plane->base.id, plane->name); + return -EINVAL; + } + } + + /* for tile formats, framebuffer has to be tile aligned */ + switch (fb->modifier) { + case DRM_FORMAT_MOD_AMPHION_TILED: + if (fb->width % 8) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb width for AMPHION tile\n", + plane->base.id, plane->name); + return -EINVAL; + } + if (fb->height % 256) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb height for AMPHION tile\n", + plane->base.id, plane->name); + return -EINVAL; + } + break; + case DRM_FORMAT_MOD_VIVANTE_TILED: + if (fb->width % 4) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb width for VIVANTE tile\n", + plane->base.id, plane->name); + return -EINVAL; + } + if (fb->height % 4) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb height for VIVANTE tile\n", + plane->base.id, plane->name); + return -EINVAL; + } + break; + case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED: + if (fb->width % 64) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb width for VIVANTE super tile\n", + plane->base.id, plane->name); + return -EINVAL; + } + if (fb->height % 64) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb height for VIVANTE super tile\n", + plane->base.id, plane->name); + return -EINVAL; + } + break; + default: + break; + } + + /* do not support BT709 full range */ + if (dpu_plane_fb_format_is_yuv(fb->format->format) && + state->color_encoding == DRM_COLOR_YCBCR_BT709 && + state->color_range == DRM_COLOR_YCBCR_FULL_RANGE) + return -EINVAL; + +again: + fu = source_to_fu(res, + check_aux_source ? dpstate->aux_source : dpstate->source); + if (!fu) { + DRM_DEBUG_KMS("[PLANE:%d:%s] cannot get fetch unit\n", + plane->base.id, plane->name); + return -EINVAL; + } + + dprc = fu->dprc; + + if (dpstate->need_aux_source) + frame_width = check_aux_source ? 
+ dpstate->right_src_w : dpstate->left_src_w; + else + frame_width = src_w; + + if (dprc && + dprc_format_supported(dprc, fb->format->format, fb->modifier) && + dprc_stride_supported(dprc, fb->pitches[0], fb->pitches[1], + frame_width, fb->format->format)) { + if (check_aux_source) + dpstate->use_aux_prefetch = true; + else + dpstate->use_prefetch = true; + } else { + if (check_aux_source) + dpstate->use_aux_prefetch = false; + else + dpstate->use_prefetch = false; + } + + if (fb->modifier) { + if (check_aux_source && !dpstate->use_aux_prefetch) { + DRM_DEBUG_KMS("[PLANE:%d:%s] cannot do tile resolving wo prefetch\n", + plane->base.id, plane->name); + return -EINVAL; + } else if (!check_aux_source && !dpstate->use_prefetch) { + DRM_DEBUG_KMS("[PLANE:%d:%s] cannot do tile resolving wo prefetch\n", + plane->base.id, plane->name); + return -EINVAL; + } + } + + /* base address alignment check */ + baseaddr = drm_plane_state_to_baseaddr(state, check_aux_source); + switch (fb->format->format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + bpp = 16; + break; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + bpp = 8; + break; + default: + bpp = fb->format->cpp[0] * 8; + break; + } + switch (bpp) { + case 32: + if (baseaddr & 0x3) { + DRM_DEBUG_KMS("[PLANE:%d:%s] 32bpp fb bad baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + break; + case 16: + if (fb->modifier) { + if (baseaddr & 0x1) { + DRM_DEBUG_KMS("[PLANE:%d:%s] 16bpp tile fb bad baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + } else { + if (check_aux_source) { + if (baseaddr & + (dpstate->use_aux_prefetch ? 0x7 : 0x1)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] 16bpp fb bad baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + } else { + if (baseaddr & + (dpstate->use_prefetch ? 
0x7 : 0x1)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] 16bpp fb bad baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + } + } + break; + } + + if (fb->pitches[0] > 0x10000) { + DRM_DEBUG_KMS("[PLANE:%d:%s] fb pitch[0] is too big\n", + plane->base.id, plane->name); + return -EINVAL; + } + + /* UV base address alignment check, assuming 16bpp */ + if (fb->format->num_planes > 1) { + uv_baseaddr = drm_plane_state_to_uvbaseaddr(state, + check_aux_source); + if (fb->modifier) { + if (uv_baseaddr & 0x1) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment for tile fb\n", + plane->base.id, plane->name); + return -EINVAL; + } + } else { + if (check_aux_source) { + if (uv_baseaddr & + (dpstate->use_aux_prefetch ? 0x7 : 0x1)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + } else { + if (uv_baseaddr & + (dpstate->use_prefetch ? 0x7 : 0x1)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + } + } + + if (fb->pitches[1] > 0x10000) { + DRM_DEBUG_KMS("[PLANE:%d:%s] fb pitch[1] is too big\n", + plane->base.id, plane->name); + return -EINVAL; + } + } + + if (!check_aux_source && dpstate->use_prefetch && + !dprc_stride_double_check(dprc, frame_width, src_x, + fb->format->format, + fb->modifier, + baseaddr, uv_baseaddr)) { + if (fb->modifier) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad pitch\n", + plane->base.id, plane->name); + return -EINVAL; + } + + if (bpp == 16 && (baseaddr & 0x1)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + + if (uv_baseaddr & 0x1) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + + dpstate->use_prefetch = false; + } else if (check_aux_source && dpstate->use_aux_prefetch && + !dprc_stride_double_check(dprc, frame_width, src_x, + fb->format->format, + fb->modifier, + baseaddr, 
uv_baseaddr)) { + if (fb->modifier) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad pitch\n", + plane->base.id, plane->name); + return -EINVAL; + } + + if (bpp == 16 && (baseaddr & 0x1)) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + + if (uv_baseaddr & 0x1) { + DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment\n", + plane->base.id, plane->name); + return -EINVAL; + } + + dpstate->use_aux_prefetch = false; + } + + if (dpstate->need_aux_source && !check_aux_source) { + check_aux_source = true; + goto again; + } + + return 0; +} + +static void dpu_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct dpu_plane *dplane = to_dpu_plane(plane); + struct drm_plane_state *state = plane->state; + struct dpu_plane_state *dpstate = to_dpu_plane_state(state); + struct drm_framebuffer *fb = state->fb; + struct dpu_plane_res *res = &dplane->grp->res; + struct dpu_fetchunit *fu; + struct dpu_fetchunit *fe = NULL; + struct dprc *dprc; + struct dpu_hscaler *hs = NULL; + struct dpu_vscaler *vs = NULL; + struct dpu_layerblend *lb; + struct dpu_extdst *ed; + struct dpu_framegen *fg; + dma_addr_t baseaddr, uv_baseaddr = 0; + dpu_block_id_t blend, fe_id, vs_id = ID_NONE, hs_id; + lb_sec_sel_t source; + lb_prim_sel_t stage; + unsigned int stream_id; + unsigned int src_w, src_h, src_x, src_y, dst_w, dst_h; + unsigned int crtc_x; + unsigned int mt_w = 0, mt_h = 0; /* w/h in a micro-tile */ + int bpp, lb_id; + bool need_fetcheco, need_hscaler = false, need_vscaler = false; + bool prefetch_start, uv_prefetch_start; + bool crtc_use_pc = dpstate->left_src_w || dpstate->right_src_w; + bool update_aux_source = false; + bool use_prefetch; + bool need_modeset; + bool fb_is_interlaced; + + /* + * Do nothing since the plane is disabled by + * crtc_func->atomic_begin/flush. 
+ */ + if (!fb) + return; + + need_modeset = drm_atomic_crtc_needs_modeset(state->crtc->state); + fb_is_interlaced = !!(fb->flags & DRM_MODE_FB_INTERLACED); + +again: + need_fetcheco = false; + prefetch_start = false; + uv_prefetch_start = false; + + source = update_aux_source ? dpstate->aux_source : dpstate->source; + blend = update_aux_source ? dpstate->aux_blend : dpstate->blend; + stage = update_aux_source ? dpstate->aux_stage : dpstate->stage; + use_prefetch = update_aux_source ? + dpstate->use_aux_prefetch : dpstate->use_prefetch; + + if (crtc_use_pc) { + if (update_aux_source) { + stream_id = 1; + crtc_x = dpstate->right_crtc_x; + } else { + stream_id = dpstate->left_src_w ? 0 : 1; + crtc_x = dpstate->left_src_w ? + dpstate->left_crtc_x : dpstate->right_crtc_x; + } + } else { + stream_id = dplane->stream_id; + crtc_x = state->crtc_x; + } + + fg = res->fg[stream_id]; + + fu = source_to_fu(res, source); + if (!fu) + return; + + dprc = fu->dprc; + + lb_id = blend_to_id(blend); + if (lb_id < 0) + return; + + lb = res->lb[lb_id]; + + if (crtc_use_pc) { + if (update_aux_source || !dpstate->left_src_w) + src_w = dpstate->right_src_w; + else + src_w = dpstate->left_src_w; + } else { + src_w = drm_rect_width(&state->src) >> 16; + } + src_h = drm_rect_height(&state->src) >> 16; + if (crtc_use_pc && update_aux_source) { + if (fb->modifier) + src_x = (state->src_x >> 16) + dpstate->left_src_w; + else + src_x = 0; + } else { + src_x = fb->modifier ? (state->src_x >> 16) : 0; + } + src_y = fb->modifier ? 
(state->src_y >> 16) : 0; + dst_w = drm_rect_width(&state->dst); + dst_h = drm_rect_height(&state->dst); + + if (fetchunit_is_fetchdecode(fu)) { + if (fetchdecode_need_fetcheco(fu, fb->format->format)) { + need_fetcheco = true; + fe = fetchdecode_get_fetcheco(fu); + if (IS_ERR(fe)) + return; + } + + /* assume pixel combiner is unused */ + if ((src_w != dst_w) && !crtc_use_pc) { + need_hscaler = true; + hs = fetchdecode_get_hscaler(fu); + if (IS_ERR(hs)) + return; + } + + if ((src_h != dst_h) || fb_is_interlaced) { + need_vscaler = true; + vs = fetchdecode_get_vscaler(fu); + if (IS_ERR(vs)) + return; + } + } + + switch (fb->format->format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + bpp = 16; + break; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + bpp = 8; + break; + default: + bpp = fb->format->cpp[0] * 8; + break; + } + + switch (fb->modifier) { + case DRM_FORMAT_MOD_AMPHION_TILED: + mt_w = 8; + mt_h = 8; + break; + case DRM_FORMAT_MOD_VIVANTE_TILED: + case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED: + mt_w = (bpp == 16) ? 
8 : 4; + mt_h = 4; + break; + default: + break; + } + + baseaddr = drm_plane_state_to_baseaddr(state, update_aux_source); + if (need_fetcheco) + uv_baseaddr = drm_plane_state_to_uvbaseaddr(state, + update_aux_source); + + if (use_prefetch && + (fu->ops->get_stream_id(fu) == DPU_PLANE_SRC_DISABLED || + need_modeset)) + prefetch_start = true; + + fu->ops->set_burstlength(fu, src_x, mt_w, bpp, baseaddr, use_prefetch); + fu->ops->set_src_bpp(fu, bpp); + fu->ops->set_src_stride(fu, src_w, src_w, mt_w, bpp, fb->pitches[0], + baseaddr, use_prefetch); + fu->ops->set_src_buf_dimensions(fu, src_w, src_h, 0, fb_is_interlaced); + fu->ops->set_pixel_blend_mode(fu, state->pixel_blend_mode, + state->alpha, fb->format->format); + fu->ops->set_fmt(fu, fb->format->format, state->color_encoding, + state->color_range, fb_is_interlaced); + fu->ops->enable_src_buf(fu); + fu->ops->set_framedimensions(fu, src_w, src_h, fb_is_interlaced); + fu->ops->set_baseaddress(fu, src_w, src_x, src_y, mt_w, mt_h, bpp, + baseaddr); + fu->ops->set_stream_id(fu, stream_id ? 
+ DPU_PLANE_SRC_TO_DISP_STREAM1 : + DPU_PLANE_SRC_TO_DISP_STREAM0); + fu->ops->unpin_off(fu); + + DRM_DEBUG_KMS("[PLANE:%d:%s] %s-0x%02x\n", + plane->base.id, plane->name, fu->name, fu->id); + + if (need_fetcheco) { + fe_id = fetcheco_get_block_id(fe); + if (fe_id == ID_NONE) + return; + + if (use_prefetch && + (fe->ops->get_stream_id(fe) == DPU_PLANE_SRC_DISABLED || + need_modeset)) + uv_prefetch_start = true; + + fetchdecode_pixengcfg_dynamic_src_sel(fu, + (fd_dynamic_src_sel_t)fe_id); + fe->ops->set_burstlength(fe, src_w, mt_w, bpp, uv_baseaddr, + use_prefetch); + fe->ops->set_src_bpp(fe, 16); + fe->ops->set_src_stride(fe, src_w, src_x, mt_w, bpp, + fb->pitches[1], + uv_baseaddr, use_prefetch); + fe->ops->set_fmt(fe, fb->format->format, state->color_encoding, + state->color_range, fb_is_interlaced); + fe->ops->set_src_buf_dimensions(fe, src_w, src_h, + fb->format->format, + fb_is_interlaced); + fe->ops->set_framedimensions(fe, src_w, src_h, + fb_is_interlaced); + fe->ops->set_baseaddress(fe, src_w, src_x, src_y / 2, + mt_w, mt_h, bpp, uv_baseaddr); + fe->ops->enable_src_buf(fe); + fe->ops->set_stream_id(fe, stream_id ? + DPU_PLANE_SRC_TO_DISP_STREAM1 : + DPU_PLANE_SRC_TO_DISP_STREAM0); + fe->ops->unpin_off(fe); + + DRM_DEBUG_KMS("[PLANE:%d:%s] %s-0x%02x\n", + plane->base.id, plane->name, fe->name, fe_id); + } else { + if (fetchunit_is_fetchdecode(fu)) + fetchdecode_pixengcfg_dynamic_src_sel(fu, + FD_SRC_DISABLE); + } + + /* vscaler comes first */ + if (need_vscaler) { + vs_id = vscaler_get_block_id(vs); + if (vs_id == ID_NONE) + return; + + vscaler_pixengcfg_dynamic_src_sel(vs, (vs_src_sel_t)source); + vscaler_pixengcfg_clken(vs, CLKEN__AUTOMATIC); + vscaler_setup1(vs, src_h, state->crtc_h, fb_is_interlaced); + vscaler_setup2(vs, fb_is_interlaced); + vscaler_setup3(vs, fb_is_interlaced); + vscaler_output_size(vs, dst_h); + vscaler_field_mode(vs, fb_is_interlaced ? 
+ SCALER_ALWAYS0 : SCALER_INPUT); + vscaler_filter_mode(vs, SCALER_LINEAR); + vscaler_scale_mode(vs, SCALER_UPSCALE); + vscaler_mode(vs, SCALER_ACTIVE); + vscaler_set_stream_id(vs, dplane->stream_id ? + DPU_PLANE_SRC_TO_DISP_STREAM1 : + DPU_PLANE_SRC_TO_DISP_STREAM0); + + source = (lb_sec_sel_t)vs_id; + + DRM_DEBUG_KMS("[PLANE:%d:%s] vscaler-0x%02x\n", + plane->base.id, plane->name, vs_id); + } + + /* and then, hscaler */ + if (need_hscaler) { + hs_id = hscaler_get_block_id(hs); + if (hs_id == ID_NONE) + return; + + hscaler_pixengcfg_dynamic_src_sel(hs, need_vscaler ? + (hs_src_sel_t)vs_id : + (hs_src_sel_t)source); + hscaler_pixengcfg_clken(hs, CLKEN__AUTOMATIC); + hscaler_setup1(hs, src_w, dst_w); + hscaler_output_size(hs, dst_w); + hscaler_filter_mode(hs, SCALER_LINEAR); + hscaler_scale_mode(hs, SCALER_UPSCALE); + hscaler_mode(hs, SCALER_ACTIVE); + hscaler_set_stream_id(hs, dplane->stream_id ? + DPU_PLANE_SRC_TO_DISP_STREAM1 : + DPU_PLANE_SRC_TO_DISP_STREAM0); + + source = (lb_sec_sel_t)hs_id; + + DRM_DEBUG_KMS("[PLANE:%d:%s] hscaler-0x%02x\n", + plane->base.id, plane->name, hs_id); + } + + if (use_prefetch) { + dprc_configure(dprc, stream_id, + src_w, src_h, src_x, src_y, + fb->pitches[0], fb->format->format, + fb->modifier, baseaddr, uv_baseaddr, + prefetch_start, uv_prefetch_start, + fb_is_interlaced); + if (prefetch_start || uv_prefetch_start) + dprc_enable(dprc); + + dprc_reg_update(dprc); + + if (prefetch_start || uv_prefetch_start) { + dprc_first_frame_handle(dprc); + + if (!need_modeset && state->normalized_zpos != 0) + framegen_wait_for_frame_counter_moving(fg); + } + + if (update_aux_source) + DRM_DEBUG_KMS("[PLANE:%d:%s] use aux prefetch\n", + plane->base.id, plane->name); + else + DRM_DEBUG_KMS("[PLANE:%d:%s] use prefetch\n", + plane->base.id, plane->name); + } else if (dprc) { + dprc_disable(dprc); + + if (update_aux_source) + DRM_DEBUG_KMS("[PLANE:%d:%s] bypass aux prefetch\n", + plane->base.id, plane->name); + else + DRM_DEBUG_KMS("[PLANE:%d:%s] 
bypass prefetch\n", + plane->base.id, plane->name); + } + + layerblend_pixengcfg_dynamic_prim_sel(lb, stage); + layerblend_pixengcfg_dynamic_sec_sel(lb, source); + layerblend_control(lb, LB_BLEND); + layerblend_blendcontrol(lb, state->normalized_zpos, + state->pixel_blend_mode, state->alpha); + layerblend_pixengcfg_clken(lb, CLKEN__AUTOMATIC); + layerblend_position(lb, crtc_x, state->crtc_y); + + if (crtc_use_pc) { + if ((!stream_id && dpstate->is_left_top) || + (stream_id && dpstate->is_right_top)) { + ed = res->ed[stream_id]; + extdst_pixengcfg_src_sel(ed, (extdst_src_sel_t)blend); + } + } else { + if (dpstate->is_top) { + ed = res->ed[stream_id]; + extdst_pixengcfg_src_sel(ed, (extdst_src_sel_t)blend); + } + } + + if (update_aux_source) + DRM_DEBUG_KMS("[PLANE:%d:%s] *aux* source-0x%02x stage-0x%02x blend-0x%02x\n", + plane->base.id, plane->name, + source, dpstate->stage, dpstate->blend); + else + DRM_DEBUG_KMS("[PLANE:%d:%s] source-0x%02x stage-0x%02x blend-0x%02x\n", + plane->base.id, plane->name, + source, dpstate->stage, dpstate->blend); + + if (dpstate->need_aux_source && !update_aux_source) { + update_aux_source = true; + goto again; + } +} + +static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, + .atomic_check = dpu_plane_atomic_check, + .atomic_update = dpu_plane_atomic_update, +}; + +struct dpu_plane *dpu_plane_create(struct drm_device *drm, + unsigned int possible_crtcs, + unsigned int stream_id, + struct dpu_plane_grp *grp, + enum drm_plane_type type) +{ + struct dpu_plane *dpu_plane; + struct drm_plane *plane; + unsigned int zpos = dpu_plane_get_default_zpos(type); + int ret; + + dpu_plane = kzalloc(sizeof(*dpu_plane), GFP_KERNEL); + if (!dpu_plane) + return ERR_PTR(-ENOMEM); + + dpu_plane->stream_id = stream_id; + dpu_plane->grp = grp; + + plane = &dpu_plane->base; + + ret = drm_universal_plane_init(drm, plane, possible_crtcs, + &dpu_plane_funcs, + dpu_formats, ARRAY_SIZE(dpu_formats), + 
dpu_format_modifiers, type, NULL); + if (ret) + goto err; + + drm_plane_helper_add(plane, &dpu_plane_helper_funcs); + + ret = drm_plane_create_zpos_property(plane, + zpos, 0, grp->hw_plane_num - 1); + if (ret) + goto err; + + ret = drm_plane_create_alpha_property(plane); + if (ret) + goto err; + + ret = drm_plane_create_blend_mode_property(plane, + BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE)); + if (ret) + goto err; + + ret = drm_plane_create_color_properties(plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT601, + DRM_COLOR_YCBCR_FULL_RANGE); + if (ret) + goto err; + + return dpu_plane; + +err: + kfree(dpu_plane); + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/imx/dpu/dpu-plane.h b/drivers/gpu/drm/imx/dpu/dpu-plane.h new file mode 100644 index 000000000000..bc9189336bf3 --- /dev/null +++ b/drivers/gpu/drm/imx/dpu/dpu-plane.h @@ -0,0 +1,195 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#ifndef __DPU_PLANE_H__ +#define __DPU_PLANE_H__ + +#include <video/dpu.h> +#include "../imx-drm.h" + +#define MAX_DPU_PLANE_GRP (MAX_CRTC / 2) + +enum dpu_plane_src_type { + DPU_PLANE_SRC_FL, + DPU_PLANE_SRC_FW, + DPU_PLANE_SRC_FD, +}; + +struct dpu_plane { + struct drm_plane base; + struct dpu_plane_grp *grp; + struct list_head head; + unsigned int stream_id; +}; + +struct dpu_plane_state { + struct drm_plane_state base; + lb_prim_sel_t stage; + lb_sec_sel_t source; + dpu_block_id_t blend; + lb_prim_sel_t aux_stage; + lb_sec_sel_t aux_source; + dpu_block_id_t aux_blend; + + bool is_top; + bool use_prefetch; + bool use_aux_prefetch; + bool need_aux_source; + + /* used when pixel combiner is needed */ + unsigned int left_src_w; + unsigned int left_crtc_w; + unsigned int left_crtc_x; + unsigned int right_src_w; + unsigned int right_crtc_w; + unsigned int right_crtc_x; + + bool is_left_top; + bool is_right_top; +}; + +static const lb_prim_sel_t cf_stages[] = {LB_PRIM_SEL__CONSTFRAME0, + LB_PRIM_SEL__CONSTFRAME1}; +static const lb_prim_sel_t stages[] = {LB_PRIM_SEL__LAYERBLEND0, + LB_PRIM_SEL__LAYERBLEND1, + LB_PRIM_SEL__LAYERBLEND2, + LB_PRIM_SEL__LAYERBLEND3}; +/* TODO: Add source entries for subsidiary layers. 
*/ +static const lb_sec_sel_t sources[] = {LB_SEC_SEL__FETCHLAYER0, + LB_SEC_SEL__FETCHWARP2, + LB_SEC_SEL__FETCHDECODE0, + LB_SEC_SEL__FETCHDECODE1}; +static const dpu_block_id_t blends[] = {ID_LAYERBLEND0, ID_LAYERBLEND1, + ID_LAYERBLEND2, ID_LAYERBLEND3}; + +static inline struct dpu_plane *to_dpu_plane(struct drm_plane *plane) +{ + return container_of(plane, struct dpu_plane, base); +} + +static inline struct dpu_plane_state * +to_dpu_plane_state(struct drm_plane_state *plane_state) +{ + return container_of(plane_state, struct dpu_plane_state, base); +} + +static inline int source_to_type(lb_sec_sel_t source) +{ + switch (source) { + case LB_SEC_SEL__FETCHLAYER0: + return DPU_PLANE_SRC_FL; + case LB_SEC_SEL__FETCHWARP2: + return DPU_PLANE_SRC_FW; + case LB_SEC_SEL__FETCHDECODE0: + case LB_SEC_SEL__FETCHDECODE1: + return DPU_PLANE_SRC_FD; + default: + break; + } + + WARN_ON(1); + return -EINVAL; +} + +static inline int source_to_id(lb_sec_sel_t source) +{ + int i, offset = 0; + int type = source_to_type(source); + + for (i = 0; i < ARRAY_SIZE(sources); i++) { + if (source != sources[i]) + continue; + + /* FetchLayer */ + if (type == DPU_PLANE_SRC_FL) + return i; + + /* FetchWarp or FetchDecode */ + while (offset < ARRAY_SIZE(sources)) { + if (source_to_type(sources[offset]) == type) + break; + offset++; + } + return i - offset; + } + + WARN_ON(1); + return -EINVAL; +} + +static inline struct dpu_fetchunit * +source_to_fu(struct dpu_plane_res *res, lb_sec_sel_t source) +{ + int fu_type = source_to_type(source); + int fu_id = source_to_id(source); + + if (fu_type < 0 || fu_id < 0) + return NULL; + + switch (fu_type) { + case DPU_PLANE_SRC_FD: + return res->fd[fu_id]; + case DPU_PLANE_SRC_FL: + return res->fl[fu_id]; + case DPU_PLANE_SRC_FW: + return res->fw[fu_id]; + } + + return NULL; +} + +static inline struct dpu_fetchunit * +dpstate_to_fu(struct dpu_plane_state *dpstate) +{ + struct drm_plane *plane = dpstate->base.plane; + struct dpu_plane *dplane = 
to_dpu_plane(plane); + struct dpu_plane_res *res = &dplane->grp->res; + + return source_to_fu(res, dpstate->source); +} + +static inline int blend_to_id(dpu_block_id_t blend) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(blends); i++) { + if (blend == blends[i]) + return i; + } + + WARN_ON(1); + return -EINVAL; +} + +static inline bool drm_format_is_yuv(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return true; + default: + break; + } + + return false; +} + +struct dpu_plane *dpu_plane_create(struct drm_device *drm, + unsigned int possible_crtcs, + unsigned int stream_id, + struct dpu_plane_grp *grp, + enum drm_plane_type type); +#endif diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index da87c70e413b..959874fc15bf 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -343,23 +343,7 @@ static struct platform_driver imx_drm_pdrv = { .of_match_table = imx_drm_dt_ids, }, }; - -static struct platform_driver * const drivers[] = { - &imx_drm_pdrv, - &ipu_drm_driver, -}; - -static int __init imx_drm_init(void) -{ - return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); -} -module_init(imx_drm_init); - -static void __exit imx_drm_exit(void) -{ - platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); -} -module_exit(imx_drm_exit); +module_platform_driver(imx_drm_pdrv); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("i.MX drm driver core"); diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index ab9c6f706eb3..c04f150d2e7c 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -28,8 +28,6 @@ int imx_drm_init_drm(struct platform_device *pdev, int preferred_bpp); int imx_drm_exit_drm(void); -extern struct platform_driver ipu_drm_driver; - void imx_drm_mode_config_init(struct drm_device *drm); struct drm_gem_cma_object 
*imx_drm_fb_get_obj(struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 63c0284f8b3c..bf8c52e9dc9c 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -492,10 +492,16 @@ static int ipu_drm_remove(struct platform_device *pdev) return 0; } -struct platform_driver ipu_drm_driver = { +static struct platform_driver ipu_drm_driver = { .driver = { .name = "imx-ipuv3-crtc", }, .probe = ipu_drm_probe, .remove = ipu_drm_remove, }; +module_platform_driver(ipu_drm_driver); + +MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:imx-ipuv3-crtc"); diff --git a/drivers/gpu/imx/Kconfig b/drivers/gpu/imx/Kconfig new file mode 100644 index 000000000000..e170d3677f61 --- /dev/null +++ b/drivers/gpu/imx/Kconfig @@ -0,0 +1,2 @@ +source "drivers/gpu/imx/ipu-v3/Kconfig" +source "drivers/gpu/imx/dpu/Kconfig" diff --git a/drivers/gpu/imx/Makefile b/drivers/gpu/imx/Makefile new file mode 100644 index 000000000000..3ac4d4b25035 --- /dev/null +++ b/drivers/gpu/imx/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ +obj-$(CONFIG_IMX_DPU_CORE) += dpu/ diff --git a/drivers/gpu/imx/dpu/Kconfig b/drivers/gpu/imx/dpu/Kconfig new file mode 100644 index 000000000000..d62891118907 --- /dev/null +++ b/drivers/gpu/imx/dpu/Kconfig @@ -0,0 +1,8 @@ +config IMX_DPU_CORE + tristate "i.MX DPU core support" + depends on ARCH_MXC + select GENERIC_IRQ_CHIP + help + Choose this if you have a Freescale i.MX8QM or i.MX8QXP system and + want to use the Display Processing Unit. This option only enables + DPU base support. 
diff --git a/drivers/gpu/imx/dpu/Makefile b/drivers/gpu/imx/dpu/Makefile new file mode 100644 index 000000000000..569ef08c5751 --- /dev/null +++ b/drivers/gpu/imx/dpu/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_IMX_DPU_CORE) += imx-dpu-core.o + +imx-dpu-core-objs := dpu-common.o dpu-constframe.o dpu-disengcfg.o \ + dpu-extdst.o dpu-fetchdecode.o dpu-fetcheco.o \ + dpu-fetchlayer.o dpu-fetchwarp.o dpu-fetchunit.o \ + dpu-framegen.o dpu-hscaler.o dpu-layerblend.o \ + dpu-sc-misc.o dpu-store.o dpu-tcon.o dpu-vscaler.o diff --git a/drivers/gpu/imx/dpu/dpu-common.c b/drivers/gpu/imx/dpu/dpu-common.c new file mode 100644 index 000000000000..4f2918f629f1 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-common.c @@ -0,0 +1,1225 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ +#include <linux/clk.h> +#include <linux/fb.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <video/dpu.h> +#include <video/imx8-pc.h> +#include <video/imx8-prefetch.h> +#include "dpu-prv.h" + +#define IMX_DPU_BLITENG_NAME "imx-drm-dpu-bliteng" + +static bool display_plane_video_proc = true; +module_param(display_plane_video_proc, bool, 0444); +MODULE_PARM_DESC(display_plane_video_proc, + "Enable video processing for display [default=true]"); + +#define DPU_CM_REG_DEFINE1(name1, name2) \ +static inline u32 name1(const struct cm_reg_ofs *ofs) \ +{ \ + return ofs->name2; \ +} + +#define DPU_CM_REG_DEFINE2(name1, name2) \ +static inline u32 name1(const struct cm_reg_ofs *ofs, \ + unsigned int n) \ +{ \ + return ofs->name2 + (4 * n); \ +} + +DPU_CM_REG_DEFINE1(LOCKUNLOCK, lockunlock); +DPU_CM_REG_DEFINE1(LOCKSTATUS, lockstatus); +DPU_CM_REG_DEFINE2(USERINTERRUPTMASK, userinterruptmask); +DPU_CM_REG_DEFINE2(INTERRUPTENABLE, interruptenable); +DPU_CM_REG_DEFINE2(INTERRUPTPRESET, interruptpreset); +DPU_CM_REG_DEFINE2(INTERRUPTCLEAR, interruptclear); +DPU_CM_REG_DEFINE2(INTERRUPTSTATUS, interruptstatus); +DPU_CM_REG_DEFINE2(USERINTERRUPTENABLE, userinterruptenable); +DPU_CM_REG_DEFINE2(USERINTERRUPTPRESET, userinterruptpreset); +DPU_CM_REG_DEFINE2(USERINTERRUPTCLEAR, userinterruptclear); +DPU_CM_REG_DEFINE2(USERINTERRUPTSTATUS, userinterruptstatus); +DPU_CM_REG_DEFINE1(GENERALPURPOSE, generalpurpose); + +static inline u32 dpu_cm_read(struct dpu_soc *dpu, unsigned int offset) +{ + return readl(dpu->cm_reg + offset); +} + +static inline void dpu_cm_write(struct dpu_soc *dpu, + unsigned int offset, u32 value) +{ + writel(value, dpu->cm_reg + offset); +} + +/* Constant Frame Unit */ 
+static const unsigned long cf_ofss[] = {0x4400, 0x5400, 0x4c00, 0x5c00}; +static const unsigned long cf_pec_ofss[] = {0x960, 0x9e0, 0x9a0, 0xa20}; + +/* Display Engine Configuration Unit */ +static const unsigned long dec_ofss[] = {0xb400, 0xb420}; + +/* External Destination Unit */ +static const unsigned long ed_ofss[] = {0x4800, 0x5800, 0x5000, 0x6000}; +static const unsigned long ed_pec_ofss[] = {0x980, 0xa00, 0x9c0, 0xa40}; + +/* Fetch Decode Unit */ +static const unsigned long fd_ofss[] = {0x6c00, 0x7800}; +static const unsigned long fd_pec_ofss[] = {0xa80, 0xaa0}; + +/* Fetch ECO Unit */ +static const unsigned long fe_ofss[] = {0x7400, 0x8000, 0x6800, 0x1c00}; +static const unsigned long fe_pec_ofss[] = {0xa90, 0xab0, 0xa70, 0x850}; + +/* Frame Generator Unit */ +static const unsigned long fg_ofss[] = {0xb800, 0xd400}; + +/* Fetch Layer Unit */ +static const unsigned long fl_ofss[] = {0x8400}; +static const unsigned long fl_pec_ofss[] = {0xac0}; + +/* Fetch Warp Unit */ +static const unsigned long fw_ofss[] = {0x6400}; +static const unsigned long fw_pec_ofss[] = {0xa60}; + +/* Horizontal Scaler Unit */ +static const unsigned long hs_ofss[] = {0x9000, 0x9c00, 0x3000}; +static const unsigned long hs_pec_ofss[] = {0xb00, 0xb60, 0x8c0}; + +/* Layer Blend Unit */ +static const unsigned long lb_ofss[] = {0xa400, 0xa800, 0xac00, 0xb000}; +static const unsigned long lb_pec_ofss[] = {0xba0, 0xbc0, 0xbe0, 0xc00}; + +/* Store Unit */ +static const unsigned long st_ofss[] = {0x4000}; +static const unsigned long st_pec_ofss[] = {0x940}; + +/* Timing Controller Unit */ +static const unsigned long tcon_ofss[] = {0xcc00, 0xe800}; + +/* Vertical Scaler Unit */ +static const unsigned long vs_ofss[] = {0x9400, 0xa000, 0x3400}; +static const unsigned long vs_pec_ofss[] = {0xb20, 0xb80, 0x8e0}; + +static const struct dpu_unit _cfs = { + .name = "ConstFrame", + .num = ARRAY_SIZE(cf_ids), + .ids = cf_ids, + .pec_ofss = cf_pec_ofss, + .ofss = cf_ofss, +}; + +static const struct 
dpu_unit _decs = { + .name = "DisEngCfg", + .num = ARRAY_SIZE(dec_ids), + .ids = dec_ids, + .pec_ofss = NULL, + .ofss = dec_ofss, +}; + +static const struct dpu_unit _eds = { + .name = "ExtDst", + .num = ARRAY_SIZE(ed_ids), + .ids = ed_ids, + .pec_ofss = ed_pec_ofss, + .ofss = ed_ofss, +}; + +static const struct dpu_unit _fds = { + .name = "FetchDecode", + .num = ARRAY_SIZE(fd_ids), + .ids = fd_ids, + .pec_ofss = fd_pec_ofss, + .ofss = fd_ofss, + .dprc_ids = fd_dprc_ids, +}; + +static const struct dpu_unit _fes = { + .name = "FetchECO", + .num = ARRAY_SIZE(fe_ids), + .ids = fe_ids, + .pec_ofss = fe_pec_ofss, + .ofss = fe_ofss, +}; + +static const struct dpu_unit _fgs = { + .name = "FrameGen", + .num = ARRAY_SIZE(fg_ids), + .ids = fg_ids, + .pec_ofss = NULL, + .ofss = fg_ofss, +}; + +static const struct dpu_unit _fls = { + .name = "FetchLayer", + .num = ARRAY_SIZE(fl_ids), + .ids = fl_ids, + .pec_ofss = fl_pec_ofss, + .ofss = fl_ofss, + .dprc_ids = fl_dprc_ids, +}; + +static const struct dpu_unit _fws = { + .name = "FetchWarp", + .num = ARRAY_SIZE(fw_ids), + .ids = fw_ids, + .pec_ofss = fw_pec_ofss, + .ofss = fw_ofss, + .dprc_ids = fw_dprc_ids, +}; + +static const struct dpu_unit _hss = { + .name = "HScaler", + .num = ARRAY_SIZE(hs_ids), + .ids = hs_ids, + .pec_ofss = hs_pec_ofss, + .ofss = hs_ofss, +}; + +static const struct dpu_unit _lbs = { + .name = "LayerBlend", + .num = ARRAY_SIZE(lb_ids), + .ids = lb_ids, + .pec_ofss = lb_pec_ofss, + .ofss = lb_ofss, +}; + +static const struct dpu_unit _sts = { + .name = "Store", + .num = ARRAY_SIZE(st_ids), + .ids = st_ids, + .pec_ofss = st_pec_ofss, + .ofss = st_ofss, +}; + +static const struct dpu_unit _tcons = { + .name = "TCon", + .num = ARRAY_SIZE(tcon_ids), + .ids = tcon_ids, + .pec_ofss = NULL, + .ofss = tcon_ofss, +}; + +static const struct dpu_unit _vss = { + .name = "VScaler", + .num = ARRAY_SIZE(vs_ids), + .ids = vs_ids, + .pec_ofss = vs_pec_ofss, + .ofss = vs_ofss, +}; + +static const struct cm_reg_ofs 
_cm_reg_ofs = { + .ipidentifier = 0, + .lockunlock = 0x40, + .lockstatus = 0x44, + .userinterruptmask = 0x48, + .interruptenable = 0x50, + .interruptpreset = 0x58, + .interruptclear = 0x60, + .interruptstatus = 0x68, + .userinterruptenable = 0x80, + .userinterruptpreset = 0x88, + .userinterruptclear = 0x90, + .userinterruptstatus = 0x98, + .generalpurpose = 0x100, +}; + +static const unsigned long unused_irq[] = {0x00000000, 0xfffe0008}; + +static const struct dpu_data dpu_data_qxp = { + .cm_ofs = 0x0, + .cfs = &_cfs, + .decs = &_decs, + .eds = &_eds, + .fds = &_fds, + .fes = &_fes, + .fgs = &_fgs, + .fls = &_fls, + .fws = &_fws, + .hss = &_hss, + .lbs = &_lbs, + .sts = &_sts, + .tcons = &_tcons, + .vss = &_vss, + .cm_reg_ofs = &_cm_reg_ofs, + .unused_irq = unused_irq, + .plane_src_mask = DPU_PLANE_SRC_FL0_ID | DPU_PLANE_SRC_FW2_ID | + DPU_PLANE_SRC_FD0_ID | DPU_PLANE_SRC_FD1_ID, + .has_dual_ldb = true, + .syncmode_min_prate = UINT_MAX, /* pc is unused */ + .singlemode_max_width = UINT_MAX, /* pc is unused */ +}; + +static const struct dpu_data dpu_data_qm = { + .cm_ofs = 0x0, + .cfs = &_cfs, + .decs = &_decs, + .eds = &_eds, + .fds = &_fds, + .fes = &_fes, + .fgs = &_fgs, + .fls = &_fls, + .fws = &_fws, + .hss = &_hss, + .lbs = &_lbs, + .sts = &_sts, + .tcons = &_tcons, + .vss = &_vss, + .cm_reg_ofs = &_cm_reg_ofs, + .unused_irq = unused_irq, + .plane_src_mask = DPU_PLANE_SRC_FL0_ID | DPU_PLANE_SRC_FW2_ID | + DPU_PLANE_SRC_FD0_ID | DPU_PLANE_SRC_FD1_ID, + .has_dual_ldb = false, + .syncmode_min_prate = 300000, + .singlemode_max_width = 1920, + .master_stream_id = 1, +}; + +static const struct of_device_id dpu_dt_ids[] = { + { + .compatible = "fsl,imx8qxp-dpu", + .data = &dpu_data_qxp, + }, { + .compatible = "fsl,imx8qm-dpu", + .data = &dpu_data_qm, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, dpu_dt_ids); + +unsigned int dpu_get_syncmode_min_prate(struct dpu_soc *dpu) +{ + return dpu->data->syncmode_min_prate; +} 
+EXPORT_SYMBOL_GPL(dpu_get_syncmode_min_prate); + +unsigned int dpu_get_singlemode_max_width(struct dpu_soc *dpu) +{ + return dpu->data->singlemode_max_width; +} +EXPORT_SYMBOL_GPL(dpu_get_singlemode_max_width); + +unsigned int dpu_get_master_stream_id(struct dpu_soc *dpu) +{ + return dpu->data->master_stream_id; +} +EXPORT_SYMBOL_GPL(dpu_get_master_stream_id); + +bool dpu_vproc_has_fetcheco_cap(u32 cap_mask) +{ + return !!(cap_mask & DPU_VPROC_CAP_FETCHECO); +} +EXPORT_SYMBOL_GPL(dpu_vproc_has_fetcheco_cap); + +bool dpu_vproc_has_hscale_cap(u32 cap_mask) +{ + return !!(cap_mask & DPU_VPROC_CAP_HSCALE); +} +EXPORT_SYMBOL_GPL(dpu_vproc_has_hscale_cap); + +bool dpu_vproc_has_vscale_cap(u32 cap_mask) +{ + return !!(cap_mask & DPU_VPROC_CAP_VSCALE); +} +EXPORT_SYMBOL_GPL(dpu_vproc_has_vscale_cap); + +u32 dpu_vproc_get_fetcheco_cap(u32 cap_mask) +{ + return cap_mask & DPU_VPROC_CAP_FETCHECO; +} +EXPORT_SYMBOL_GPL(dpu_vproc_get_fetcheco_cap); + +u32 dpu_vproc_get_hscale_cap(u32 cap_mask) +{ + return cap_mask & DPU_VPROC_CAP_HSCALE; +} +EXPORT_SYMBOL_GPL(dpu_vproc_get_hscale_cap); + +u32 dpu_vproc_get_vscale_cap(u32 cap_mask) +{ + return cap_mask & DPU_VPROC_CAP_VSCALE; +} +EXPORT_SYMBOL_GPL(dpu_vproc_get_vscale_cap); + +int dpu_format_horz_chroma_subsampling(u32 format) +{ + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + return 2; + default: + return 1; + } +} + +int dpu_format_vert_chroma_subsampling(u32 format) +{ + switch (format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return 2; + default: + return 1; + } +} + +int dpu_format_num_planes(u32 format) +{ + switch (format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return 2; + default: + return 1; + } +} + +int dpu_format_plane_width(int width, u32 format, int plane) +{ + if (plane >= 
dpu_format_num_planes(format)) + return 0; + + if (plane == 0) + return width; + + return width / dpu_format_horz_chroma_subsampling(format); +} + +int dpu_format_plane_height(int height, u32 format, int plane) +{ + if (plane >= dpu_format_num_planes(format)) + return 0; + + if (plane == 0) + return height; + + return height / dpu_format_vert_chroma_subsampling(format); +} + +static void dpu_detach_pm_domains(struct dpu_soc *dpu) +{ + if (dpu->pd_pll1_link && !IS_ERR(dpu->pd_pll1_link)) + device_link_del(dpu->pd_pll1_link); + if (dpu->pd_pll1_dev && !IS_ERR(dpu->pd_pll1_dev)) + dev_pm_domain_detach(dpu->pd_pll1_dev, true); + + if (dpu->pd_pll0_link && !IS_ERR(dpu->pd_pll0_link)) + device_link_del(dpu->pd_pll0_link); + if (dpu->pd_pll0_dev && !IS_ERR(dpu->pd_pll0_dev)) + dev_pm_domain_detach(dpu->pd_pll0_dev, true); + + if (dpu->pd_dc_link && !IS_ERR(dpu->pd_dc_link)) + device_link_del(dpu->pd_dc_link); + if (dpu->pd_dc_dev && !IS_ERR(dpu->pd_dc_dev)) + dev_pm_domain_detach(dpu->pd_dc_dev, true); + + dpu->pd_dc_dev = NULL; + dpu->pd_dc_link = NULL; + dpu->pd_pll0_dev = NULL; + dpu->pd_pll0_link = NULL; + dpu->pd_pll1_dev = NULL; + dpu->pd_pll1_link = NULL; +} + +static int dpu_attach_pm_domains(struct dpu_soc *dpu) +{ + struct device *dev = dpu->dev; + u32 flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE; + int ret = 0; + + dpu->pd_dc_dev = dev_pm_domain_attach_by_name(dev, "dc"); + if (IS_ERR(dpu->pd_dc_dev)) { + ret = PTR_ERR(dpu->pd_dc_dev); + dev_err(dev, "Failed to attach dc pd dev: %d\n", ret); + goto fail; + } + dpu->pd_dc_link = device_link_add(dev, dpu->pd_dc_dev, flags); + if (IS_ERR(dpu->pd_dc_link)) { + ret = PTR_ERR(dpu->pd_dc_link); + dev_err(dev, "Failed to add device link to dc pd dev: %d\n", + ret); + goto fail; + } + + dpu->pd_pll0_dev = dev_pm_domain_attach_by_name(dev, "pll0"); + if (IS_ERR(dpu->pd_pll0_dev)) { + ret = PTR_ERR(dpu->pd_pll0_dev); + dev_err(dev, "Failed to attach pll0 pd dev: %d\n", ret); + goto fail; + } + 
dpu->pd_pll0_link = device_link_add(dev, dpu->pd_pll0_dev, flags); + if (IS_ERR(dpu->pd_pll0_link)) { + ret = PTR_ERR(dpu->pd_pll0_link); + dev_err(dev, "Failed to add device link to pll0 pd dev: %d\n", + ret); + goto fail; + } + + dpu->pd_pll1_dev = dev_pm_domain_attach_by_name(dev, "pll1"); + if (IS_ERR(dpu->pd_pll1_dev)) { + ret = PTR_ERR(dpu->pd_pll1_dev); + dev_err(dev, "Failed to attach pll0 pd dev: %d\n", ret); + goto fail; + } + dpu->pd_pll1_link = device_link_add(dev, dpu->pd_pll1_dev, flags); + if (IS_ERR(dpu->pd_pll1_link)) { + ret = PTR_ERR(dpu->pd_pll1_link); + dev_err(dev, "Failed to add device link to pll1 pd dev: %d\n", + ret); + goto fail; + } +fail: + dpu_detach_pm_domains(dpu); + return ret; +} + +#define DPU_UNITS_ADDR_DBG(unit) \ +{ \ + const struct dpu_unit *us = data->unit##s; \ + int i; \ + for (i = 0; i < us->num; i++) { \ + if (us->pec_ofss) { \ + dev_dbg(&pdev->dev, "%s%d: pixengcfg @ 0x%08lx,"\ + " unit @ 0x%08lx\n", us->name, \ + us->ids[i], \ + dpu_base + us->pec_ofss[i], \ + dpu_base + us->ofss[i]); \ + } else { \ + dev_dbg(&pdev->dev, \ + "%s%d: unit @ 0x%08lx\n", us->name, \ + us->ids[i], dpu_base + us->ofss[i]); \ + } \ + } \ +} + +static void dpu_units_addr_dbg(struct dpu_soc *dpu, + struct platform_device *pdev, unsigned long dpu_base) +{ + const struct dpu_data *data = dpu->data; + + dev_dbg(dpu->dev, "Common: 0x%08lx\n", dpu_base + data->cm_ofs); + DPU_UNITS_ADDR_DBG(cf); + DPU_UNITS_ADDR_DBG(dec); + DPU_UNITS_ADDR_DBG(ed); + DPU_UNITS_ADDR_DBG(fd); + DPU_UNITS_ADDR_DBG(fe); + DPU_UNITS_ADDR_DBG(fg); + DPU_UNITS_ADDR_DBG(fl); + DPU_UNITS_ADDR_DBG(fw); + DPU_UNITS_ADDR_DBG(hs); + DPU_UNITS_ADDR_DBG(lb); + DPU_UNITS_ADDR_DBG(st); + DPU_UNITS_ADDR_DBG(tcon); + DPU_UNITS_ADDR_DBG(vs); +} + +static int dpu_get_irq(struct platform_device *pdev, struct dpu_soc *dpu) +{ +#define DPU_GET_IRQ(name) \ +{ \ + dpu->irq_##name = platform_get_irq_byname(pdev, "" #name ""); \ + dev_dbg(dpu->dev, "irq_" #name ": %d\n", dpu->irq_##name); \ + if 
(dpu->irq_##name < 0) { \ + dev_err(dpu->dev, "failed to get irq " #name "\n"); \ + return dpu->irq_##name; \ + } \ +} + + DPU_GET_IRQ(extdst0_shdload); + DPU_GET_IRQ(extdst4_shdload); + DPU_GET_IRQ(extdst1_shdload); + DPU_GET_IRQ(extdst5_shdload); + DPU_GET_IRQ(disengcfg_shdload0); + DPU_GET_IRQ(disengcfg_framecomplete0); + DPU_GET_IRQ(disengcfg_shdload1); + DPU_GET_IRQ(disengcfg_framecomplete1); + + return 0; +} + +static void dpu_irq_handle(struct irq_desc *desc, enum dpu_irq irq) +{ + struct dpu_soc *dpu = irq_desc_get_handler_data(desc); + const struct dpu_data *data = dpu->data; + const struct cm_reg_ofs *ofs = data->cm_reg_ofs; + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int virq; + u32 status; + + chained_irq_enter(chip, desc); + + status = dpu_cm_read(dpu, USERINTERRUPTSTATUS(ofs, irq / 32)); + status &= dpu_cm_read(dpu, USERINTERRUPTENABLE(ofs, irq / 32)); + + if (status & BIT(irq % 32)) { + virq = irq_linear_revmap(dpu->domain, irq); + if (virq) + generic_handle_irq(virq); + } + + chained_irq_exit(chip, desc); +} + +#define DPU_IRQ_HANDLER_DEFINE(name1, name2) \ +static void dpu_##name1##_irq_handler(struct irq_desc *desc) \ +{ \ + dpu_irq_handle(desc, IRQ_##name2); \ +} + +DPU_IRQ_HANDLER_DEFINE(extdst0_shdload, EXTDST0_SHDLOAD) +DPU_IRQ_HANDLER_DEFINE(extdst4_shdload, EXTDST4_SHDLOAD) +DPU_IRQ_HANDLER_DEFINE(extdst1_shdload, EXTDST1_SHDLOAD) +DPU_IRQ_HANDLER_DEFINE(extdst5_shdload, EXTDST5_SHDLOAD) +DPU_IRQ_HANDLER_DEFINE(disengcfg_shdload0, DISENGCFG_SHDLOAD0) +DPU_IRQ_HANDLER_DEFINE(disengcfg_framecomplete0, DISENGCFG_FRAMECOMPLETE0) +DPU_IRQ_HANDLER_DEFINE(disengcfg_shdload1, DISENGCFG_SHDLOAD1) +DPU_IRQ_HANDLER_DEFINE(disengcfg_framecomplete1, DISENGCFG_FRAMECOMPLETE1) + +int dpu_map_irq(struct dpu_soc *dpu, int irq) +{ + int virq = irq_linear_revmap(dpu->domain, irq); + + if (!virq) + virq = irq_create_mapping(dpu->domain, irq); + + return virq; +} +EXPORT_SYMBOL_GPL(dpu_map_irq); + +static int dpu_irq_init(struct dpu_soc *dpu) 
+{ + const struct dpu_data *data = dpu->data; + const struct cm_reg_ofs *ofs = data->cm_reg_ofs; + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + int ret, i; + + dpu->domain = irq_domain_add_linear(dpu->dev->of_node, + dpu->irq_line_num, + &irq_generic_chip_ops, dpu); + if (!dpu->domain) { + dev_err(dpu->dev, "failed to add irq domain\n"); + return -ENODEV; + } + + ret = irq_alloc_domain_generic_chips(dpu->domain, 32, 1, "DPU", + handle_level_irq, 0, 0, 0); + if (ret < 0) { + dev_err(dpu->dev, "failed to alloc generic irq chips\n"); + irq_domain_remove(dpu->domain); + return ret; + } + + for (i = 0; i < dpu->irq_line_num; i += 32) { + /* Mask and clear all interrupts */ + dpu_cm_write(dpu, USERINTERRUPTENABLE(ofs, i / 32), 0); + dpu_cm_write(dpu, USERINTERRUPTCLEAR(ofs, i / 32), + ~data->unused_irq[i / 32]); + dpu_cm_write(dpu, INTERRUPTENABLE(ofs, i / 32), 0); + dpu_cm_write(dpu, INTERRUPTCLEAR(ofs, i / 32), + ~data->unused_irq[i / 32]); + + /* Set all interrupts to user mode */ + dpu_cm_write(dpu, USERINTERRUPTMASK(ofs, i / 32), + ~data->unused_irq[i / 32]); + + gc = irq_get_domain_generic_chip(dpu->domain, i); + gc->reg_base = dpu->cm_reg; + gc->unused = data->unused_irq[i / 32]; + ct = gc->chip_types; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = irq_gc_mask_clr_bit; + ct->chip.irq_unmask = irq_gc_mask_set_bit; + ct->regs.ack = USERINTERRUPTCLEAR(ofs, i / 32); + ct->regs.mask = USERINTERRUPTENABLE(ofs, i / 32); + } + +#define DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(name) \ +irq_set_chained_handler_and_data(dpu->irq_##name, dpu_##name##_irq_handler, dpu) + + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(extdst0_shdload); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(extdst4_shdload); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(extdst1_shdload); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(extdst5_shdload); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(disengcfg_shdload0); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(disengcfg_framecomplete0); + 
DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(disengcfg_shdload1); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA1(disengcfg_framecomplete1); + + return 0; +} + +static void dpu_irq_exit(struct dpu_soc *dpu) +{ + unsigned int i, irq; + +#define DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(name) \ +irq_set_chained_handler_and_data(dpu->irq_##name, NULL, NULL) + + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(extdst0_shdload); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(extdst4_shdload); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(extdst1_shdload); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(extdst5_shdload); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(disengcfg_shdload0); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(disengcfg_framecomplete0); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(disengcfg_shdload1); + DPU_IRQ_SET_CHAINED_HANDLER_AND_DATA2(disengcfg_framecomplete1); + + for (i = 0; i < dpu->irq_line_num; i++) { + irq = irq_linear_revmap(dpu->domain, i); + if (irq) + irq_dispose_mapping(irq); + } + + irq_domain_remove(dpu->domain); +} + +#define _DPU_UNITS_INIT(unit) \ +{ \ + const struct dpu_unit *us = data->unit##s; \ + int i; \ + \ + /* software check */ \ + if (WARN_ON(us->num > ARRAY_SIZE(unit##_ids))) \ + return -EINVAL; \ + \ + for (i = 0; i < us->num; i++) \ + _dpu_##unit##_init(dpu, us->ids[i]); \ +} + +static int +_dpu_submodules_init(struct dpu_soc *dpu, struct platform_device *pdev) +{ + const struct dpu_data *data = dpu->data; + + _DPU_UNITS_INIT(cf); + _DPU_UNITS_INIT(dec); + _DPU_UNITS_INIT(ed); + _DPU_UNITS_INIT(fd); + _DPU_UNITS_INIT(fe); + _DPU_UNITS_INIT(fg); + _DPU_UNITS_INIT(fl); + _DPU_UNITS_INIT(fw); + _DPU_UNITS_INIT(hs); + _DPU_UNITS_INIT(lb); + _DPU_UNITS_INIT(st); + _DPU_UNITS_INIT(tcon); + _DPU_UNITS_INIT(vs); + + return 0; +} + +#define DPU_UNIT_INIT(dpu, base, unit, name, id, pec_ofs, ofs) \ +{ \ + int ret; \ + ret = dpu_##unit##_init((dpu), (id), \ + (pec_ofs) ? 
(base) + (pec_ofs) : 0, \ + (base) + (ofs)); \ + if (ret) { \ + dev_err((dpu)->dev, "init %s%d failed with %d\n", \ + (name), (id), ret); \ + return ret; \ + } \ +} + +#define DPU_UNITS_INIT(unit) \ +{ \ + const struct dpu_unit *us = data->unit##s; \ + int i; \ + \ + /* software check */ \ + if (WARN_ON(us->num > ARRAY_SIZE(unit##_ids))) \ + return -EINVAL; \ + \ + for (i = 0; i < us->num; i++) \ + DPU_UNIT_INIT(dpu, dpu_base, unit, us->name, \ + us->ids[i], \ + us->pec_ofss ? us->pec_ofss[i] : 0, \ + us->ofss[i]); \ +} + +static int dpu_submodules_init(struct dpu_soc *dpu, + struct platform_device *pdev, unsigned long dpu_base) +{ + const struct dpu_data *data = dpu->data; + const struct dpu_unit *fds = data->fds; + const struct dpu_unit *fls = data->fls; + const struct dpu_unit *fws = data->fws; + const struct dpu_unit *tcons = data->tcons; + struct dpu_fetchunit *fu; + struct dprc *dprc; + struct dpu_tcon *tcon; + struct pc *pc; + int i; + + DPU_UNITS_INIT(cf); + DPU_UNITS_INIT(dec); + DPU_UNITS_INIT(ed); + DPU_UNITS_INIT(fd); + DPU_UNITS_INIT(fe); + DPU_UNITS_INIT(fg); + DPU_UNITS_INIT(fl); + DPU_UNITS_INIT(fw); + DPU_UNITS_INIT(hs); + DPU_UNITS_INIT(lb); + DPU_UNITS_INIT(st); + DPU_UNITS_INIT(tcon); + DPU_UNITS_INIT(vs); + + for (i = 0; i < fds->num; i++) { + dprc = dprc_lookup_by_phandle(dpu->dev, "fsl,dpr-channels", + fds->dprc_ids[i]); + if (!dprc) + return -EPROBE_DEFER; + + fu = dpu_fd_get(dpu, i); + fetchunit_get_dprc(fu, dprc); + dpu_fd_put(fu); + } + + for (i = 0; i < fls->num; i++) { + dprc = dprc_lookup_by_phandle(dpu->dev, "fsl,dpr-channels", + fls->dprc_ids[i]); + if (!dprc) + return -EPROBE_DEFER; + + fu = dpu_fl_get(dpu, i); + fetchunit_get_dprc(fu, dprc); + dpu_fl_put(fu); + } + + for (i = 0; i < fws->num; i++) { + dprc = dprc_lookup_by_phandle(dpu->dev, "fsl,dpr-channels", + fws->dprc_ids[i]); + if (!dprc) + return -EPROBE_DEFER; + + fu = dpu_fw_get(dpu, fw_ids[i]); + fetchunit_get_dprc(fu, dprc); + dpu_fw_put(fu); + } + + pc = 
pc_lookup_by_phandle(dpu->dev, "fsl,pixel-combiner"); + if (!pc) + return -EPROBE_DEFER; + + for (i = 0; i < tcons->num; i++) { + tcon = dpu_tcon_get(dpu, i); + tcon_get_pc(tcon, pc); + dpu_tcon_put(tcon); + } + + return 0; +} + +static int platform_remove_devices_fn(struct device *dev, void *unused) +{ + struct platform_device *pdev = to_platform_device(dev); + + platform_device_unregister(pdev); + + return 0; +} + +static void platform_device_unregister_children(struct platform_device *pdev) +{ + device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn); +} + +struct dpu_platform_reg { + struct dpu_client_platformdata pdata; + const char *name; +}; + +static struct dpu_platform_reg client_reg[] = { + { + .pdata = { + .stream_id = 0, + }, + .name = "imx-dpu-crtc", + }, { + .pdata = { + .stream_id = 1, + }, + .name = "imx-dpu-crtc", + }, { + .pdata = { }, + .name = IMX_DPU_BLITENG_NAME, + } +}; + +static DEFINE_MUTEX(dpu_client_id_mutex); +static int dpu_client_id; + +static int dpu_get_plane_resource(struct dpu_soc *dpu, + struct dpu_plane_res *res) +{ + const struct dpu_unit *fds = dpu->data->fds; + const struct dpu_unit *fls = dpu->data->fls; + const struct dpu_unit *fws = dpu->data->fws; + const struct dpu_unit *lbs = dpu->data->lbs; + struct dpu_plane_grp *grp = plane_res_to_grp(res); + int i; + + for (i = 0; i < ARRAY_SIZE(res->ed); i++) { + res->ed[i] = dpu_ed_get(dpu, i); + if (IS_ERR(res->ed[i])) + return PTR_ERR(res->ed[i]); + } + for (i = 0; i < fds->num; i++) { + res->fd[i] = dpu_fd_get(dpu, i); + if (IS_ERR(res->fd[i])) + return PTR_ERR(res->fd[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fe); i++) { + res->fe[i] = dpu_fe_get(dpu, i); + if (IS_ERR(res->fe[i])) + return PTR_ERR(res->fe[i]); + grp->hw_plane_fetcheco_num = ARRAY_SIZE(res->fe); + } + for (i = 0; i < fls->num; i++) { + res->fl[i] = dpu_fl_get(dpu, i); + if (IS_ERR(res->fl[i])) + return PTR_ERR(res->fl[i]); + } + for (i = 0; i < fws->num; i++) { + res->fw[i] = dpu_fw_get(dpu, 
fw_ids[i]); + if (IS_ERR(res->fw[i])) + return PTR_ERR(res->fw[i]); + } + /* HScaler could be shared with capture. */ + if (display_plane_video_proc) { + for (i = 0; i < ARRAY_SIZE(res->hs); i++) { + res->hs[i] = dpu_hs_get(dpu, hs_ids[i]); + if (IS_ERR(res->hs[i])) + return PTR_ERR(res->hs[i]); + } + grp->hw_plane_hscaler_num = ARRAY_SIZE(res->hs); + } + for (i = 0; i < lbs->num; i++) { + res->lb[i] = dpu_lb_get(dpu, i); + if (IS_ERR(res->lb[i])) + return PTR_ERR(res->lb[i]); + } + /* VScaler could be shared with capture. */ + if (display_plane_video_proc) { + for (i = 0; i < ARRAY_SIZE(res->vs); i++) { + res->vs[i] = dpu_vs_get(dpu, vs_ids[i]); + if (IS_ERR(res->vs[i])) + return PTR_ERR(res->vs[i]); + } + grp->hw_plane_vscaler_num = ARRAY_SIZE(res->vs); + } + + grp->hw_plane_num = fds->num + fls->num + fws->num; + + return 0; +} + +static void dpu_put_plane_resource(struct dpu_plane_res *res) +{ + struct dpu_plane_grp *grp = plane_res_to_grp(res); + int i; + + for (i = 0; i < ARRAY_SIZE(res->ed); i++) { + if (!IS_ERR_OR_NULL(res->ed[i])) + dpu_ed_put(res->ed[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fd); i++) { + if (!IS_ERR_OR_NULL(res->fd[i])) + dpu_fd_put(res->fd[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fe); i++) { + if (!IS_ERR_OR_NULL(res->fe[i])) + dpu_fe_put(res->fe[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fl); i++) { + if (!IS_ERR_OR_NULL(res->fl[i])) + dpu_fl_put(res->fl[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fw); i++) { + if (!IS_ERR_OR_NULL(res->fw[i])) + dpu_fw_put(res->fw[i]); + } + for (i = 0; i < ARRAY_SIZE(res->hs); i++) { + if (!IS_ERR_OR_NULL(res->hs[i])) + dpu_hs_put(res->hs[i]); + } + for (i = 0; i < ARRAY_SIZE(res->lb); i++) { + if (!IS_ERR_OR_NULL(res->lb[i])) + dpu_lb_put(res->lb[i]); + } + for (i = 0; i < ARRAY_SIZE(res->vs); i++) { + if (!IS_ERR_OR_NULL(res->vs[i])) + dpu_vs_put(res->vs[i]); + } + + grp->hw_plane_num = 0; +} + +static int dpu_add_client_devices(struct dpu_soc *dpu) +{ + const struct dpu_data *data = dpu->data; + 
struct device *dev = dpu->dev; + struct dpu_platform_reg *reg; + struct dpu_plane_grp *plane_grp; + struct dpu_store *st9 = NULL; + size_t client_num, reg_size; + int i, id, ret; + + client_num = ARRAY_SIZE(client_reg); + + reg = devm_kcalloc(dev, client_num, sizeof(*reg), GFP_KERNEL); + if (!reg) + return -ENODEV; + + plane_grp = devm_kzalloc(dev, sizeof(*plane_grp), GFP_KERNEL); + if (!plane_grp) + return -ENODEV; + + mutex_init(&plane_grp->mutex); + + mutex_lock(&dpu_client_id_mutex); + id = dpu_client_id; + dpu_client_id += client_num; + mutex_unlock(&dpu_client_id_mutex); + + reg_size = client_num * sizeof(struct dpu_platform_reg); + memcpy(reg, &client_reg[0], reg_size); + + plane_grp->src_mask = data->plane_src_mask; + plane_grp->id = id / client_num; + plane_grp->has_vproc = display_plane_video_proc; + + ret = dpu_get_plane_resource(dpu, &plane_grp->res); + if (ret) + goto err_get_plane_res; + + st9 = dpu_st_get(dpu, 9); + if (IS_ERR(st9)) { + ret = PTR_ERR(st9); + goto err_get_plane_res; + } + + for (i = 0; i < client_num; i++) { + struct platform_device *pdev; + struct device_node *of_node = NULL; + + if (!strcmp(reg[i].name, IMX_DPU_BLITENG_NAME)) { + /* As bliteng has no of_node, so to use dpu's. */ + of_node = dev->of_node; + } else { + /* Associate subdevice with the corresponding port node. 
*/ + of_node = of_graph_get_port_by_id(dev->of_node, i); + if (!of_node) { + dev_info(dev, + "no port@%d node in %s, not using DISP%d\n", + i, dev->of_node->full_name, i); + continue; + } + } + + reg[i].pdata.plane_grp = plane_grp; + reg[i].pdata.di_grp_id = plane_grp->id; + reg[i].pdata.st9 = st9; + + pdev = platform_device_alloc(reg[i].name, id++); + if (!pdev) { + ret = -ENOMEM; + goto err_register; + } + + pdev->dev.parent = dev; + + reg[i].pdata.of_node = of_node; + ret = platform_device_add_data(pdev, ®[i].pdata, + sizeof(reg[i].pdata)); + if (!ret) + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); + goto err_register; + } + } + + return 0; + +err_register: + platform_device_unregister_children(to_platform_device(dev)); + dpu_st_put(st9); +err_get_plane_res: + dpu_put_plane_resource(&plane_grp->res); + + return ret; +} + +static int dpu_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id = + of_match_device(dpu_dt_ids, &pdev->dev); + struct device_node *np = pdev->dev.of_node; + struct dpu_soc *dpu; + struct resource *res; + unsigned long dpu_base; + const struct dpu_data *data = of_id->data; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + dpu_base = res->start; + + dpu = devm_kzalloc(&pdev->dev, sizeof(*dpu), GFP_KERNEL); + if (!dpu) + return -ENODEV; + + dpu->dev = &pdev->dev; + dpu->data = data; + dpu->id = of_alias_get_id(np, "dpu"); + dpu->irq_line_num = of_irq_count(np); + + dpu_units_addr_dbg(dpu, pdev, dpu_base); + + ret = dpu_get_irq(pdev, dpu); + if (ret < 0) + return ret; + + ret = dpu_sc_misc_get_handle(dpu); + if (ret < 0) + return ret; + + spin_lock_init(&dpu->lock); + + dpu->cm_reg = devm_ioremap(dpu->dev, dpu_base + data->cm_ofs, SZ_1K); + if (!dpu->cm_reg) + return -ENOMEM; + + ret = dpu_attach_pm_domains(dpu); + if (ret) + return ret; + + ret = dpu_irq_init(dpu); + if (ret) + goto failed_irq; + + ret = dpu_submodules_init(dpu, pdev, 
dpu_base); + if (ret) + goto failed_submodules_init; + + ret = dpu_sc_misc_init(dpu); + if (ret < 0) { + dev_err(dpu->dev, + "failed to initialize pixel link %d\n", ret); + goto failed_sc_misc_init; + } + + platform_set_drvdata(pdev, dpu); + + ret = dpu_add_client_devices(dpu); + if (ret) { + dev_err(dpu->dev, + "adding client devices failed with %d\n", ret); + goto failed_add_clients; + } + + dev_info(dpu->dev, "driver probed\n"); + + return 0; + +failed_add_clients: + platform_set_drvdata(pdev, NULL); +failed_sc_misc_init: +failed_submodules_init: + dpu_irq_exit(dpu); +failed_irq: + dpu_detach_pm_domains(dpu); + return ret; +} + +static int dpu_remove(struct platform_device *pdev) +{ + struct dpu_soc *dpu = platform_get_drvdata(pdev); + + platform_device_unregister_children(pdev); + + dpu_irq_exit(dpu); + dpu_detach_pm_domains(dpu); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dpu_suspend(struct device *dev) +{ + /* + * The dpu core driver currently depends on the client drivers + * to do suspend operations to leave dpu a cleaned up state + * machine status before the system enters sleep mode. 
+ */ + return 0; +} + +static int dpu_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dpu_soc *dpu = platform_get_drvdata(pdev); + + dpu_sc_misc_init(dpu); + + _dpu_submodules_init(dpu, pdev); + + return 0; +} +#endif + +static const struct dev_pm_ops dpu_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(dpu_suspend, dpu_resume) +}; + +static struct platform_driver dpu_driver = { + .driver = { + .pm = &dpu_pm_ops, + .name = "dpu-core", + .of_match_table = dpu_dt_ids, + }, + .probe = dpu_probe, + .remove = dpu_remove, +}; + +module_platform_driver(dpu_driver); + +MODULE_DESCRIPTION("i.MX DPU driver"); +MODULE_AUTHOR("Freescale Semiconductor, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/imx/dpu/dpu-constframe.c b/drivers/gpu/imx/dpu/dpu-constframe.c new file mode 100644 index 000000000000..26c7f85fa67a --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-constframe.c @@ -0,0 +1,253 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +static unsigned int safety_stream_cf_color = 0x0; +module_param(safety_stream_cf_color, uint, 0444); +MODULE_PARM_DESC(safety_stream_cf_color, +"Safety stream constframe color in hex(0xRRGGBBAA) [default=0x00000000]"); + +#define FRAMEDIMENSIONS 0xC +#define WIDTH(w) (((w) - 1) & 0x3FFF) +#define HEIGHT(h) ((((h) - 1) & 0x3FFF) << 16) +#define CONSTANTCOLOR 0x10 +#define RED(r) (((r) & 0xFF) << 24) +#define GREEN(g) (((g) & 0xFF) << 16) +#define BLUE(b) (((b) & 0xFF) << 8) +#define ALPHA(a) ((a) & 0xFF) +#define CONTROLTRIGGER 0x14 +#define START 0x18 +#define STATUS 0x1C + +struct dpu_constframe { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_cf_read(struct dpu_constframe *cf, unsigned int offset) +{ + return readl(cf->base + offset); +} + +static inline void dpu_cf_write(struct dpu_constframe *cf, + unsigned int offset, u32 value) +{ + writel(value, cf->base + offset); +} + +void constframe_shden(struct dpu_constframe *cf, bool enable) +{ + u32 val; + + val = enable ? 
SHDEN : 0; + + mutex_lock(&cf->mutex); + dpu_cf_write(cf, STATICCONTROL, val); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_shden); + +void constframe_framedimensions(struct dpu_constframe *cf, unsigned int w, + unsigned int h) +{ + u32 val; + + val = WIDTH(w) | HEIGHT(h); + + mutex_lock(&cf->mutex); + dpu_cf_write(cf, FRAMEDIMENSIONS, val); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_framedimensions); + +void constframe_framedimensions_copy_prim(struct dpu_constframe *cf) +{ + struct dpu_constframe *prim_cf = NULL; + unsigned int prim_id; + int i; + u32 val; + + if (cf->id != 0 && cf->id != 1) { + dev_warn(cf->dpu->dev, "ConstFrame%d is not a secondary one\n", + cf->id); + return; + } + + prim_id = cf->id + 4; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == prim_id) + prim_cf = cf->dpu->cf_priv[i]; + + if (!prim_cf) { + dev_warn(cf->dpu->dev, "cannot find ConstFrame%d's primary peer\n", + cf->id); + return; + } + + mutex_lock(&cf->mutex); + val = dpu_cf_read(prim_cf, FRAMEDIMENSIONS); + dpu_cf_write(cf, FRAMEDIMENSIONS, val); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_framedimensions_copy_prim); + +void constframe_constantcolor(struct dpu_constframe *cf, unsigned int r, + unsigned int g, unsigned int b, unsigned int a) +{ + u32 val; + + val = RED(r) | GREEN(g) | BLUE(b) | ALPHA(a); + + mutex_lock(&cf->mutex); + dpu_cf_write(cf, CONSTANTCOLOR, val); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_constantcolor); + +void constframe_controltrigger(struct dpu_constframe *cf, bool trigger) +{ + u32 val; + + val = trigger ? 
SHDTOKGEN : 0; + + mutex_lock(&cf->mutex); + dpu_cf_write(cf, CONTROLTRIGGER, val); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_controltrigger); + +struct dpu_constframe *dpu_cf_get(struct dpu_soc *dpu, int id) +{ + struct dpu_constframe *cf; + int i; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == id) + break; + + if (i == ARRAY_SIZE(cf_ids)) + return ERR_PTR(-EINVAL); + + cf = dpu->cf_priv[i]; + + mutex_lock(&cf->mutex); + + if (cf->inuse) { + mutex_unlock(&cf->mutex); + return ERR_PTR(-EBUSY); + } + + cf->inuse = true; + + mutex_unlock(&cf->mutex); + + return cf; +} +EXPORT_SYMBOL_GPL(dpu_cf_get); + +void dpu_cf_put(struct dpu_constframe *cf) +{ + mutex_lock(&cf->mutex); + + cf->inuse = false; + + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(dpu_cf_put); + +struct dpu_constframe *dpu_aux_cf_peek(struct dpu_constframe *cf) +{ + unsigned int aux_id = cf->id ^ 1; + int i; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == aux_id) + return cf->dpu->cf_priv[i]; + + return NULL; +} +EXPORT_SYMBOL_GPL(dpu_aux_cf_peek); + +void _dpu_cf_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_constframe *cf; + int i; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(cf_ids))) + return; + + cf = dpu->cf_priv[i]; + + constframe_shden(cf, true); + + if (id == 4 || id == 5) { + mutex_lock(&cf->mutex); + dpu_cf_write(cf, CONSTANTCOLOR, safety_stream_cf_color); + mutex_unlock(&cf->mutex); + } +} + +int dpu_cf_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_constframe *cf; + int i; + + cf = devm_kzalloc(dpu->dev, sizeof(*cf), GFP_KERNEL); + if (!cf) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == id) + break; + + if (i == ARRAY_SIZE(cf_ids)) + return -EINVAL; + + dpu->cf_priv[i] = cf; + + cf->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!cf->pec_base) + return 
-ENOMEM; + + cf->base = devm_ioremap(dpu->dev, base, SZ_32); + if (!cf->base) + return -ENOMEM; + + cf->dpu = dpu; + cf->id = id; + + mutex_init(&cf->mutex); + + _dpu_cf_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-disengcfg.c b/drivers/gpu/imx/dpu/dpu-disengcfg.c new file mode 100644 index 000000000000..1885dbf4bec9 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-disengcfg.c @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <drm/drm_mode.h> +#include <linux/io.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include "dpu-prv.h" + +#define CLOCKCTRL 0x8 +typedef enum { + DSPCLKDIVIDE__DIV1, /* Ext disp clk signal has pix clk freq. */ + DSPCLKDIVIDE__DIV2, /* Ext disp clk signal has 2x the pix clk freq. 
*/ +} clkdivide_t; +#define POLARITYCTRL 0xC +#define POLHS_HIGH BIT(0) +#define POLVS_HIGH BIT(1) +#define POLEN_HIGH BIT(2) +#define PIXINV_INV BIT(3) +#define SRCSELECT 0x10 + +struct dpu_disengcfg { + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_dec_read(struct dpu_disengcfg *dec, unsigned int offset) +{ + return readl(dec->base + offset); +} + +static inline void dpu_dec_write(struct dpu_disengcfg *dec, + unsigned int offset, u32 value) +{ + writel(value, dec->base + offset); +} + +struct dpu_disengcfg *dpu_dec_get(struct dpu_soc *dpu, int id) +{ + struct dpu_disengcfg *dec; + int i; + + for (i = 0; i < ARRAY_SIZE(dec_ids); i++) + if (dec_ids[i] == id) + break; + + if (i == ARRAY_SIZE(dec_ids)) + return ERR_PTR(-EINVAL); + + dec = dpu->dec_priv[i]; + + mutex_lock(&dec->mutex); + + if (dec->inuse) { + mutex_unlock(&dec->mutex); + return ERR_PTR(-EBUSY); + } + + dec->inuse = true; + + mutex_unlock(&dec->mutex); + + return dec; +} +EXPORT_SYMBOL_GPL(dpu_dec_get); + +void dpu_dec_put(struct dpu_disengcfg *dec) +{ + mutex_lock(&dec->mutex); + + dec->inuse = false; + + mutex_unlock(&dec->mutex); +} +EXPORT_SYMBOL_GPL(dpu_dec_put); + +struct dpu_disengcfg *dpu_aux_dec_peek(struct dpu_disengcfg *dec) +{ + return dec->dpu->dec_priv[dec->id ^ 1]; +} +EXPORT_SYMBOL_GPL(dpu_aux_dec_peek); + +void _dpu_dec_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_disengcfg *dec; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(dec_ids); i++) + if (ed_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(dec_ids))) + return; + + dec = dpu->dec_priv[i]; + + val = dpu_dec_read(dec, POLARITYCTRL); + val &= ~POLHS_HIGH; + val &= ~POLVS_HIGH; + dpu_dec_write(dec, POLARITYCTRL, val); +} + +int dpu_dec_init(struct dpu_soc *dpu, unsigned int id, + unsigned long unused, unsigned long base) +{ + struct dpu_disengcfg *dec; + + dec = devm_kzalloc(dpu->dev, sizeof(*dec), GFP_KERNEL); + if (!dec) + return 
-ENOMEM; + + dpu->dec_priv[id] = dec; + + dec->base = devm_ioremap(dpu->dev, base, SZ_16); + if (!dec->base) + return -ENOMEM; + + dec->dpu = dpu; + dec->id = id; + mutex_init(&dec->mutex); + + _dpu_dec_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-extdst.c b/drivers/gpu/imx/dpu/dpu-extdst.c new file mode 100644 index 000000000000..013e03a2537e --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-extdst.c @@ -0,0 +1,521 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_STATIC 0x8 +#define POWERDOWN BIT(4) +#define SYNC_MODE BIT(8) +#define SW_RESET BIT(11) +#define DIV(n) (((n) & 0xFF) << 16) +#define DIV_RESET 0x80 +#define PIXENGCFG_DYNAMIC 0xC +#define PIXENGCFG_REQUEST 0x10 +#define SHDLDREQ(n) BIT(n) +#define SEL_SHDLDREQ BIT(0) +#define PIXENGCFG_TRIGGER 0x14 +#define SYNC_TRIGGER BIT(0) +#define TRIGGER_SEQUENCE_COMPLETE BIT(4) +#define PIXENGCFG_STATUS 0x18 +#define SYNC_BUSY BIT(8) +#define KICK_MODE BIT(8) +#define PERFCOUNTMODE BIT(12) +#define CONTROL 0xC +#define GAMMAAPPLYENABLE BIT(0) +#define SOFTWAREKICK 0x10 +#define KICK BIT(0) +#define STATUS 0x14 +#define CNT_ERR_STS BIT(0) +#define CONTROLWORD 0x18 +#define CURPIXELCNT 0x1C +static u16 get_xval(u32 pixel_cnt) +{ + return pixel_cnt & 0xFFFF; +} + +static u16 get_yval(u32 pixel_cnt) +{ + return pixel_cnt >> 16; +} +#define LASTPIXELCNT 0x20 +#define PERFCOUNTER 0x24 + +struct dpu_extdst { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_pec_ed_read(struct dpu_extdst *ed, unsigned int offset) +{ + return readl(ed->pec_base + offset); +} + +static inline void dpu_pec_ed_write(struct dpu_extdst *ed, + unsigned int offset, u32 value) +{ + writel(value, ed->pec_base + offset); +} + +static inline u32 dpu_ed_read(struct dpu_extdst *ed, unsigned int offset) +{ + return readl(ed->base + offset); +} + +static inline void dpu_ed_write(struct dpu_extdst *ed, + unsigned int offset, u32 value) +{ + writel(value, ed->base + offset); +} + +static inline bool dpu_ed_is_safety_stream(struct dpu_extdst *ed) +{ + if (ed->id == 4 || ed->id == 5) + return true; + + return false; +} + +void extdst_pixengcfg_shden(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + 
mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_shden); + +void extdst_pixengcfg_powerdown(struct dpu_extdst *ed, bool powerdown) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (powerdown) + val |= POWERDOWN; + else + val &= ~POWERDOWN; + dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_powerdown); + +void extdst_pixengcfg_sync_mode(struct dpu_extdst *ed, ed_sync_mode_t mode) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (mode == AUTO) + val |= SYNC_MODE; + else + val &= ~SYNC_MODE; + dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_sync_mode); + +void extdst_pixengcfg_reset(struct dpu_extdst *ed, bool reset) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (reset) + val |= SW_RESET; + else + val &= ~SW_RESET; + dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_reset); + +void extdst_pixengcfg_div(struct dpu_extdst *ed, u16 div) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + val &= ~0xFF0000; + val |= DIV(div); + dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_div); + +void extdst_pixengcfg_syncmode_master(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (enable) + val |= BIT(16); + else + val &= ~BIT(16); + dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_syncmode_master); + +int 
extdst_pixengcfg_src_sel(struct dpu_extdst *ed, extdst_src_sel_t src) +{ + mutex_lock(&ed->mutex); + dpu_pec_ed_write(ed, PIXENGCFG_DYNAMIC, src); + mutex_unlock(&ed->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_src_sel); + +void extdst_pixengcfg_sel_shdldreq(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_REQUEST); + val |= SEL_SHDLDREQ; + dpu_pec_ed_write(ed, PIXENGCFG_REQUEST, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_sel_shdldreq); + +void extdst_pixengcfg_shdldreq(struct dpu_extdst *ed, u32 req_mask) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_REQUEST); + val |= req_mask; + dpu_pec_ed_write(ed, PIXENGCFG_REQUEST, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_shdldreq); + +void extdst_pixengcfg_sync_trigger(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + dpu_pec_ed_write(ed, PIXENGCFG_TRIGGER, SYNC_TRIGGER); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_sync_trigger); + +void extdst_pixengcfg_trigger_sequence_complete(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + dpu_pec_ed_write(ed, PIXENGCFG_TRIGGER, TRIGGER_SEQUENCE_COMPLETE); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_trigger_sequence_complete); + +bool extdst_pixengcfg_is_sync_busy(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATUS); + mutex_unlock(&ed->mutex); + + return val & SYNC_BUSY; +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_is_sync_busy); + +ed_pipeline_status_t extdst_pixengcfg_pipeline_status(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATUS); + mutex_unlock(&ed->mutex); + + return val & 0x3; +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_pipeline_status); + +void extdst_shden(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = 
dpu_ed_read(ed, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_ed_write(ed, STATICCONTROL, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_shden); + +void extdst_kick_mode(struct dpu_extdst *ed, ed_kick_mode_t mode) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, STATICCONTROL); + val &= ~KICK_MODE; + val |= mode; + dpu_ed_write(ed, STATICCONTROL, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_kick_mode); + +void extdst_perfcountmode(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, STATICCONTROL); + if (enable) + val |= PERFCOUNTMODE; + else + val &= ~PERFCOUNTMODE; + dpu_ed_write(ed, STATICCONTROL, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_perfcountmode); + +void extdst_gamma_apply_enable(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, CONTROL); + if (enable) + val |= GAMMAAPPLYENABLE; + else + val &= ~GAMMAAPPLYENABLE; + dpu_ed_write(ed, CONTROL, val); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_gamma_apply_enable); + +void extdst_kick(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + dpu_ed_write(ed, SOFTWAREKICK, KICK); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_kick); + +void extdst_cnt_err_clear(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + dpu_ed_write(ed, STATUS, CNT_ERR_STS); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_cnt_err_clear); + +bool extdst_cnt_err_status(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, STATUS); + mutex_unlock(&ed->mutex); + + return val & CNT_ERR_STS; +} +EXPORT_SYMBOL_GPL(extdst_cnt_err_status); + +u32 extdst_last_control_word(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, CONTROLWORD); + mutex_unlock(&ed->mutex); + + return val; +} +EXPORT_SYMBOL_GPL(extdst_last_control_word); + +void 
extdst_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, CURPIXELCNT); + mutex_unlock(&ed->mutex); + + *x = get_xval(val); + *y = get_yval(val); +} +EXPORT_SYMBOL_GPL(extdst_pixel_cnt); + +void extdst_last_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, LASTPIXELCNT); + mutex_unlock(&ed->mutex); + + *x = get_xval(val); + *y = get_yval(val); +} +EXPORT_SYMBOL_GPL(extdst_last_pixel_cnt); + +u32 extdst_perfresult(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, PERFCOUNTER); + mutex_unlock(&ed->mutex); + + return val; +} +EXPORT_SYMBOL_GPL(extdst_perfresult); + +bool extdst_is_master(struct dpu_extdst *ed) +{ + const struct dpu_data *data = ed->dpu->data; + + return ed->id == data->master_stream_id; +} +EXPORT_SYMBOL_GPL(extdst_is_master); + +struct dpu_extdst *dpu_ed_get(struct dpu_soc *dpu, int id) +{ + struct dpu_extdst *ed; + int i; + + for (i = 0; i < ARRAY_SIZE(ed_ids); i++) + if (ed_ids[i] == id) + break; + + if (i == ARRAY_SIZE(ed_ids)) + return ERR_PTR(-EINVAL); + + ed = dpu->ed_priv[i]; + + mutex_lock(&ed->mutex); + + if (ed->inuse) { + mutex_unlock(&ed->mutex); + return ERR_PTR(-EBUSY); + } + + ed->inuse = true; + + mutex_unlock(&ed->mutex); + + return ed; +} +EXPORT_SYMBOL_GPL(dpu_ed_get); + +void dpu_ed_put(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + + ed->inuse = false; + + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(dpu_ed_put); + +struct dpu_extdst *dpu_aux_ed_peek(struct dpu_extdst *ed) +{ + unsigned int aux_id = ed->id ^ 1; + int i; + + for (i = 0; i < ARRAY_SIZE(ed_ids); i++) + if (ed_ids[i] == aux_id) + return ed->dpu->ed_priv[i]; + + return NULL; +} +EXPORT_SYMBOL_GPL(dpu_aux_ed_peek); + +void _dpu_ed_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_extdst *ed; + int i; + + for (i = 0; i < ARRAY_SIZE(ed_ids); i++) + if (ed_ids[i] == id) + break; + + if 
(WARN_ON(i == ARRAY_SIZE(ed_ids))) + return; + + ed = dpu->ed_priv[i]; + + extdst_pixengcfg_src_sel(ed, ED_SRC_DISABLE); + extdst_pixengcfg_shden(ed, true); + extdst_pixengcfg_powerdown(ed, false); + extdst_pixengcfg_sync_mode(ed, SINGLE); + extdst_pixengcfg_reset(ed, false); + extdst_pixengcfg_div(ed, DIV_RESET); + extdst_shden(ed, true); + extdst_perfcountmode(ed, false); + extdst_kick_mode(ed, EXTERNAL); +} + +int dpu_ed_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_extdst *ed; + int ret, i; + + ed = devm_kzalloc(dpu->dev, sizeof(*ed), GFP_KERNEL); + if (!ed) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(ed_ids); i++) + if (ed_ids[i] == id) + break; + + if (i == ARRAY_SIZE(ed_ids)) + return -EINVAL; + + dpu->ed_priv[i] = ed; + + ed->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_32); + if (!ed->pec_base) + return -ENOMEM; + + ed->base = devm_ioremap(dpu->dev, base, SZ_64); + if (!ed->base) + return -ENOMEM; + + ed->dpu = dpu; + ed->id = id; + mutex_init(&ed->mutex); + + ret = extdst_pixengcfg_src_sel(ed, ED_SRC_DISABLE); + if (ret < 0) + return ret; + + _dpu_ed_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-fetchdecode.c b/drivers/gpu/imx/dpu/dpu-fetchdecode.c new file mode 100644 index 000000000000..fa9076c1be37 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetchdecode.c @@ -0,0 +1,676 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <drm/drm_blend.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +static const u32 fd_vproc_cap[2] = { + DPU_VPROC_CAP_HSCALER4 | DPU_VPROC_CAP_VSCALER4 | + DPU_VPROC_CAP_FETCHECO0, + DPU_VPROC_CAP_HSCALER5 | DPU_VPROC_CAP_VSCALER5 | + DPU_VPROC_CAP_FETCHECO1, +}; + +#define PIXENGCFG_DYNAMIC 0x8 +static const fd_dynamic_src_sel_t fd_srcs[2][4] = { + { + FD_SRC_DISABLE, FD_SRC_FETCHECO0, + FD_SRC_FETCHDECODE1, FD_SRC_FETCHWARP2 + }, { + FD_SRC_DISABLE, FD_SRC_FETCHECO1, + FD_SRC_FETCHDECODE0, FD_SRC_FETCHWARP2 + }, +}; + +#define PIXENGCFG_STATUS 0xC + +#define RINGBUFSTARTADDR0 0x10 +#define RINGBUFWRAPADDR0 0x14 +#define FRAMEPROPERTIES0 0x18 +#define BASEADDRESS0 0x1C +#define SOURCEBUFFERATTRIBUTES0 0x20 +#define SOURCEBUFFERDIMENSION0 0x24 +#define COLORCOMPONENTBITS0 0x28 +#define COLORCOMPONENTSHIFT0 0x2C +#define LAYEROFFSET0 0x30 +#define CLIPWINDOWOFFSET0 0x34 +#define CLIPWINDOWDIMENSIONS0 0x38 +#define CONSTANTCOLOR0 0x3C +#define LAYERPROPERTY0 0x40 +#define FRAMEDIMENSIONS 0x44 +#define FRAMERESAMPLING 0x48 +#define DECODECONTROL 0x4C +#define SOURCEBUFFERLENGTH 0x50 +#define CONTROL 0x54 +#define CONTROLTRIGGER 0x58 +#define START 0x5C +#define FETCHTYPE 0x60 +#define DECODERSTATUS 0x64 +#define READADDRESS0 0x68 +#define BURSTBUFFERPROPERTIES 0x6C +#define STATUS 0x70 +#define HIDDENSTATUS 0x74 + +struct dpu_fetchdecode { + struct dpu_fetchunit fu; + fetchtype_t fetchtype; +}; + +int fetchdecode_pixengcfg_dynamic_src_sel(struct dpu_fetchunit *fu, + fd_dynamic_src_sel_t src) +{ + int i; + + mutex_lock(&fu->mutex); + for (i = 0; i < 4; i++) { + if (fd_srcs[fu->id][i] == src) { + dpu_pec_fu_write(fu, PIXENGCFG_DYNAMIC, src); + mutex_unlock(&fu->mutex); + return 0; + } + } + mutex_unlock(&fu->mutex); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(fetchdecode_pixengcfg_dynamic_src_sel); + +static void 
+fetchdecode_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width, + unsigned int x_offset, unsigned int y_offset, + unsigned int mt_w, unsigned int mt_h, + int bpp, dma_addr_t baddr) +{ + unsigned int burst_size, stride; + bool nonzero_mod = !!mt_w; + + if (nonzero_mod) { + /* consider PRG x offset to calculate buffer address */ + baddr += (x_offset % mt_w) * (bpp / 8); + + burst_size = fetchunit_burst_size_fixup_tkt343664(baddr); + + stride = width * (bpp / 8); + stride = fetchunit_stride_fixup_tkt339017(stride, burst_size, + baddr, nonzero_mod); + + /* consider PRG y offset to calculate buffer address */ + baddr += (y_offset % mt_h) * stride; + } + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, BASEADDRESS0, baddr); + mutex_unlock(&fu->mutex); +} + +static void fetchdecode_set_src_bpp(struct dpu_fetchunit *fu, int bpp) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES0); + val &= ~0x3f0000; + val |= BITSPERPIXEL(bpp); + dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES0, val); + mutex_unlock(&fu->mutex); +} + +static void +fetchdecode_set_src_stride(struct dpu_fetchunit *fu, + unsigned int width, unsigned int x_offset, + unsigned int mt_w, int bpp, unsigned int stride, + dma_addr_t baddr, bool use_prefetch) +{ + unsigned int burst_size; + bool nonzero_mod = !!mt_w; + u32 val; + + if (use_prefetch) { + /* consider PRG x offset to calculate buffer address */ + if (nonzero_mod) + baddr += (x_offset % mt_w) * (bpp / 8); + + burst_size = fetchunit_burst_size_fixup_tkt343664(baddr); + + stride = width * (bpp / 8); + stride = fetchunit_stride_fixup_tkt339017(stride, burst_size, + baddr, nonzero_mod); + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES0); + val &= ~0xffff; + val |= STRIDE(stride); + dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES0, val); + mutex_unlock(&fu->mutex); +} + +static void +fetchdecode_set_src_buf_dimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + u32 unused, bool 
deinterlace) +{ + u32 val; + + if (deinterlace) + h /= 2; + + val = LINEWIDTH(w) | LINECOUNT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SOURCEBUFFERDIMENSION0, val); + mutex_unlock(&fu->mutex); +} + +static void fetchdecode_set_fmt(struct dpu_fetchunit *fu, + u32 fmt, + enum drm_color_encoding color_encoding, + enum drm_color_range color_range, + bool deinterlace) +{ + u32 val, bits, shift; + bool is_planar_yuv = false, is_rastermode_yuv422 = false; + bool is_yuv422upsamplingmode_interpolate = false; + bool is_inputselect_compact = false; + bool need_csc = false; + int i; + + switch (fmt) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + is_rastermode_yuv422 = true; + is_yuv422upsamplingmode_interpolate = true; + need_csc = true; + break; + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + is_yuv422upsamplingmode_interpolate = true; + /* fall-through */ + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + if (deinterlace) + is_yuv422upsamplingmode_interpolate = true; + is_planar_yuv = true; + is_rastermode_yuv422 = true; + is_inputselect_compact = true; + need_csc = true; + break; + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + is_planar_yuv = true; + is_yuv422upsamplingmode_interpolate = true; + is_inputselect_compact = true; + need_csc = true; + break; + default: + break; + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, CONTROL); + val &= ~YUV422UPSAMPLINGMODE_MASK; + val &= ~INPUTSELECT_MASK; + val &= ~RASTERMODE_MASK; + if (is_yuv422upsamplingmode_interpolate) + val |= YUV422UPSAMPLINGMODE(YUV422UPSAMPLINGMODE__INTERPOLATE); + else + val |= YUV422UPSAMPLINGMODE(YUV422UPSAMPLINGMODE__REPLICATE); + if (is_inputselect_compact) + val |= INPUTSELECT(INPUTSELECT__COMPPACK); + else + val |= INPUTSELECT(INPUTSELECT__INACTIVE); + if (is_rastermode_yuv422) + val |= RASTERMODE(RASTERMODE__YUV422); + else + val |= RASTERMODE(RASTERMODE__NORMAL); + dpu_fu_write(fu, CONTROL, val); + + val = dpu_fu_read(fu, LAYERPROPERTY0); + val &= ~YUVCONVERSIONMODE_MASK; + if 
(need_csc) { + /* assuming fetchdecode always ouputs RGB pixel formats */ + if (color_encoding == DRM_COLOR_YCBCR_BT709) + val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__ITU709); + else if (color_encoding == DRM_COLOR_YCBCR_BT601 && + color_range == DRM_COLOR_YCBCR_FULL_RANGE) + val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__ITU601_FR); + else + val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__ITU601); + } else { + val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF); + } + dpu_fu_write(fu, LAYERPROPERTY0, val); + mutex_unlock(&fu->mutex); + + for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) { + if (dpu_pixel_format_matrix[i].pixel_format == fmt) { + bits = dpu_pixel_format_matrix[i].bits; + shift = dpu_pixel_format_matrix[i].shift; + + if (is_planar_yuv) { + bits &= ~(U_BITS_MASK | V_BITS_MASK); + shift &= ~(U_SHIFT_MASK | V_SHIFT_MASK); + } + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, COLORCOMPONENTBITS0, bits); + dpu_fu_write(fu, COLORCOMPONENTSHIFT0, shift); + mutex_unlock(&fu->mutex); + return; + } + } + + WARN_ON(1); +} + +void fetchdecode_layeroffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + val = LAYERXOFFSET(x) | LAYERYOFFSET(y); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, LAYEROFFSET0, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_layeroffset); + +void fetchdecode_clipoffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + val = CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CLIPWINDOWOFFSET0, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_clipoffset); + +static void +fetchdecode_set_pixel_blend_mode(struct dpu_fetchunit *fu, + unsigned int pixel_blend_mode, u16 alpha, + u32 fb_format) +{ + u32 mode = 0, val; + + if (pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || + pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { + mode = ALPHACONSTENABLE; + + switch (fb_format) { + case DRM_FORMAT_ARGB8888: + case 
DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + mode |= ALPHASRCENABLE; + break; + } + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY0); + val &= ~(PREMULCONSTRGB | ALPHA_ENABLE_MASK | RGB_ENABLE_MASK); + val |= mode; + dpu_fu_write(fu, LAYERPROPERTY0, val); + + val = dpu_fu_read(fu, CONSTANTCOLOR0); + val &= ~CONSTANTALPHA_MASK; + val |= CONSTANTALPHA(alpha >> 8); + dpu_fu_write(fu, CONSTANTCOLOR0, val); + mutex_unlock(&fu->mutex); +} + +static void fetchdecode_enable_src_buf(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY0); + val |= SOURCEBUFFERENABLE; + dpu_fu_write(fu, LAYERPROPERTY0, val); + mutex_unlock(&fu->mutex); +} + +static void fetchdecode_disable_src_buf(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY0); + val &= ~SOURCEBUFFERENABLE; + dpu_fu_write(fu, LAYERPROPERTY0, val); + mutex_unlock(&fu->mutex); +} + +static bool fetchdecode_is_enabled(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY0); + mutex_unlock(&fu->mutex); + + return !!(val & SOURCEBUFFERENABLE); +} + +void fetchdecode_clipdimensions(struct dpu_fetchunit *fu, unsigned int w, + unsigned int h) +{ + u32 val; + + val = CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CLIPWINDOWDIMENSIONS0, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_clipdimensions); + +static void +fetchdecode_set_framedimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + bool deinterlace) +{ + u32 val; + + if (deinterlace) + h /= 2; + + val = FRAMEWIDTH(w) | FRAMEHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, FRAMEDIMENSIONS, val); + mutex_unlock(&fu->mutex); +} + +void fetchdecode_rgb_constantcolor(struct dpu_fetchunit *fu, + u8 r, u8 g, u8 b, u8 a) +{ + u32 val; + + val = rgb_color(r, g, b, a); + + 
mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONSTANTCOLOR0, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_rgb_constantcolor); + +void fetchdecode_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v) +{ + u32 val; + + val = yuv_color(y, u, v); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONSTANTCOLOR0, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_yuv_constantcolor); + +static void fetchdecode_set_controltrigger(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN); + mutex_unlock(&fu->mutex); +} + +int fetchdecode_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type) +{ + struct dpu_soc *dpu = fu->dpu; + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FETCHTYPE); + val &= FETCHTYPE_MASK; + mutex_unlock(&fu->mutex); + + switch (val) { + case FETCHTYPE__DECODE: + case FETCHTYPE__LAYER: + case FETCHTYPE__WARP: + case FETCHTYPE__ECO: + case FETCHTYPE__PERSP: + case FETCHTYPE__ROT: + case FETCHTYPE__DECODEL: + case FETCHTYPE__LAYERL: + case FETCHTYPE__ROTL: + break; + default: + dev_warn(dpu->dev, "Invalid fetch type %u for FetchDecode%d\n", + val, fu->id); + return -EINVAL; + } + + *type = val; + return 0; +} +EXPORT_SYMBOL_GPL(fetchdecode_fetchtype); + +u32 fetchdecode_get_vproc_mask(struct dpu_fetchunit *fu) +{ + return fd_vproc_cap[fu->id]; +} +EXPORT_SYMBOL_GPL(fetchdecode_get_vproc_mask); + +struct dpu_fetchunit *fetchdecode_get_fetcheco(struct dpu_fetchunit *fu) +{ + struct dpu_soc *dpu = fu->dpu; + + switch (fu->id) { + case 0: + case 1: + return dpu->fe_priv[fu->id]; + default: + WARN_ON(1); + } + + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(fetchdecode_get_fetcheco); + +bool fetchdecode_need_fetcheco(struct dpu_fetchunit *fu, u32 fmt) +{ + struct dpu_fetchunit *fe = fetchdecode_get_fetcheco(fu); + + if (IS_ERR_OR_NULL(fe)) + return false; + + switch (fmt) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case 
DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(fetchdecode_need_fetcheco); + +struct dpu_hscaler *fetchdecode_get_hscaler(struct dpu_fetchunit *fu) +{ + struct dpu_soc *dpu = fu->dpu; + + switch (fu->id) { + case 0: + case 2: + return dpu->hs_priv[0]; + case 1: + case 3: + return dpu->hs_priv[1]; + default: + WARN_ON(1); + } + + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(fetchdecode_get_hscaler); + +struct dpu_vscaler *fetchdecode_get_vscaler(struct dpu_fetchunit *fu) +{ + struct dpu_soc *dpu = fu->dpu; + + switch (fu->id) { + case 0: + case 2: + return dpu->vs_priv[0]; + case 1: + case 3: + return dpu->vs_priv[1]; + default: + WARN_ON(1); + } + + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(fetchdecode_get_vscaler); + +struct dpu_fetchunit *dpu_fd_get(struct dpu_soc *dpu, int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fd_ids); i++) + if (fd_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fd_ids)) + return ERR_PTR(-EINVAL); + + fu = dpu->fd_priv[i]; + + mutex_lock(&fu->mutex); + + if (fu->inuse) { + mutex_unlock(&fu->mutex); + return ERR_PTR(-EBUSY); + } + + fu->inuse = true; + + mutex_unlock(&fu->mutex); + + return fu; +} +EXPORT_SYMBOL_GPL(dpu_fd_get); + +void dpu_fd_put(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + + fu->inuse = false; + + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fd_put); + +static const struct dpu_fetchunit_ops fd_ops = { + .set_burstlength = fetchunit_set_burstlength, + .set_baseaddress = fetchdecode_set_baseaddress, + .set_src_bpp = fetchdecode_set_src_bpp, + .set_src_stride = fetchdecode_set_src_stride, + .set_src_buf_dimensions = fetchdecode_set_src_buf_dimensions, + .set_fmt = fetchdecode_set_fmt, + .set_pixel_blend_mode = fetchdecode_set_pixel_blend_mode, + .enable_src_buf = fetchdecode_enable_src_buf, + .disable_src_buf = fetchdecode_disable_src_buf, + .is_enabled = fetchdecode_is_enabled, + 
.set_framedimensions = fetchdecode_set_framedimensions, + .set_controltrigger = fetchdecode_set_controltrigger, + .get_stream_id = fetchunit_get_stream_id, + .set_stream_id = fetchunit_set_stream_id, + .pin_off = fetchunit_pin_off, + .unpin_off = fetchunit_unpin_off, + .is_pinned_off = fetchunit_is_pinned_off, +}; + +void _dpu_fd_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fd_ids); i++) + if (fd_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(fd_ids))) + return; + + fu = dpu->fd_priv[i]; + + fetchdecode_pixengcfg_dynamic_src_sel(fu, FD_SRC_DISABLE); + fetchunit_baddr_autoupdate(fu, 0x0); + fetchunit_shden(fu, true); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, BURSTBUFFERMANAGEMENT, + SETNUMBUFFERS(16) | SETBURSTLENGTH(16)); + mutex_unlock(&fu->mutex); +} + +int dpu_fd_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_fetchdecode *fd; + struct dpu_fetchunit *fu; + int ret; + + fd = devm_kzalloc(dpu->dev, sizeof(*fd), GFP_KERNEL); + if (!fd) + return -ENOMEM; + + fu = &fd->fu; + dpu->fd_priv[id] = fu; + + fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!fu->pec_base) + return -ENOMEM; + + fu->base = devm_ioremap(dpu->dev, base, SZ_1K); + if (!fu->base) + return -ENOMEM; + + fu->dpu = dpu; + fu->id = id; + fu->type = FU_T_FD; + fu->ops = &fd_ops; + fu->name = "fetchdecode"; + + mutex_init(&fu->mutex); + + ret = fetchdecode_pixengcfg_dynamic_src_sel(fu, FD_SRC_DISABLE); + if (ret < 0) + return ret; + + ret = fetchdecode_fetchtype(fu, &fd->fetchtype); + if (ret < 0) + return ret; + + _dpu_fd_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-fetcheco.c b/drivers/gpu/imx/dpu/dpu-fetcheco.c new file mode 100644 index 000000000000..870e680f5cfb --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetcheco.c @@ -0,0 +1,410 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it 
and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define BASEADDRESS0 0x10 +#define SOURCEBUFFERATTRIBUTES0 0x14 +#define SOURCEBUFFERDIMENSION0 0x18 +#define COLORCOMPONENTBITS0 0x1C +#define COLORCOMPONENTSHIFT0 0x20 +#define LAYEROFFSET0 0x24 +#define CLIPWINDOWOFFSET0 0x28 +#define CLIPWINDOWDIMENSIONS0 0x2C +#define CONSTANTCOLOR0 0x30 +#define LAYERPROPERTY0 0x34 +#define FRAMEDIMENSIONS 0x38 +#define FRAMERESAMPLING 0x3C +#define CONTROL 0x40 +#define CONTROLTRIGGER 0x44 +#define START 0x48 +#define FETCHTYPE 0x4C +#define BURSTBUFFERPROPERTIES 0x50 +#define HIDDENSTATUS 0x54 + +struct dpu_fetcheco { + struct dpu_fetchunit fu; +}; + +static void +fetcheco_set_src_buf_dimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + u32 fmt, bool deinterlace) +{ + int width, height; + u32 val; + + if (deinterlace) { + width = w; + height = h / 2; + } else { + width = dpu_format_plane_width(w, fmt, 1); + height = dpu_format_plane_height(h, fmt, 1); + } + + switch (fmt) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + break; + default: + WARN(1, "Unsupported FetchEco pixel format 0x%08x\n", fmt); + return; + } + + val = LINEWIDTH(width) | LINECOUNT(height); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SOURCEBUFFERDIMENSION0, val); + mutex_unlock(&fu->mutex); +} + +static 
void fetcheco_set_fmt(struct dpu_fetchunit *fu, + u32 fmt, + enum drm_color_encoding unused1, + enum drm_color_range unused2, + bool unused3) +{ + u32 val, bits, shift; + int i, hsub, vsub; + unsigned int x, y; + + switch (fmt) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + break; + default: + WARN(1, "Unsupported FetchEco pixel format 0x%08x\n", fmt); + return; + } + + hsub = dpu_format_horz_chroma_subsampling(fmt); + switch (hsub) { + case 1: + x = 0x4; + break; + case 2: + x = 0x2; + break; + default: + WARN_ON(1); + return; + } + + vsub = dpu_format_vert_chroma_subsampling(fmt); + switch (vsub) { + case 1: + y = 0x4; + break; + case 2: + y = 0x2; + break; + default: + WARN_ON(1); + return; + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FRAMERESAMPLING); + val &= ~(DELTAX_MASK | DELTAY_MASK); + val |= DELTAX(x) | DELTAY(y); + dpu_fu_write(fu, FRAMERESAMPLING, val); + + val = dpu_fu_read(fu, CONTROL); + val &= ~RASTERMODE_MASK; + val |= RASTERMODE(RASTERMODE__NORMAL); + dpu_fu_write(fu, CONTROL, val); + mutex_unlock(&fu->mutex); + + for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) { + if (dpu_pixel_format_matrix[i].pixel_format == fmt) { + bits = dpu_pixel_format_matrix[i].bits; + shift = dpu_pixel_format_matrix[i].shift; + + bits &= ~Y_BITS_MASK; + shift &= ~Y_SHIFT_MASK; + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, COLORCOMPONENTBITS0, bits); + dpu_fu_write(fu, COLORCOMPONENTSHIFT0, shift); + mutex_unlock(&fu->mutex); + return; + } + } + + WARN_ON(1); +} + +void fetcheco_layeroffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + val = LAYERXOFFSET(x) | LAYERYOFFSET(y); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, LAYEROFFSET0, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetcheco_layeroffset); + +void fetcheco_clipoffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + 
val = CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CLIPWINDOWOFFSET0, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetcheco_clipoffset); + +void fetcheco_clipdimensions(struct dpu_fetchunit *fu, unsigned int w, + unsigned int h) +{ + u32 val; + + val = CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CLIPWINDOWDIMENSIONS0, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetcheco_clipdimensions); + +static void +fetcheco_set_framedimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + bool deinterlace) +{ + u32 val; + + if (deinterlace) + h /= 2; + + val = FRAMEWIDTH(w) | FRAMEHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, FRAMEDIMENSIONS, val); + mutex_unlock(&fu->mutex); +} + +void fetcheco_frameresampling(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FRAMERESAMPLING); + val &= ~(DELTAX_MASK | DELTAY_MASK); + val |= DELTAX(x) | DELTAY(y); + dpu_fu_write(fu, FRAMERESAMPLING, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetcheco_frameresampling); + +static void fetcheco_set_controltrigger(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN); + mutex_unlock(&fu->mutex); +} + +int fetcheco_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type) +{ + struct dpu_soc *dpu = fu->dpu; + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FETCHTYPE); + val &= FETCHTYPE_MASK; + mutex_unlock(&fu->mutex); + + switch (val) { + case FETCHTYPE__DECODE: + case FETCHTYPE__LAYER: + case FETCHTYPE__WARP: + case FETCHTYPE__ECO: + case FETCHTYPE__PERSP: + case FETCHTYPE__ROT: + case FETCHTYPE__DECODEL: + case FETCHTYPE__LAYERL: + case FETCHTYPE__ROTL: + break; + default: + dev_warn(dpu->dev, "Invalid fetch type %u for FetchEco%d\n", + val, fu->id); + return -EINVAL; + } + + *type = val; + return 0; +} 
+EXPORT_SYMBOL_GPL(fetcheco_fetchtype); + +dpu_block_id_t fetcheco_get_block_id(struct dpu_fetchunit *fu) +{ + switch (fu->id) { + case 0: + return ID_FETCHECO0; + case 1: + return ID_FETCHECO1; + case 2: + return ID_FETCHECO2; + case 9: + return ID_FETCHECO9; + default: + WARN_ON(1); + } + + return ID_NONE; +} +EXPORT_SYMBOL_GPL(fetcheco_get_block_id); + +struct dpu_fetchunit *dpu_fe_get(struct dpu_soc *dpu, int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fe_ids); i++) + if (fe_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fe_ids)) + return ERR_PTR(-EINVAL); + + fu = dpu->fe_priv[i]; + + mutex_lock(&fu->mutex); + + if (fu->inuse) { + mutex_unlock(&fu->mutex); + return ERR_PTR(-EBUSY); + } + + fu->inuse = true; + + mutex_unlock(&fu->mutex); + + return fu; +} +EXPORT_SYMBOL_GPL(dpu_fe_get); + +void dpu_fe_put(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + + fu->inuse = false; + + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fe_put); + +static const struct dpu_fetchunit_ops fe_ops = { + .set_burstlength = fetchunit_set_burstlength, + .set_baseaddress = fetchunit_set_baseaddress, + .set_src_bpp = fetchunit_set_src_bpp, + .set_src_stride = fetchunit_set_src_stride, + .set_src_buf_dimensions = fetcheco_set_src_buf_dimensions, + .set_fmt = fetcheco_set_fmt, + .enable_src_buf = fetchunit_enable_src_buf, + .disable_src_buf = fetchunit_disable_src_buf, + .is_enabled = fetchunit_is_enabled, + .set_framedimensions = fetcheco_set_framedimensions, + .set_controltrigger = fetcheco_set_controltrigger, + .get_stream_id = fetchunit_get_stream_id, + .set_stream_id = fetchunit_set_stream_id, + .pin_off = fetchunit_pin_off, + .unpin_off = fetchunit_unpin_off, + .is_pinned_off = fetchunit_is_pinned_off, +}; + +void _dpu_fe_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fe_ids); i++) + if (fe_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(fe_ids))) + return; + + fu 
= dpu->fe_priv[i]; + + fetchunit_shden(fu, true); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, BURSTBUFFERMANAGEMENT, + SETNUMBUFFERS(16) | SETBURSTLENGTH(16)); + mutex_unlock(&fu->mutex); +} + +int dpu_fe_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_fetcheco *fe; + struct dpu_fetchunit *fu; + int i; + + fe = devm_kzalloc(dpu->dev, sizeof(*fe), GFP_KERNEL); + if (!fe) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(fe_ids); i++) + if (fe_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fe_ids)) + return -EINVAL; + + fu = &fe->fu; + dpu->fe_priv[i] = fu; + + fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!fu->pec_base) + return -ENOMEM; + + fu->base = devm_ioremap(dpu->dev, base, SZ_128); + if (!fu->base) + return -ENOMEM; + + fu->dpu = dpu; + fu->id = id; + fu->type = FU_T_FE; + fu->ops = &fe_ops; + fu->name = "fetcheco"; + + mutex_init(&fu->mutex); + + _dpu_fe_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-fetchlayer.c b/drivers/gpu/imx/dpu/dpu-fetchlayer.c new file mode 100644 index 000000000000..984679ec54d1 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetchlayer.c @@ -0,0 +1,297 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_STATUS 0x8 +#define BASEADDRESS(n) (0x10 + (n) * 0x28) +#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28) +#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28) +#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28) +#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28) +#define LAYEROFFSET(n) (0x24 + (n) * 0x28) +#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28) +#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28) +#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28) +#define LAYERPROPERTY(n) (0x34 + (n) * 0x28) +#define FRAMEDIMENSIONS 0x150 +#define FRAMERESAMPLING 0x154 +#define CONTROL 0x158 +#define TRIGGERENABLE 0x15C +#define SHDLDREQ(lm) ((lm) & 0xFF) +#define CONTROLTRIGGER 0x160 +#define START 0x164 +#define FETCHTYPE 0x168 +#define BURSTBUFFERPROPERTIES 0x16C +#define STATUS 0x170 +#define HIDDENSTATUS 0x174 + +struct dpu_fetchlayer { + struct dpu_fetchunit fu; + fetchtype_t fetchtype; +}; + +static void +fetchlayer_set_src_buf_dimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + u32 unused1, bool unused2) +{ + u32 val; + + val = LINEWIDTH(w) | LINECOUNT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SOURCEBUFFERDIMENSION(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} + +static void fetchlayer_set_fmt(struct dpu_fetchunit *fu, + u32 fmt, + enum drm_color_encoding color_encoding, + enum drm_color_range color_range, + bool unused) +{ + u32 val, bits, shift; + int i, sub_id = fu->sub_id; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(sub_id)); + val &= ~YUVCONVERSIONMODE_MASK; + val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF); + dpu_fu_write(fu, LAYERPROPERTY(sub_id), val); + mutex_unlock(&fu->mutex); + + for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) { + if (dpu_pixel_format_matrix[i].pixel_format == 
fmt) { + bits = dpu_pixel_format_matrix[i].bits; + shift = dpu_pixel_format_matrix[i].shift; + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, COLORCOMPONENTBITS(sub_id), bits); + dpu_fu_write(fu, COLORCOMPONENTSHIFT(sub_id), shift); + mutex_unlock(&fu->mutex); + return; + } + } + + WARN_ON(1); +} + +static void +fetchlayer_set_framedimensions(struct dpu_fetchunit *fu, unsigned int w, + unsigned int h, bool unused) +{ + u32 val; + + val = FRAMEWIDTH(w) | FRAMEHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, FRAMEDIMENSIONS, val); + mutex_unlock(&fu->mutex); +} + +void fetchlayer_rgb_constantcolor(struct dpu_fetchunit *fu, + u8 r, u8 g, u8 b, u8 a) +{ + u32 val; + + val = rgb_color(r, g, b, a); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONSTANTCOLOR(fu->id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchlayer_rgb_constantcolor); + +void fetchlayer_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v) +{ + u32 val; + + val = yuv_color(y, u, v); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONSTANTCOLOR(fu->id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchlayer_yuv_constantcolor); + +static void fetchlayer_set_controltrigger(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN); + mutex_unlock(&fu->mutex); +} + +int fetchlayer_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type) +{ + struct dpu_soc *dpu = fu->dpu; + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FETCHTYPE); + val &= FETCHTYPE_MASK; + mutex_unlock(&fu->mutex); + + switch (val) { + case FETCHTYPE__DECODE: + case FETCHTYPE__LAYER: + case FETCHTYPE__WARP: + case FETCHTYPE__ECO: + case FETCHTYPE__PERSP: + case FETCHTYPE__ROT: + case FETCHTYPE__DECODEL: + case FETCHTYPE__LAYERL: + case FETCHTYPE__ROTL: + break; + default: + dev_warn(dpu->dev, "Invalid fetch type %u for FetchLayer%d\n", + val, fu->id); + return -EINVAL; + } + + *type = val; + return 0; +} 
+EXPORT_SYMBOL_GPL(fetchlayer_fetchtype); + +struct dpu_fetchunit *dpu_fl_get(struct dpu_soc *dpu, int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fl_ids); i++) + if (fl_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fl_ids)) + return ERR_PTR(-EINVAL); + + fu = dpu->fl_priv[i]; + + mutex_lock(&fu->mutex); + + if (fu->inuse) { + mutex_unlock(&fu->mutex); + return ERR_PTR(-EBUSY); + } + + fu->inuse = true; + + mutex_unlock(&fu->mutex); + + return fu; +} +EXPORT_SYMBOL_GPL(dpu_fl_get); + +void dpu_fl_put(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + + fu->inuse = false; + + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fl_put); + +static const struct dpu_fetchunit_ops fl_ops = { + .set_burstlength = fetchunit_set_burstlength, + .set_baseaddress = fetchunit_set_baseaddress, + .set_src_bpp = fetchunit_set_src_bpp, + .set_src_stride = fetchunit_set_src_stride, + .set_src_buf_dimensions = fetchlayer_set_src_buf_dimensions, + .set_fmt = fetchlayer_set_fmt, + .set_pixel_blend_mode = fetchunit_set_pixel_blend_mode, + .enable_src_buf = fetchunit_enable_src_buf, + .disable_src_buf = fetchunit_disable_src_buf, + .is_enabled = fetchunit_is_enabled, + .set_framedimensions = fetchlayer_set_framedimensions, + .set_controltrigger = fetchlayer_set_controltrigger, + .get_stream_id = fetchunit_get_stream_id, + .set_stream_id = fetchunit_set_stream_id, + .pin_off = fetchunit_pin_off, + .unpin_off = fetchunit_unpin_off, + .is_pinned_off = fetchunit_is_pinned_off, +}; + +void _dpu_fl_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fl_ids); i++) + if (fl_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(fl_ids))) + return; + + fu = dpu->fl_priv[i]; + + fetchunit_baddr_autoupdate(fu, 0x0); + fetchunit_shden(fu, true); + fetchunit_shdldreq_sticky(fu, 0xFF); + fetchunit_disable_src_buf(fu); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, BURSTBUFFERMANAGEMENT, + 
SETNUMBUFFERS(16) | SETBURSTLENGTH(16)); + mutex_unlock(&fu->mutex); +} + +int dpu_fl_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_fetchlayer *fl; + struct dpu_fetchunit *fu; + int ret; + + fl = devm_kzalloc(dpu->dev, sizeof(*fl), GFP_KERNEL); + if (!fl) + return -ENOMEM; + + fu = &fl->fu; + dpu->fl_priv[id] = fu; + + fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!fu->pec_base) + return -ENOMEM; + + fu->base = devm_ioremap(dpu->dev, base, SZ_512); + if (!fu->base) + return -ENOMEM; + + fu->dpu = dpu; + fu->id = id; + fu->sub_id = 0; + fu->type = FU_T_FL; + fu->ops = &fl_ops; + fu->name = "fetchlayer"; + + mutex_init(&fu->mutex); + + ret = fetchlayer_fetchtype(fu, &fl->fetchtype); + if (ret < 0) + return ret; + + _dpu_fl_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-fetchunit.c b/drivers/gpu/imx/dpu/dpu-fetchunit.c new file mode 100644 index 000000000000..343d2ab781a4 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetchunit.c @@ -0,0 +1,373 @@ +/* + * Copyright 2018-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <drm/drm_blend.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define BASEADDRESS(n) (0x10 + (n) * 0x28) +#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28) +#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28) +#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28) +#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28) +#define LAYEROFFSET(n) (0x24 + (n) * 0x28) +#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28) +#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28) +#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28) +#define LAYERPROPERTY(n) (0x34 + (n) * 0x28) + +/* base address has to align to burst size */ +unsigned int fetchunit_burst_size_fixup_tkt343664(dma_addr_t baddr) +{ + unsigned int burst_size; + + burst_size = 1 << (ffs(baddr) - 1); + burst_size = round_up(burst_size, 8); + burst_size = min(burst_size, 128U); + + return burst_size; +} +EXPORT_SYMBOL_GPL(fetchunit_burst_size_fixup_tkt343664); + +/* fixup for burst size vs stride mismatch */ +unsigned int +fetchunit_stride_fixup_tkt339017(unsigned int stride, unsigned int burst_size, + dma_addr_t baddr, bool nonzero_mod) +{ + if (nonzero_mod) + stride = round_up(stride + round_up(baddr % 8, 8), burst_size); + else + stride = round_up(stride, burst_size); + + return stride; +} +EXPORT_SYMBOL_GPL(fetchunit_stride_fixup_tkt339017); + +void fetchunit_get_dprc(struct dpu_fetchunit *fu, void *data) +{ + if (WARN_ON(!fu)) + return; + + fu->dprc = data; +} +EXPORT_SYMBOL_GPL(fetchunit_get_dprc); + +void fetchunit_shden(struct dpu_fetchunit *fu, bool enable) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_fu_write(fu, STATICCONTROL, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_shden); + +void fetchunit_baddr_autoupdate(struct dpu_fetchunit *fu, u8 layer_mask) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, STATICCONTROL); + val &= ~BASEADDRESSAUTOUPDATE_MASK; + val |= 
BASEADDRESSAUTOUPDATE(layer_mask); + dpu_fu_write(fu, STATICCONTROL, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_baddr_autoupdate); + +void fetchunit_shdldreq_sticky(struct dpu_fetchunit *fu, u8 layer_mask) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, STATICCONTROL); + val &= ~SHDLDREQSTICKY_MASK; + val |= SHDLDREQSTICKY(layer_mask); + dpu_fu_write(fu, STATICCONTROL, val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_shdldreq_sticky); + +void fetchunit_set_burstlength(struct dpu_fetchunit *fu, + unsigned int x_offset, unsigned int mt_w, + int bpp, dma_addr_t baddr, bool use_prefetch) +{ + struct dpu_soc *dpu = fu->dpu; + unsigned int burst_size, burst_length; + bool nonzero_mod = !!mt_w; + u32 val; + + if (use_prefetch) { + /* consider PRG x offset to calculate buffer address */ + if (nonzero_mod) + baddr += (x_offset % mt_w) * (bpp / 8); + + burst_size = fetchunit_burst_size_fixup_tkt343664(baddr); + burst_length = burst_size / 8; + } else { + burst_length = 16; + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, BURSTBUFFERMANAGEMENT); + val &= ~SETBURSTLENGTH_MASK; + val |= SETBURSTLENGTH(burst_length); + dpu_fu_write(fu, BURSTBUFFERMANAGEMENT, val); + mutex_unlock(&fu->mutex); + + dev_dbg(dpu->dev, "%s%d burst length is %u\n", + fu->name, fu->id, burst_length); +} +EXPORT_SYMBOL_GPL(fetchunit_set_burstlength); + +void fetchunit_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width, + unsigned int x_offset, unsigned int y_offset, + unsigned int mt_w, unsigned int mt_h, + int bpp, dma_addr_t baddr) +{ + unsigned int burst_size, stride; + bool nonzero_mod = !!mt_w; + + if (nonzero_mod) { + /* consider PRG x offset to calculate buffer address */ + baddr += (x_offset % mt_w) * (bpp / 8); + + burst_size = fetchunit_burst_size_fixup_tkt343664(baddr); + + stride = width * (bpp / 8); + stride = fetchunit_stride_fixup_tkt339017(stride, burst_size, + baddr, nonzero_mod); + + /* consider PRG y offset to 
calculate buffer address */ + baddr += (y_offset % mt_h) * stride; + } + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, BASEADDRESS(fu->sub_id), baddr); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_set_baseaddress); + +void fetchunit_set_src_bpp(struct dpu_fetchunit *fu, int bpp) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id)); + val &= ~0x3f0000; + val |= BITSPERPIXEL(bpp); + dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_set_src_bpp); + +/* + * The arguments width and bpp are valid only when use_prefetch is true. + * For fetcheco, since the pixel format has to be NV12 or NV21 when + * use_prefetch is true, we assume width stands for how many UV we have + * in bytes for one line, while bpp should be 8bits for every U or V component. + */ +void fetchunit_set_src_stride(struct dpu_fetchunit *fu, + unsigned int width, unsigned int x_offset, + unsigned int mt_w, int bpp, unsigned int stride, + dma_addr_t baddr, bool use_prefetch) +{ + unsigned int burst_size; + bool nonzero_mod = !!mt_w; + u32 val; + + if (use_prefetch) { + /* consider PRG x offset to calculate buffer address */ + if (nonzero_mod) + baddr += (x_offset % mt_w) * (bpp / 8); + + burst_size = fetchunit_burst_size_fixup_tkt343664(baddr); + + stride = width * (bpp / 8); + stride = fetchunit_stride_fixup_tkt339017(stride, burst_size, + baddr, nonzero_mod); + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id)); + val &= ~0xffff; + val |= STRIDE(stride); + dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_set_src_stride); + +void fetchunit_set_pixel_blend_mode(struct dpu_fetchunit *fu, + unsigned int pixel_blend_mode, u16 alpha, + u32 fb_format) +{ + u32 mode = 0, val; + + if (pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || + pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) 
{ + mode = ALPHACONSTENABLE; + + switch (fb_format) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + mode |= ALPHASRCENABLE; + break; + } + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id)); + val &= ~(PREMULCONSTRGB | ALPHA_ENABLE_MASK | RGB_ENABLE_MASK); + val |= mode; + dpu_fu_write(fu, LAYERPROPERTY(fu->sub_id), val); + + val = dpu_fu_read(fu, CONSTANTCOLOR(fu->sub_id)); + val &= ~CONSTANTALPHA_MASK; + val |= CONSTANTALPHA(alpha >> 8); + dpu_fu_write(fu, CONSTANTCOLOR(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_set_pixel_blend_mode); + +void fetchunit_enable_src_buf(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id)); + val |= SOURCEBUFFERENABLE; + dpu_fu_write(fu, LAYERPROPERTY(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_enable_src_buf); + +void fetchunit_disable_src_buf(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id)); + val &= ~SOURCEBUFFERENABLE; + dpu_fu_write(fu, LAYERPROPERTY(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_disable_src_buf); + +bool fetchunit_is_enabled(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id)); + mutex_unlock(&fu->mutex); + + return !!(val & SOURCEBUFFERENABLE); +} +EXPORT_SYMBOL_GPL(fetchunit_is_enabled); + +unsigned int fetchunit_get_stream_id(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return DPU_PLANE_SRC_DISABLED; + + return fu->stream_id; +} +EXPORT_SYMBOL_GPL(fetchunit_get_stream_id); + +void fetchunit_set_stream_id(struct dpu_fetchunit *fu, unsigned int id) +{ + if (WARN_ON(!fu)) + return; + + switch (id) { + case DPU_PLANE_SRC_TO_DISP_STREAM0: + case DPU_PLANE_SRC_TO_DISP_STREAM1: + case DPU_PLANE_SRC_DISABLED: + 
fu->stream_id = id; + break; + default: + WARN_ON(1); + } +} +EXPORT_SYMBOL_GPL(fetchunit_set_stream_id); + +void fetchunit_pin_off(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return; + + fu->pin_off = true; +} +EXPORT_SYMBOL_GPL(fetchunit_pin_off); + +void fetchunit_unpin_off(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return; + + fu->pin_off = false; +} +EXPORT_SYMBOL_GPL(fetchunit_unpin_off); + +bool fetchunit_is_pinned_off(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->pin_off; +} +EXPORT_SYMBOL_GPL(fetchunit_is_pinned_off); + +bool fetchunit_is_fetchdecode(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->type == FU_T_FD; +} +EXPORT_SYMBOL_GPL(fetchunit_is_fetchdecode); + +bool fetchunit_is_fetcheco(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->type == FU_T_FE; +} +EXPORT_SYMBOL_GPL(fetchunit_is_fetcheco); + +bool fetchunit_is_fetchlayer(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->type == FU_T_FL; +} +EXPORT_SYMBOL_GPL(fetchunit_is_fetchlayer); + +bool fetchunit_is_fetchwarp(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->type == FU_T_FW; +} +EXPORT_SYMBOL_GPL(fetchunit_is_fetchwarp); diff --git a/drivers/gpu/imx/dpu/dpu-fetchwarp.c b/drivers/gpu/imx/dpu/dpu-fetchwarp.c new file mode 100644 index 000000000000..aea9b9beb131 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetchwarp.c @@ -0,0 +1,308 @@ +/* + * Copyright 2018-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_STATUS 0x8 +#define BASEADDRESS(n) (0x10 + (n) * 0x28) +#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28) +#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28) +#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28) +#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28) +#define LAYEROFFSET(n) (0x24 + (n) * 0x28) +#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28) +#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28) +#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28) +#define LAYERPROPERTY(n) (0x34 + (n) * 0x28) +#define FRAMEDIMENSIONS 0x150 +#define FRAMERESAMPLING 0x154 +#define WARPCONTROL 0x158 +#define ARBSTARTX 0x15c +#define ARBSTARTY 0x160 +#define ARBDELTA 0x164 +#define FIRPOSITIONS 0x168 +#define FIRCOEFFICIENTS 0x16c +#define CONTROL 0x170 +#define TRIGGERENABLE 0x174 +#define SHDLDREQ(lm) ((lm) & 0xFF) +#define CONTROLTRIGGER 0x178 +#define START 0x17c +#define FETCHTYPE 0x180 +#define BURSTBUFFERPROPERTIES 0x184 +#define STATUS 0x188 +#define HIDDENSTATUS 0x18c + +struct dpu_fetchwarp { + struct dpu_fetchunit fu; + fetchtype_t fetchtype; +}; + +static void +fetchwarp_set_src_buf_dimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + u32 unused1, bool unused2) +{ + u32 val; + + val = LINEWIDTH(w) | LINECOUNT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SOURCEBUFFERDIMENSION(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} + +static void fetchwarp_set_fmt(struct dpu_fetchunit *fu, + u32 fmt, + enum drm_color_encoding color_encoding, + enum drm_color_range color_range, + bool unused) +{ + u32 val, bits, shift; + int i, sub_id = fu->sub_id; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(sub_id)); + val &= ~YUVCONVERSIONMODE_MASK; + dpu_fu_write(fu, 
LAYERPROPERTY(sub_id), val); + mutex_unlock(&fu->mutex); + + for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) { + if (dpu_pixel_format_matrix[i].pixel_format == fmt) { + bits = dpu_pixel_format_matrix[i].bits; + shift = dpu_pixel_format_matrix[i].shift; + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, COLORCOMPONENTBITS(sub_id), bits); + dpu_fu_write(fu, COLORCOMPONENTSHIFT(sub_id), shift); + mutex_unlock(&fu->mutex); + return; + } + } + + WARN_ON(1); +} + +static void +fetchwarp_set_framedimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, bool unused) +{ + u32 val; + + val = FRAMEWIDTH(w) | FRAMEHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, FRAMEDIMENSIONS, val); + mutex_unlock(&fu->mutex); +} + +void fetchwarp_rgb_constantcolor(struct dpu_fetchunit *fu, + u8 r, u8 g, u8 b, u8 a) +{ + u32 val; + + val = rgb_color(r, g, b, a); + + /* index by sub-buffer id, consistent with the other per-sub-buffer registers */ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONSTANTCOLOR(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchwarp_rgb_constantcolor); + +void fetchwarp_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v) +{ + u32 val; + + val = yuv_color(y, u, v); + + /* index by sub-buffer id, consistent with the other per-sub-buffer registers */ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONSTANTCOLOR(fu->sub_id), val); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchwarp_yuv_constantcolor); + +static void fetchwarp_set_controltrigger(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN); + mutex_unlock(&fu->mutex); +} + +int fetchwarp_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type) +{ + struct dpu_soc *dpu = fu->dpu; + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FETCHTYPE); + val &= FETCHTYPE_MASK; + mutex_unlock(&fu->mutex); + + switch (val) { + case FETCHTYPE__DECODE: + case FETCHTYPE__LAYER: + case FETCHTYPE__WARP: + case FETCHTYPE__ECO: + case FETCHTYPE__PERSP: + case FETCHTYPE__ROT: + case FETCHTYPE__DECODEL: + case FETCHTYPE__LAYERL: + case FETCHTYPE__ROTL: + break; + default: + 
dev_warn(dpu->dev, "Invalid fetch type %u for FetchWarp%d\n", + val, fu->id); + return -EINVAL; + } + + *type = val; + return 0; +} +EXPORT_SYMBOL_GPL(fetchwarp_fetchtype); + +struct dpu_fetchunit *dpu_fw_get(struct dpu_soc *dpu, int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fw_ids); i++) + if (fw_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fw_ids)) + return ERR_PTR(-EINVAL); + + fu = dpu->fw_priv[i]; + + mutex_lock(&fu->mutex); + + if (fu->inuse) { + mutex_unlock(&fu->mutex); + return ERR_PTR(-EBUSY); + } + + fu->inuse = true; + + mutex_unlock(&fu->mutex); + + return fu; +} +EXPORT_SYMBOL_GPL(dpu_fw_get); + +void dpu_fw_put(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + + fu->inuse = false; + + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fw_put); + +static const struct dpu_fetchunit_ops fw_ops = { + .set_burstlength = fetchunit_set_burstlength, + .set_baseaddress = fetchunit_set_baseaddress, + .set_src_bpp = fetchunit_set_src_bpp, + .set_src_stride = fetchunit_set_src_stride, + .set_src_buf_dimensions = fetchwarp_set_src_buf_dimensions, + .set_fmt = fetchwarp_set_fmt, + .set_pixel_blend_mode = fetchunit_set_pixel_blend_mode, + .enable_src_buf = fetchunit_enable_src_buf, + .disable_src_buf = fetchunit_disable_src_buf, + .is_enabled = fetchunit_is_enabled, + .set_framedimensions = fetchwarp_set_framedimensions, + .set_controltrigger = fetchwarp_set_controltrigger, + .get_stream_id = fetchunit_get_stream_id, + .set_stream_id = fetchunit_set_stream_id, + .pin_off = fetchunit_pin_off, + .unpin_off = fetchunit_unpin_off, + .is_pinned_off = fetchunit_is_pinned_off, +}; + +void _dpu_fw_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fw_ids); i++) + if (fw_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(fw_ids))) + return; + + fu = dpu->fw_priv[i]; + + fetchunit_baddr_autoupdate(fu, 0x0); + fetchunit_shden(fu, true); + fetchunit_shdldreq_sticky(fu, 
0xFF); + fetchunit_disable_src_buf(fu); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, BURSTBUFFERMANAGEMENT, + SETNUMBUFFERS(16) | SETBURSTLENGTH(16)); + mutex_unlock(&fu->mutex); +} + +int dpu_fw_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_fetchwarp *fw; + struct dpu_fetchunit *fu; + int i, ret; + + fw = devm_kzalloc(dpu->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(fw_ids); i++) + if (fw_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fw_ids)) + return -EINVAL; + + fu = &fw->fu; + dpu->fw_priv[i] = fu; + + /* map the pixel engine config registers, not the unit registers */ + fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!fu->pec_base) + return -ENOMEM; + + fu->base = devm_ioremap(dpu->dev, base, SZ_512); + if (!fu->base) + return -ENOMEM; + + fu->dpu = dpu; + fu->id = id; + fu->sub_id = 0; + fu->type = FU_T_FW; + fu->ops = &fw_ops; + fu->name = "fetchwarp"; + + mutex_init(&fu->mutex); + + ret = fetchwarp_fetchtype(fu, &fw->fetchtype); + if (ret < 0) + return ret; + + _dpu_fw_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-framegen.c b/drivers/gpu/imx/dpu/dpu-framegen.c new file mode 100644 index 000000000000..314a134769f4 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-framegen.c @@ -0,0 +1,586 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details.
+ */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <drm/drm_mode.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define FGSTCTRL 0x8 +#define FGSYNCMODE_MASK 0x6 +#define HTCFG1 0xC +#define HTOTAL(n) ((((n) - 1) & 0x3FFF) << 16) +#define HACT(n) ((n) & 0x3FFF) +#define HTCFG2 0x10 +#define HSEN BIT(31) +#define HSBP(n) ((((n) - 1) & 0x3FFF) << 16) +#define HSYNC(n) (((n) - 1) & 0x3FFF) +#define VTCFG1 0x14 +#define VTOTAL(n) ((((n) - 1) & 0x3FFF) << 16) +#define VACT(n) ((n) & 0x3FFF) +#define VTCFG2 0x18 +#define VSEN BIT(31) +#define VSBP(n) ((((n) - 1) & 0x3FFF) << 16) +#define VSYNC(n) (((n) - 1) & 0x3FFF) +#define INTCONFIG(n) (0x1C + 4 * (n)) +#define EN BIT(31) +#define ROW(n) (((n) & 0x3FFF) << 16) +#define COL(n) ((n) & 0x3FFF) +#define PKICKCONFIG 0x2C +#define SKICKCONFIG 0x30 +#define SECSTATCONFIG 0x34 +#define FGSRCR1 0x38 +#define FGSRCR2 0x3C +#define FGSRCR3 0x40 +#define FGSRCR4 0x44 +#define FGSRCR5 0x48 +#define FGSRCR6 0x4C +#define FGKSDR 0x50 +#define PACFG 0x54 +#define STARTX(n) (((n) + 1) & 0x3FFF) +#define STARTY(n) (((((n) + 1) & 0x3FFF)) << 16) +#define SACFG 0x58 +#define FGINCTRL 0x5C +#define FGDM_MASK 0x7 +#define ENPRIMALPHA BIT(3) +#define ENSECALPHA BIT(4) +#define FGINCTRLPANIC 0x60 +#define FGCCR 0x64 +#define CCALPHA(a) (((a) & 0x1) << 30) +#define CCRED(r) (((r) & 0x3FF) << 20) +#define CCGREEN(g) (((g) & 0x3FF) << 10) +#define CCBLUE(b) ((b) & 0x3FF) +#define FGENABLE 0x68 +#define FGEN BIT(0) +#define FGSLR 0x6C +#define FGENSTS 0x70 +#define ENSTS BIT(0) +#define FGTIMESTAMP 0x74 +#define LINEINDEX_MASK 0x3FFF +#define LINEINDEX_SHIFT 0 +#define FRAMEINDEX_MASK 0xFFFFC000 +#define FRAMEINDEX_SHIFT 14 +#define FGCHSTAT 0x78 +#define SECSYNCSTAT BIT(24) +#define SFIFOEMPTY BIT(16) +#define FGCHSTATCLR 0x7C +#define CLRSECSTAT BIT(16) +#define FGSKEWMON 0x80 +#define FGSFIFOMIN 0x84 +#define 
FGSFIFOMAX 0x88 +#define FGSFIFOFILLCLR 0x8C +#define FGSREPD 0x90 +#define FGSRFTD 0x94 + +#define KHZ 1000 +#define PLL_MIN_FREQ_HZ 648000000 + +struct dpu_framegen { + void __iomem *base; + struct clk *clk_pll; + struct clk *clk_bypass; + struct clk *clk_disp; + struct clk *clk_disp_lpcg; + struct mutex mutex; + int id; + unsigned int encoder_type; + bool inuse; + bool use_bypass_clk; + bool side_by_side; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_fg_read(struct dpu_framegen *fg, unsigned int offset) +{ + return readl(fg->base + offset); +} + +static inline void dpu_fg_write(struct dpu_framegen *fg, + unsigned int offset, u32 value) +{ + writel(value, fg->base + offset); +} + +void framegen_enable(struct dpu_framegen *fg) +{ + dpu_fg_write(fg, FGENABLE, FGEN); +} +EXPORT_SYMBOL_GPL(framegen_enable); + +void framegen_disable(struct dpu_framegen *fg) +{ + dpu_fg_write(fg, FGENABLE, 0); +} +EXPORT_SYMBOL_GPL(framegen_disable); + +void framegen_enable_pixel_link(struct dpu_framegen *fg) +{ + struct dpu_soc *dpu = fg->dpu; + const struct dpu_data *data = dpu->data; + + if (!(data->has_dual_ldb && fg->encoder_type == DRM_MODE_ENCODER_LVDS)) + dpu_pxlink_set_mst_enable(fg->dpu, fg->id, true); +} +EXPORT_SYMBOL_GPL(framegen_enable_pixel_link); + +void framegen_disable_pixel_link(struct dpu_framegen *fg) +{ + struct dpu_soc *dpu = fg->dpu; + const struct dpu_data *data = dpu->data; + + if (!(data->has_dual_ldb && fg->encoder_type == DRM_MODE_ENCODER_LVDS)) + dpu_pxlink_set_mst_enable(fg->dpu, fg->id, false); +} +EXPORT_SYMBOL_GPL(framegen_disable_pixel_link); + +void framegen_shdtokgen(struct dpu_framegen *fg) +{ + dpu_fg_write(fg, FGSLR, SHDTOKGEN); +} +EXPORT_SYMBOL_GPL(framegen_shdtokgen); + +void framegen_syncmode(struct dpu_framegen *fg, fgsyncmode_t mode) +{ + u32 val; + + val = dpu_fg_read(fg, FGSTCTRL); + val &= ~FGSYNCMODE_MASK; + val |= mode; + dpu_fg_write(fg, FGSTCTRL, val); + + dpu_pxlink_set_dc_sync_mode(fg->dpu, mode != FGSYNCMODE__OFF); +} 
+EXPORT_SYMBOL_GPL(framegen_syncmode); + +void framegen_cfg_videomode(struct dpu_framegen *fg, struct drm_display_mode *m, + bool side_by_side, unsigned int encoder_type) +{ + struct dpu_soc *dpu = fg->dpu; + u32 hact, htotal, hsync, hsbp; + u32 vact, vtotal, vsync, vsbp; + u32 kick_row, kick_col; + u32 val; + unsigned long disp_clock_rate, pll_clock_rate = 0; + int div = 0; + + fg->side_by_side = side_by_side; + fg->encoder_type = encoder_type; + + hact = m->crtc_hdisplay; + htotal = m->crtc_htotal; + hsync = m->crtc_hsync_end - m->crtc_hsync_start; + hsbp = m->crtc_htotal - m->crtc_hsync_start; + + if (side_by_side) { + hact /= 2; + htotal /= 2; + hsync /= 2; + hsbp /= 2; + } + + vact = m->crtc_vdisplay; + vtotal = m->crtc_vtotal; + vsync = m->crtc_vsync_end - m->crtc_vsync_start; + vsbp = m->crtc_vtotal - m->crtc_vsync_start; + + /* video mode */ + dpu_fg_write(fg, HTCFG1, HACT(hact) | HTOTAL(htotal)); + dpu_fg_write(fg, HTCFG2, HSYNC(hsync) | HSBP(hsbp) | HSEN); + dpu_fg_write(fg, VTCFG1, VACT(vact) | VTOTAL(vtotal)); + dpu_fg_write(fg, VTCFG2, VSYNC(vsync) | VSBP(vsbp) | VSEN); + + kick_col = hact + 1; + kick_row = vact; + /* + * FrameGen as slave needs to be kicked later for + * one line comparing to the master. 
+ */ + if (side_by_side && framegen_is_slave(fg)) + kick_row++; + + /* pkickconfig */ + dpu_fg_write(fg, PKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN); + + /* skikconfig */ + dpu_fg_write(fg, SKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN); + + /* primary and secondary area position config */ + dpu_fg_write(fg, PACFG, STARTX(0) | STARTY(0)); + dpu_fg_write(fg, SACFG, STARTX(0) | STARTY(0)); + + /* alpha */ + val = dpu_fg_read(fg, FGINCTRL); + val &= ~(ENPRIMALPHA | ENSECALPHA); + dpu_fg_write(fg, FGINCTRL, val); + + val = dpu_fg_read(fg, FGINCTRLPANIC); + val &= ~(ENPRIMALPHA | ENSECALPHA); + dpu_fg_write(fg, FGINCTRLPANIC, val); + + /* constant color */ + dpu_fg_write(fg, FGCCR, 0); + + disp_clock_rate = m->crtc_clock * 1000; + + if (encoder_type == DRM_MODE_ENCODER_TMDS) { + if (side_by_side) + dpu_pxlink_set_mst_addr(dpu, fg->id, fg->id ? 2 : 1); + else + dpu_pxlink_set_mst_addr(dpu, fg->id, 1); + + clk_set_parent(fg->clk_disp, fg->clk_bypass); + + fg->use_bypass_clk = true; + } else { + dpu_pxlink_set_mst_addr(dpu, fg->id, 0); + + clk_set_parent(fg->clk_disp, fg->clk_pll); + + /* find an even divisor for PLL */ + do { + div += 2; + pll_clock_rate = disp_clock_rate * div; + } while (pll_clock_rate < PLL_MIN_FREQ_HZ); + + clk_set_rate(fg->clk_pll, pll_clock_rate); + clk_set_rate(fg->clk_disp, disp_clock_rate); + + fg->use_bypass_clk = false; + } +} +EXPORT_SYMBOL_GPL(framegen_cfg_videomode); + +void framegen_pkickconfig(struct dpu_framegen *fg, bool enable) +{ + u32 val; + + val = dpu_fg_read(fg, PKICKCONFIG); + if (enable) + val |= EN; + else + val &= ~EN; + dpu_fg_write(fg, PKICKCONFIG, val); +} +EXPORT_SYMBOL_GPL(framegen_pkickconfig); + +void framegen_syncmode_fixup(struct dpu_framegen *fg, bool enable) +{ + u32 val; + + val = dpu_fg_read(fg, SECSTATCONFIG); + if (enable) + val |= BIT(7); + else + val &= ~BIT(7); + dpu_fg_write(fg, SECSTATCONFIG, val); +} +EXPORT_SYMBOL_GPL(framegen_syncmode_fixup); + +void framegen_displaymode(struct dpu_framegen *fg, 
fgdm_t mode) +{ + u32 val; + + val = dpu_fg_read(fg, FGINCTRL); + val &= ~FGDM_MASK; + val |= mode; + dpu_fg_write(fg, FGINCTRL, val); +} +EXPORT_SYMBOL_GPL(framegen_displaymode); + +void framegen_panic_displaymode(struct dpu_framegen *fg, fgdm_t mode) +{ + u32 val; + + val = dpu_fg_read(fg, FGINCTRLPANIC); + val &= ~FGDM_MASK; + val |= mode; + dpu_fg_write(fg, FGINCTRLPANIC, val); +} +EXPORT_SYMBOL_GPL(framegen_panic_displaymode); + +void framegen_wait_done(struct dpu_framegen *fg, struct drm_display_mode *m) +{ + unsigned long timeout, pending_framedur_jiffies; + int frame_size = m->crtc_htotal * m->crtc_vtotal; + int dotclock, pending_framedur_ns; + u32 val; + + dotclock = clk_get_rate(fg->clk_disp) / KHZ; + if (dotclock == 0) { + /* fall back to display mode's clock */ + dotclock = m->crtc_clock; + } + + /* + * The SoC designer indicates that there are two pending frames + * to complete in the worst case. + * So, three pending frames are enough for sure. + */ + pending_framedur_ns = div_u64((u64) 3 * frame_size * 1000000, dotclock); + pending_framedur_jiffies = nsecs_to_jiffies(pending_framedur_ns); + if (pending_framedur_jiffies > (3 * HZ)) { + pending_framedur_jiffies = 3 * HZ; + + dev_warn(fg->dpu->dev, + "truncate FrameGen%d pending frame duration to 3sec\n", + fg->id); + } + timeout = jiffies + pending_framedur_jiffies; + + do { + val = dpu_fg_read(fg, FGENSTS); + } while ((val & ENSTS) && time_before(jiffies, timeout)); + + dev_dbg(fg->dpu->dev, "FrameGen%d pending frame duration is %ums\n", + fg->id, jiffies_to_msecs(pending_framedur_jiffies)); + + if (val & ENSTS) + dev_err(fg->dpu->dev, "failed to wait for FrameGen%d done\n", + fg->id); +} +EXPORT_SYMBOL_GPL(framegen_wait_done); + +static inline u32 framegen_frame_index(u32 stamp) +{ + return (stamp & FRAMEINDEX_MASK) >> FRAMEINDEX_SHIFT; +} + +static inline u32 framegen_line_index(u32 stamp) +{ + return (stamp & LINEINDEX_MASK) >> LINEINDEX_SHIFT; +} + +void framegen_read_timestamp(struct dpu_framegen 
*fg, + u32 *frame_index, u32 *line_index) +{ + u32 stamp; + + stamp = dpu_fg_read(fg, FGTIMESTAMP); + *frame_index = framegen_frame_index(stamp); + *line_index = framegen_line_index(stamp); +} +EXPORT_SYMBOL_GPL(framegen_read_timestamp); + +void framegen_wait_for_frame_counter_moving(struct dpu_framegen *fg) +{ + u32 frame_index, line_index, last_frame_index; + unsigned long timeout = jiffies + msecs_to_jiffies(50); + + framegen_read_timestamp(fg, &frame_index, &line_index); + do { + last_frame_index = frame_index; + framegen_read_timestamp(fg, &frame_index, &line_index); + } while (last_frame_index == frame_index && + time_before(jiffies, timeout)); + + if (last_frame_index == frame_index) + dev_err(fg->dpu->dev, + "failed to wait for FrameGen%d frame counter moving\n", + fg->id); + else + dev_dbg(fg->dpu->dev, + "FrameGen%d frame counter moves - last %u, curr %d\n", + fg->id, last_frame_index, frame_index); +} +EXPORT_SYMBOL_GPL(framegen_wait_for_frame_counter_moving); + +bool framegen_secondary_requests_to_read_empty_fifo(struct dpu_framegen *fg) +{ + u32 val; + bool empty; + + val = dpu_fg_read(fg, FGCHSTAT); + + empty = !!(val & SFIFOEMPTY); + + if (empty) + dev_dbg(fg->dpu->dev, + "FrameGen%d secondary requests to read empty FIFO\n", + fg->id); + + return empty; +} +EXPORT_SYMBOL_GPL(framegen_secondary_requests_to_read_empty_fifo); + +void framegen_secondary_clear_channel_status(struct dpu_framegen *fg) +{ + dpu_fg_write(fg, FGCHSTATCLR, CLRSECSTAT); +} +EXPORT_SYMBOL_GPL(framegen_secondary_clear_channel_status); + +bool framegen_secondary_is_syncup(struct dpu_framegen *fg) +{ + u32 val = dpu_fg_read(fg, FGCHSTAT); + + return val & SECSYNCSTAT; +} +EXPORT_SYMBOL_GPL(framegen_secondary_is_syncup); + +void framegen_wait_for_secondary_syncup(struct dpu_framegen *fg) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(50); + bool syncup; + + do { + syncup = framegen_secondary_is_syncup(fg); + } while (!syncup && time_before(jiffies, timeout)); + + if (syncup) 
+ dev_dbg(fg->dpu->dev, "FrameGen%d secondary syncup\n", fg->id); + else + dev_err(fg->dpu->dev, + "failed to wait for FrameGen%d secondary syncup\n", + fg->id); +} +EXPORT_SYMBOL_GPL(framegen_wait_for_secondary_syncup); + +void framegen_enable_clock(struct dpu_framegen *fg) +{ + if (!fg->use_bypass_clk) + clk_prepare_enable(fg->clk_pll); + clk_prepare_enable(fg->clk_disp); + clk_prepare_enable(fg->clk_disp_lpcg); +} +EXPORT_SYMBOL_GPL(framegen_enable_clock); + +void framegen_disable_clock(struct dpu_framegen *fg) +{ + /* disable in reverse order of framegen_enable_clock() */ + clk_disable_unprepare(fg->clk_disp_lpcg); + clk_disable_unprepare(fg->clk_disp); + if (!fg->use_bypass_clk) + clk_disable_unprepare(fg->clk_pll); +} +EXPORT_SYMBOL_GPL(framegen_disable_clock); + +bool framegen_is_master(struct dpu_framegen *fg) +{ + const struct dpu_data *data = fg->dpu->data; + + return fg->id == data->master_stream_id; +} +EXPORT_SYMBOL_GPL(framegen_is_master); + +bool framegen_is_slave(struct dpu_framegen *fg) +{ + return !framegen_is_master(fg); +} +EXPORT_SYMBOL_GPL(framegen_is_slave); + +struct dpu_framegen *dpu_fg_get(struct dpu_soc *dpu, int id) +{ + struct dpu_framegen *fg; + int i; + + for (i = 0; i < ARRAY_SIZE(fg_ids); i++) + if (fg_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fg_ids)) + return ERR_PTR(-EINVAL); + + fg = dpu->fg_priv[i]; + + mutex_lock(&fg->mutex); + + if (fg->inuse) { + mutex_unlock(&fg->mutex); + return ERR_PTR(-EBUSY); + } + + fg->inuse = true; + + mutex_unlock(&fg->mutex); + + return fg; +} +EXPORT_SYMBOL_GPL(dpu_fg_get); + +void dpu_fg_put(struct dpu_framegen *fg) +{ + mutex_lock(&fg->mutex); + + fg->inuse = false; + + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fg_put); + +struct dpu_framegen *dpu_aux_fg_peek(struct dpu_framegen *fg) +{ + return fg->dpu->fg_priv[fg->id ^ 1]; +} +EXPORT_SYMBOL_GPL(dpu_aux_fg_peek); + +void _dpu_fg_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_framegen *fg; + int i; + + for (i = 0; i < ARRAY_SIZE(fg_ids); i++) + if (fg_ids[i] == id) + break; + + 
if (WARN_ON(i == ARRAY_SIZE(fg_ids))) + return; + + fg = dpu->fg_priv[i]; + + framegen_syncmode(fg, FGSYNCMODE__OFF); +} + +int dpu_fg_init(struct dpu_soc *dpu, unsigned int id, + unsigned long unused, unsigned long base) +{ + struct dpu_framegen *fg; + + fg = devm_kzalloc(dpu->dev, sizeof(*fg), GFP_KERNEL); + if (!fg) + return -ENOMEM; + + dpu->fg_priv[id] = fg; + + fg->base = devm_ioremap(dpu->dev, base, SZ_256); + if (!fg->base) + return -ENOMEM; + + fg->clk_pll = devm_clk_get(dpu->dev, id ? "pll1" : "pll0"); + if (IS_ERR(fg->clk_pll)) + return PTR_ERR(fg->clk_pll); + + fg->clk_bypass = devm_clk_get(dpu->dev, "bypass0"); + if (IS_ERR(fg->clk_bypass)) + return PTR_ERR(fg->clk_bypass); + + fg->clk_disp = devm_clk_get(dpu->dev, id ? "disp1" : "disp0"); + if (IS_ERR(fg->clk_disp)) + return PTR_ERR(fg->clk_disp); + + fg->clk_disp_lpcg = devm_clk_get(dpu->dev, id ? "disp1_lpcg" : "disp0_lpcg"); + if (IS_ERR(fg->clk_disp_lpcg)) + return PTR_ERR(fg->clk_disp_lpcg); + + fg->dpu = dpu; + fg->id = id; + mutex_init(&fg->mutex); + + _dpu_fg_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-hscaler.c b/drivers/gpu/imx/dpu/dpu-hscaler.c new file mode 100644 index 000000000000..9e69c619bd3f --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-hscaler.c @@ -0,0 +1,386 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_DYNAMIC 0x8 +#define PIXENGCFG_DYNAMIC_SRC_SEL_MASK 0x3F + +#define SETUP1 0xC +#define SCALE_FACTOR_MASK 0xFFFFF +#define SCALE_FACTOR(n) ((n) & 0xFFFFF) +#define SETUP2 0x10 +#define PHASE_OFFSET_MASK 0x1FFFFF +#define PHASE_OFFSET(n) ((n) & 0x1FFFFF) +#define CONTROL 0x14 +#define OUTPUT_SIZE_MASK 0x3FFF0000 +#define OUTPUT_SIZE(n) ((((n) - 1) << 16) & OUTPUT_SIZE_MASK) +#define FILTER_MODE 0x100 +#define SCALE_MODE 0x10 +#define MODE 0x1 + +static const hs_src_sel_t src_sels[3][6] = { + { + HS_SRC_SEL__DISABLE, + HS_SRC_SEL__FETCHDECODE0, + HS_SRC_SEL__MATRIX4, + HS_SRC_SEL__VSCALER4, + }, { + HS_SRC_SEL__DISABLE, + HS_SRC_SEL__FETCHDECODE1, + HS_SRC_SEL__MATRIX5, + HS_SRC_SEL__VSCALER5, + }, { + HS_SRC_SEL__DISABLE, + HS_SRC_SEL__MATRIX9, + HS_SRC_SEL__VSCALER9, + HS_SRC_SEL__FILTER9, + }, +}; + +struct dpu_hscaler { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; + /* see DPU_PLANE_SRC_xxx */ + unsigned int stream_id; +}; + +static inline u32 dpu_pec_hs_read(struct dpu_hscaler *hs, + unsigned int offset) +{ + return readl(hs->pec_base + offset); +} + +static inline void dpu_pec_hs_write(struct dpu_hscaler *hs, + unsigned int offset, u32 value) +{ + writel(value, hs->pec_base + offset); +} + +static inline u32 dpu_hs_read(struct dpu_hscaler *hs, unsigned int offset) +{ + return readl(hs->base + offset); +} + +static inline void dpu_hs_write(struct dpu_hscaler *hs, + unsigned int offset, u32 value) +{ + writel(value, hs->base + offset); +} + +int hscaler_pixengcfg_dynamic_src_sel(struct dpu_hscaler *hs, hs_src_sel_t src) +{ + struct dpu_soc *dpu = hs->dpu; + const unsigned int hs_id_array[] = {4, 5, 9}; + int i, j; + u32 val; + + for (i = 0; i < ARRAY_SIZE(hs_id_array); i++) + if 
(hs_id_array[i] == hs->id) + break; + + if (WARN_ON(i == ARRAY_SIZE(hs_id_array))) + return -EINVAL; + + mutex_lock(&hs->mutex); + for (j = 0; j < ARRAY_SIZE(src_sels[0]); j++) { + if (src_sels[i][j] == src) { + val = dpu_pec_hs_read(hs, PIXENGCFG_DYNAMIC); + val &= ~PIXENGCFG_DYNAMIC_SRC_SEL_MASK; + val |= src; + dpu_pec_hs_write(hs, PIXENGCFG_DYNAMIC, val); + mutex_unlock(&hs->mutex); + return 0; + } + } + mutex_unlock(&hs->mutex); + + dev_err(dpu->dev, "Invalid source for HScaler%d\n", hs->id); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(hscaler_pixengcfg_dynamic_src_sel); + +void hscaler_pixengcfg_clken(struct dpu_hscaler *hs, pixengcfg_clken_t clken) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_pec_hs_read(hs, PIXENGCFG_DYNAMIC); + val &= ~CLKEN_MASK; + val |= clken << CLKEN_MASK_SHIFT; + dpu_pec_hs_write(hs, PIXENGCFG_DYNAMIC, val); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_pixengcfg_clken); + +void hscaler_shden(struct dpu_hscaler *hs, bool enable) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_hs_write(hs, STATICCONTROL, val); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_shden); + +void hscaler_setup1(struct dpu_hscaler *hs, u32 src, u32 dst) +{ + struct dpu_soc *dpu = hs->dpu; + u32 scale_factor; + u64 tmp64; + + if (src == dst) { + scale_factor = 0x80000; + } else { + if (src > dst) { + tmp64 = (u64)((u64)dst * 0x80000); + do_div(tmp64, src); + + } else { + tmp64 = (u64)((u64)src * 0x80000); + do_div(tmp64, dst); + } + scale_factor = (u32)tmp64; + } + + WARN_ON(scale_factor > 0x80000); + + mutex_lock(&hs->mutex); + dpu_hs_write(hs, SETUP1, SCALE_FACTOR(scale_factor)); + mutex_unlock(&hs->mutex); + + dev_dbg(dpu->dev, "Hscaler%d scale factor 0x%08x\n", + hs->id, scale_factor); +} +EXPORT_SYMBOL_GPL(hscaler_setup1); + +void hscaler_setup2(struct dpu_hscaler *hs, u32 phase_offset) +{ + mutex_lock(&hs->mutex); + dpu_hs_write(hs, 
SETUP2, PHASE_OFFSET(phase_offset)); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_setup2); + +void hscaler_output_size(struct dpu_hscaler *hs, u32 line_num) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + val &= ~OUTPUT_SIZE_MASK; + val |= OUTPUT_SIZE(line_num); + dpu_hs_write(hs, CONTROL, val); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_output_size); + +void hscaler_filter_mode(struct dpu_hscaler *hs, scaler_filter_mode_t m) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + val &= ~FILTER_MODE; + val |= m; + dpu_hs_write(hs, CONTROL, val); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_filter_mode); + +void hscaler_scale_mode(struct dpu_hscaler *hs, scaler_scale_mode_t m) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + val &= ~SCALE_MODE; + val |= m; + dpu_hs_write(hs, CONTROL, val); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_scale_mode); + +void hscaler_mode(struct dpu_hscaler *hs, scaler_mode_t m) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + val &= ~MODE; + val |= m; + dpu_hs_write(hs, CONTROL, val); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_mode); + +bool hscaler_is_enabled(struct dpu_hscaler *hs) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + mutex_unlock(&hs->mutex); + + return (val & MODE) == SCALER_ACTIVE; +} +EXPORT_SYMBOL_GPL(hscaler_is_enabled); + +dpu_block_id_t hscaler_get_block_id(struct dpu_hscaler *hs) +{ + switch (hs->id) { + case 4: + return ID_HSCALER4; + case 5: + return ID_HSCALER5; + case 9: + return ID_HSCALER9; + default: + WARN_ON(1); + } + + return ID_NONE; +} +EXPORT_SYMBOL_GPL(hscaler_get_block_id); + +unsigned int hscaler_get_stream_id(struct dpu_hscaler *hs) +{ + return hs->stream_id; +} +EXPORT_SYMBOL_GPL(hscaler_get_stream_id); + +void hscaler_set_stream_id(struct dpu_hscaler *hs, unsigned int id) +{ + switch (id) { 
+ case DPU_PLANE_SRC_TO_DISP_STREAM0: + case DPU_PLANE_SRC_TO_DISP_STREAM1: + case DPU_PLANE_SRC_DISABLED: + hs->stream_id = id; + break; + default: + WARN_ON(1); + } +} +EXPORT_SYMBOL_GPL(hscaler_set_stream_id); + +struct dpu_hscaler *dpu_hs_get(struct dpu_soc *dpu, int id) +{ + struct dpu_hscaler *hs; + int i; + + for (i = 0; i < ARRAY_SIZE(hs_ids); i++) + if (hs_ids[i] == id) + break; + + if (i == ARRAY_SIZE(hs_ids)) + return ERR_PTR(-EINVAL); + + hs = dpu->hs_priv[i]; + + mutex_lock(&hs->mutex); + + if (hs->inuse) { + mutex_unlock(&hs->mutex); + return ERR_PTR(-EBUSY); + } + + hs->inuse = true; + + mutex_unlock(&hs->mutex); + + return hs; +} +EXPORT_SYMBOL_GPL(dpu_hs_get); + +void dpu_hs_put(struct dpu_hscaler *hs) +{ + mutex_lock(&hs->mutex); + + hs->inuse = false; + + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(dpu_hs_put); + +void _dpu_hs_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_hscaler *hs; + int i; + + for (i = 0; i < ARRAY_SIZE(hs_ids); i++) + if (hs_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(hs_ids))) + return; + + hs = dpu->hs_priv[i]; + + hscaler_shden(hs, true); + hscaler_setup2(hs, 0); + hscaler_pixengcfg_dynamic_src_sel(hs, HS_SRC_SEL__DISABLE); +} + +int dpu_hs_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_hscaler *hs; + int i; + + hs = devm_kzalloc(dpu->dev, sizeof(*hs), GFP_KERNEL); + if (!hs) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(hs_ids); i++) + if (hs_ids[i] == id) + break; + + if (i == ARRAY_SIZE(hs_ids)) + return -EINVAL; + + dpu->hs_priv[i] = hs; + + hs->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_8); + if (!hs->pec_base) + return -ENOMEM; + + hs->base = devm_ioremap(dpu->dev, base, SZ_1K); + if (!hs->base) + return -ENOMEM; + + hs->dpu = dpu; + hs->id = id; + + mutex_init(&hs->mutex); + + _dpu_hs_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-layerblend.c b/drivers/gpu/imx/dpu/dpu-layerblend.c new file mode 100644 
index 000000000000..c19fcbdb169b --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-layerblend.c @@ -0,0 +1,346 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <drm/drm_blend.h> +#include <linux/io.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_DYNAMIC 0x8 +#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK 0x3F +#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK 0x3F00 +#define PIXENGCFG_DYNAMIC_SEC_SEL_SHIFT 8 + +static const lb_prim_sel_t prim_sels[] = { + LB_PRIM_SEL__DISABLE, + LB_PRIM_SEL__BLITBLEND9, + LB_PRIM_SEL__CONSTFRAME0, + LB_PRIM_SEL__CONSTFRAME1, + LB_PRIM_SEL__CONSTFRAME4, + LB_PRIM_SEL__CONSTFRAME5, + LB_PRIM_SEL__MATRIX4, + LB_PRIM_SEL__HSCALER4, + LB_PRIM_SEL__VSCALER4, + LB_PRIM_SEL__MATRIX5, + LB_PRIM_SEL__HSCALER5, + LB_PRIM_SEL__VSCALER5, + LB_PRIM_SEL__LAYERBLEND0, + LB_PRIM_SEL__LAYERBLEND1, + LB_PRIM_SEL__LAYERBLEND2, + LB_PRIM_SEL__LAYERBLEND3, +}; + +#define PIXENGCFG_STATUS 0xC +#define SHDTOKSEL (0x3 << 3) +#define SHDTOKSEL_SHIFT 3 +#define SHDLDSEL (0x3 << 1) +#define SHDLDSEL_SHIFT 1 +#define CONTROL 0xC +#define OPERATION_MODE_MASK BIT(0) +#define BLENDCONTROL 0x10 +#define ALPHA(a) (((a) & 0xFF) << 16) +#define PRIM_C_BLD_FUNC__ONE_MINUS_CONST_ALPHA 0x7 +#define PRIM_C_BLD_FUNC__ONE_MINUS_SEC_ALPHA 0x5 +#define PRIM_C_BLD_FUNC__ZERO 0x0 +#define SEC_C_BLD_FUNC__CONST_ALPHA (0x6 << 4) 
+#define SEC_C_BLD_FUNC__SEC_ALPHA (0x4 << 4) +#define PRIM_A_BLD_FUNC__ZERO (0x0 << 8) +#define SEC_A_BLD_FUNC__ZERO (0x0 << 12) +#define POSITION 0x14 +#define XPOS(x) ((x) & 0x7FFF) +#define YPOS(y) (((y) & 0x7FFF) << 16) +#define PRIMCONTROLWORD 0x18 +#define SECCONTROLWORD 0x1C + +struct dpu_layerblend { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_pec_lb_read(struct dpu_layerblend *lb, + unsigned int offset) +{ + return readl(lb->pec_base + offset); +} + +static inline void dpu_pec_lb_write(struct dpu_layerblend *lb, + unsigned int offset, u32 value) +{ + writel(value, lb->pec_base + offset); +} + +static inline u32 dpu_lb_read(struct dpu_layerblend *lb, unsigned int offset) +{ + return readl(lb->base + offset); +} + +static inline void dpu_lb_write(struct dpu_layerblend *lb, + unsigned int offset, u32 value) +{ + writel(value, lb->base + offset); +} + +int layerblend_pixengcfg_dynamic_prim_sel(struct dpu_layerblend *lb, + lb_prim_sel_t prim) +{ + struct dpu_soc *dpu = lb->dpu; + int fixed_sels_num = ARRAY_SIZE(prim_sels) - 4; + int i; + u32 val; + + mutex_lock(&lb->mutex); + for (i = 0; i < fixed_sels_num + lb->id; i++) { + if (prim_sels[i] == prim) { + val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC); + val &= ~PIXENGCFG_DYNAMIC_PRIM_SEL_MASK; + val |= prim; + dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, val); + mutex_unlock(&lb->mutex); + return 0; + } + } + mutex_unlock(&lb->mutex); + + dev_err(dpu->dev, "Invalid primary source for LayerBlend%d\n", lb->id); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(layerblend_pixengcfg_dynamic_prim_sel); + +void layerblend_pixengcfg_dynamic_sec_sel(struct dpu_layerblend *lb, + lb_sec_sel_t sec) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC); + val &= ~PIXENGCFG_DYNAMIC_SEC_SEL_MASK; + val |= sec << PIXENGCFG_DYNAMIC_SEC_SEL_SHIFT; + dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, val); + 
mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_pixengcfg_dynamic_sec_sel); + +void layerblend_pixengcfg_clken(struct dpu_layerblend *lb, + pixengcfg_clken_t clken) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC); + val &= ~CLKEN_MASK; + val |= clken << CLKEN_MASK_SHIFT; + dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, val); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_pixengcfg_clken); + +void layerblend_shden(struct dpu_layerblend *lb, bool enable) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_lb_read(lb, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_lb_write(lb, STATICCONTROL, val); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_shden); + +void layerblend_shdtoksel(struct dpu_layerblend *lb, lb_shadow_sel_t sel) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_lb_read(lb, STATICCONTROL); + val &= ~SHDTOKSEL; + val |= (sel << SHDTOKSEL_SHIFT); + dpu_lb_write(lb, STATICCONTROL, val); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_shdtoksel); + +void layerblend_shdldsel(struct dpu_layerblend *lb, lb_shadow_sel_t sel) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_lb_read(lb, STATICCONTROL); + val &= ~SHDLDSEL; + val |= (sel << SHDLDSEL_SHIFT); + dpu_lb_write(lb, STATICCONTROL, val); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_shdldsel); + +void layerblend_control(struct dpu_layerblend *lb, lb_mode_t mode) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_lb_read(lb, CONTROL); + val &= ~OPERATION_MODE_MASK; + val |= mode; + dpu_lb_write(lb, CONTROL, val); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_control); + +void layerblend_blendcontrol(struct dpu_layerblend *lb, unsigned int zpos, + unsigned int pixel_blend_mode, u16 alpha) +{ + u32 val = PRIM_A_BLD_FUNC__ZERO | SEC_A_BLD_FUNC__ZERO; + + if (zpos == 0) { + val |= PRIM_C_BLD_FUNC__ZERO | SEC_C_BLD_FUNC__CONST_ALPHA; + alpha = 
DRM_BLEND_ALPHA_OPAQUE; + } else { + switch (pixel_blend_mode) { + case DRM_MODE_BLEND_PIXEL_NONE: + val |= PRIM_C_BLD_FUNC__ONE_MINUS_CONST_ALPHA | + SEC_C_BLD_FUNC__CONST_ALPHA; + break; + case DRM_MODE_BLEND_PREMULTI: + val |= PRIM_C_BLD_FUNC__ONE_MINUS_SEC_ALPHA | + SEC_C_BLD_FUNC__CONST_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + val |= PRIM_C_BLD_FUNC__ONE_MINUS_SEC_ALPHA | + SEC_C_BLD_FUNC__SEC_ALPHA; + break; + default: + break; + } + } + + val |= ALPHA(alpha >> 8); + + mutex_lock(&lb->mutex); + dpu_lb_write(lb, BLENDCONTROL, val); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_blendcontrol); + +void layerblend_position(struct dpu_layerblend *lb, int x, int y) +{ + mutex_lock(&lb->mutex); + dpu_lb_write(lb, POSITION, XPOS(x) | YPOS(y)); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_position); + +struct dpu_layerblend *dpu_lb_get(struct dpu_soc *dpu, int id) +{ + struct dpu_layerblend *lb; + int i; + + for (i = 0; i < ARRAY_SIZE(lb_ids); i++) + if (lb_ids[i] == id) + break; + + if (i == ARRAY_SIZE(lb_ids)) + return ERR_PTR(-EINVAL); + + lb = dpu->lb_priv[i]; + + mutex_lock(&lb->mutex); + + if (lb->inuse) { + mutex_unlock(&lb->mutex); + return ERR_PTR(-EBUSY); + } + + lb->inuse = true; + + mutex_unlock(&lb->mutex); + + return lb; +} +EXPORT_SYMBOL_GPL(dpu_lb_get); + +void dpu_lb_put(struct dpu_layerblend *lb) +{ + mutex_lock(&lb->mutex); + + lb->inuse = false; + + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(dpu_lb_put); + +void _dpu_lb_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_layerblend *lb; + int i; + + for (i = 0; i < ARRAY_SIZE(lb_ids); i++) + if (lb_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(lb_ids))) + return; + + lb = dpu->lb_priv[i]; + + layerblend_pixengcfg_dynamic_prim_sel(lb, LB_PRIM_SEL__DISABLE); + layerblend_pixengcfg_dynamic_sec_sel(lb, LB_SEC_SEL__DISABLE); + layerblend_pixengcfg_clken(lb, CLKEN__AUTOMATIC); + layerblend_shdldsel(lb, BOTH); + layerblend_shdtoksel(lb, BOTH); + 
layerblend_shden(lb, true);
+}
+
+int dpu_lb_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long pec_base, unsigned long base)
+{
+	struct dpu_layerblend *lb;
+	int ret;
+
+	/* guard lb_priv[] indexing, like the other unit init functions */
+	if (WARN_ON(id >= ARRAY_SIZE(lb_ids)))
+		return -EINVAL;
+	lb = devm_kzalloc(dpu->dev, sizeof(*lb), GFP_KERNEL);
+	if (!lb)
+		return -ENOMEM;
+	dpu->lb_priv[id] = lb;
+
+	lb->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
+	if (!lb->pec_base)
+		return -ENOMEM;
+
+	lb->base = devm_ioremap(dpu->dev, base, SZ_32);
+	if (!lb->base)
+		return -ENOMEM;
+
+	lb->dpu = dpu;
+	lb->id = id;
+	mutex_init(&lb->mutex);
+
+	ret = layerblend_pixengcfg_dynamic_prim_sel(lb, LB_PRIM_SEL__DISABLE);
+	if (ret < 0)
+		return ret;
+
+	_dpu_lb_init(dpu, id);
+	return 0;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-prv.h b/drivers/gpu/imx/dpu/dpu-prv.h
new file mode 100644
index 000000000000..f8805037da2e
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-prv.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */ +#ifndef __DPU_PRV_H__ +#define __DPU_PRV_H__ + +#include <linux/firmware/imx/sci.h> +#include <drm/drm_fourcc.h> +#include <video/dpu.h> + +#define STATICCONTROL 0x8 +#define SHDLDREQSTICKY(lm) (((lm) & 0xFF) << 24) +#define SHDLDREQSTICKY_MASK (0xFF << 24) +#define BASEADDRESSAUTOUPDATE(lm) (((lm) & 0xFF) << 16) +#define BASEADDRESSAUTOUPDATE_MASK (0xFF << 16) +#define SHDEN BIT(0) +#define BURSTBUFFERMANAGEMENT 0xC +#define SETNUMBUFFERS(n) ((n) & 0xFF) +#define SETBURSTLENGTH(n) (((n) & 0x1F) << 8) +#define SETBURSTLENGTH_MASK 0x1F00 +#define LINEMODE_MASK 0x80000000U +#define LINEMODE_SHIFT 31U +enum linemode { + /* + * Mandatory setting for operation in the Display Controller. + * Works also for Blit Engine with marginal performance impact. + */ + LINEMODE__DISPLAY = 0, + /* Recommended setting for operation in the Blit Engine. */ + LINEMODE__BLIT = 1 << LINEMODE_SHIFT, +}; + +#define BITSPERPIXEL(bpp) (((bpp) & 0x3F) << 16) +#define STRIDE(n) (((n) - 1) & 0xFFFF) +#define LINEWIDTH(w) (((w) - 1) & 0x3FFF) +#define LINECOUNT(h) ((((h) - 1) & 0x3FFF) << 16) +#define ITUFORMAT BIT(31) +#define R_BITS(n) (((n) & 0xF) << 24) +#define G_BITS(n) (((n) & 0xF) << 16) +#define B_BITS(n) (((n) & 0xF) << 8) +#define A_BITS(n) ((n) & 0xF) +#define R_SHIFT(n) (((n) & 0x1F) << 24) +#define G_SHIFT(n) (((n) & 0x1F) << 16) +#define B_SHIFT(n) (((n) & 0x1F) << 8) +#define A_SHIFT(n) ((n) & 0x1F) +#define Y_BITS(n) R_BITS(n) +#define Y_BITS_MASK 0xF000000 +#define U_BITS(n) G_BITS(n) +#define U_BITS_MASK 0xF0000 +#define V_BITS(n) B_BITS(n) +#define V_BITS_MASK 0xF00 +#define Y_SHIFT(n) R_SHIFT(n) +#define Y_SHIFT_MASK 0x1F000000 +#define U_SHIFT(n) G_SHIFT(n) +#define U_SHIFT_MASK 0x1F0000 +#define V_SHIFT(n) B_SHIFT(n) +#define V_SHIFT_MASK 0x1F00 +#define LAYERXOFFSET(x) ((x) & 0x7FFF) +#define LAYERYOFFSET(y) (((y) & 0x7FFF) << 16) +#define CLIPWINDOWXOFFSET(x) ((x) & 0x7FFF) +#define CLIPWINDOWYOFFSET(y) (((y) & 0x7FFF) << 16) +#define CLIPWINDOWWIDTH(w) (((w) - 1) & 
0x3FFF) +#define CLIPWINDOWHEIGHT(h) ((((h) - 1) & 0x3FFF) << 16) +#define CONSTANTALPHA_MASK 0xFF +#define CONSTANTALPHA(n) ((n) & CONSTANTALPHA_MASK) +#define PALETTEENABLE BIT(0) +typedef enum { + TILE_FILL_ZERO, + TILE_FILL_CONSTANT, + TILE_PAD, + TILE_PAD_ZERO, +} tilemode_t; +#define ALPHASRCENABLE BIT(8) +#define ALPHACONSTENABLE BIT(9) +#define ALPHAMASKENABLE BIT(10) +#define ALPHATRANSENABLE BIT(11) +#define ALPHA_ENABLE_MASK (ALPHASRCENABLE | ALPHACONSTENABLE | \ + ALPHAMASKENABLE | ALPHATRANSENABLE) +#define RGBALPHASRCENABLE BIT(12) +#define RGBALPHACONSTENABLE BIT(13) +#define RGBALPHAMASKENABLE BIT(14) +#define RGBALPHATRANSENABLE BIT(15) +#define RGB_ENABLE_MASK (RGBALPHASRCENABLE | \ + RGBALPHACONSTENABLE | \ + RGBALPHAMASKENABLE | \ + RGBALPHATRANSENABLE) +#define PREMULCONSTRGB BIT(16) +typedef enum { + YUVCONVERSIONMODE__OFF, + YUVCONVERSIONMODE__ITU601, + YUVCONVERSIONMODE__ITU601_FR, + YUVCONVERSIONMODE__ITU709, +} yuvconversionmode_t; +#define YUVCONVERSIONMODE_MASK 0x60000 +#define YUVCONVERSIONMODE(m) (((m) & 0x3) << 17) +#define GAMMAREMOVEENABLE BIT(20) +#define CLIPWINDOWENABLE BIT(30) +#define SOURCEBUFFERENABLE BIT(31) +#define EMPTYFRAME BIT(31) +#define FRAMEWIDTH(w) (((w) - 1) & 0x3FFF) +#define FRAMEHEIGHT(h) ((((h) - 1) & 0x3FFF) << 16) +#define DELTAX_MASK 0x3F000 +#define DELTAY_MASK 0xFC0000 +#define DELTAX(x) (((x) & 0x3F) << 12) +#define DELTAY(y) (((y) & 0x3F) << 18) +#define YUV422UPSAMPLINGMODE_MASK BIT(5) +#define YUV422UPSAMPLINGMODE(m) (((m) & 0x1) << 5) +typedef enum { + YUV422UPSAMPLINGMODE__REPLICATE, + YUV422UPSAMPLINGMODE__INTERPOLATE, +} yuv422upsamplingmode_t; +#define INPUTSELECT_MASK 0x18 +#define INPUTSELECT(s) (((s) & 0x3) << 3) +typedef enum { + INPUTSELECT__INACTIVE, + INPUTSELECT__COMPPACK, + INPUTSELECT__ALPHAMASK, + INPUTSELECT__COORDINATE, +} inputselect_t; +#define RASTERMODE_MASK 0x7 +#define RASTERMODE(m) ((m) & 0x7) +typedef enum { + RASTERMODE__NORMAL, + RASTERMODE__DECODE, + RASTERMODE__ARBITRARY, 
+ RASTERMODE__PERSPECTIVE, + RASTERMODE__YUV422, + RASTERMODE__AFFINE, +} rastermode_t; +#define SHDTOKGEN BIT(0) +#define FETCHTYPE_MASK 0xF + +#define DPU_FRAC_PLANE_LAYER_NUM 8 + +#define DPU_VPROC_CAP_HSCALER4 BIT(0) +#define DPU_VPROC_CAP_VSCALER4 BIT(1) +#define DPU_VPROC_CAP_HSCALER5 BIT(2) +#define DPU_VPROC_CAP_VSCALER5 BIT(3) +#define DPU_VPROC_CAP_FETCHECO0 BIT(4) +#define DPU_VPROC_CAP_FETCHECO1 BIT(5) + +#define DPU_VPROC_CAP_HSCALE (DPU_VPROC_CAP_HSCALER4 | \ + DPU_VPROC_CAP_HSCALER5) +#define DPU_VPROC_CAP_VSCALE (DPU_VPROC_CAP_VSCALER4 | \ + DPU_VPROC_CAP_VSCALER5) +#define DPU_VPROC_CAP_FETCHECO (DPU_VPROC_CAP_FETCHECO0 | \ + DPU_VPROC_CAP_FETCHECO1) + +struct dpu_unit { + char *name; + unsigned int num; + const unsigned int *ids; + const unsigned long *pec_ofss; /* PixEngCFG */ + const unsigned long *ofss; + const unsigned int *dprc_ids; +}; + +struct cm_reg_ofs { + u32 ipidentifier; + u32 lockunlock; + u32 lockstatus; + u32 userinterruptmask; + u32 interruptenable; + u32 interruptpreset; + u32 interruptclear; + u32 interruptstatus; + u32 userinterruptenable; + u32 userinterruptpreset; + u32 userinterruptclear; + u32 userinterruptstatus; + u32 generalpurpose; +}; + +struct dpu_data { + unsigned long cm_ofs; /* common */ + const struct dpu_unit *cfs; + const struct dpu_unit *decs; + const struct dpu_unit *eds; + const struct dpu_unit *fds; + const struct dpu_unit *fes; + const struct dpu_unit *fgs; + const struct dpu_unit *fls; + const struct dpu_unit *fws; + const struct dpu_unit *hss; + const struct dpu_unit *lbs; + const struct dpu_unit *sts; + const struct dpu_unit *tcons; + const struct dpu_unit *vss; + const struct cm_reg_ofs *cm_reg_ofs; + const unsigned long *unused_irq; + + unsigned int syncmode_min_prate; /* need pixel combiner, KHz */ + unsigned int singlemode_max_width; + unsigned int master_stream_id; + + u32 plane_src_mask; + + bool has_dual_ldb; +}; + +struct dpu_soc { + struct device *dev; + const struct dpu_data *data; + spinlock_t 
lock; + struct list_head list; + + struct device *pd_dc_dev; + struct device *pd_pll0_dev; + struct device *pd_pll1_dev; + struct device_link *pd_dc_link; + struct device_link *pd_pll0_link; + struct device_link *pd_pll1_link; + + void __iomem *cm_reg; + + int id; + int usecount; + + int irq_extdst0_shdload; + int irq_extdst4_shdload; + int irq_extdst1_shdload; + int irq_extdst5_shdload; + int irq_disengcfg_shdload0; + int irq_disengcfg_framecomplete0; + int irq_disengcfg_shdload1; + int irq_disengcfg_framecomplete1; + int irq_line_num; + + struct irq_domain *domain; + + struct imx_sc_ipc *dpu_ipc_handle; + + struct dpu_constframe *cf_priv[4]; + struct dpu_disengcfg *dec_priv[2]; + struct dpu_extdst *ed_priv[4]; + struct dpu_fetchunit *fd_priv[2]; + struct dpu_fetchunit *fe_priv[4]; + struct dpu_framegen *fg_priv[2]; + struct dpu_fetchunit *fl_priv[1]; + struct dpu_fetchunit *fw_priv[1]; + struct dpu_hscaler *hs_priv[3]; + struct dpu_layerblend *lb_priv[4]; + struct dpu_store *st_priv[1]; + struct dpu_tcon *tcon_priv[2]; + struct dpu_vscaler *vs_priv[3]; +}; + +int dpu_format_horz_chroma_subsampling(u32 format); +int dpu_format_vert_chroma_subsampling(u32 format); +int dpu_format_num_planes(u32 format); +int dpu_format_plane_width(int width, u32 format, int plane); +int dpu_format_plane_height(int height, u32 format, int plane); + +#define _DECLARE_DPU_UNIT_INIT_FUNC(block) \ +void _dpu_##block##_init(struct dpu_soc *dpu, unsigned int id) \ + +_DECLARE_DPU_UNIT_INIT_FUNC(cf); +_DECLARE_DPU_UNIT_INIT_FUNC(dec); +_DECLARE_DPU_UNIT_INIT_FUNC(ed); +_DECLARE_DPU_UNIT_INIT_FUNC(fd); +_DECLARE_DPU_UNIT_INIT_FUNC(fe); +_DECLARE_DPU_UNIT_INIT_FUNC(fg); +_DECLARE_DPU_UNIT_INIT_FUNC(fl); +_DECLARE_DPU_UNIT_INIT_FUNC(fw); +_DECLARE_DPU_UNIT_INIT_FUNC(hs); +_DECLARE_DPU_UNIT_INIT_FUNC(lb); +_DECLARE_DPU_UNIT_INIT_FUNC(st); +_DECLARE_DPU_UNIT_INIT_FUNC(tcon); +_DECLARE_DPU_UNIT_INIT_FUNC(vs); + +#define DECLARE_DPU_UNIT_INIT_FUNC(block) \ +int dpu_##block##_init(struct dpu_soc 
*dpu, unsigned int id, \ + unsigned long pec_base, unsigned long base) + +DECLARE_DPU_UNIT_INIT_FUNC(cf); +DECLARE_DPU_UNIT_INIT_FUNC(dec); +DECLARE_DPU_UNIT_INIT_FUNC(ed); +DECLARE_DPU_UNIT_INIT_FUNC(fd); +DECLARE_DPU_UNIT_INIT_FUNC(fe); +DECLARE_DPU_UNIT_INIT_FUNC(fg); +DECLARE_DPU_UNIT_INIT_FUNC(fl); +DECLARE_DPU_UNIT_INIT_FUNC(fw); +DECLARE_DPU_UNIT_INIT_FUNC(hs); +DECLARE_DPU_UNIT_INIT_FUNC(lb); +DECLARE_DPU_UNIT_INIT_FUNC(st); +DECLARE_DPU_UNIT_INIT_FUNC(tcon); +DECLARE_DPU_UNIT_INIT_FUNC(vs); + +static inline u32 dpu_pec_fu_read(struct dpu_fetchunit *fu, unsigned int offset) +{ + return readl(fu->pec_base + offset); +} + +static inline void dpu_pec_fu_write(struct dpu_fetchunit *fu, + unsigned int offset, u32 value) +{ + writel(value, fu->pec_base + offset); +} + +static inline u32 dpu_fu_read(struct dpu_fetchunit *fu, unsigned int offset) +{ + return readl(fu->base + offset); +} + +static inline void dpu_fu_write(struct dpu_fetchunit *fu, + unsigned int offset, u32 value) +{ + writel(value, fu->base + offset); +} + +static inline u32 rgb_color(u8 r, u8 g, u8 b, u8 a) +{ + return (r << 24) | (g << 16) | (b << 8) | a; +} + +static inline u32 yuv_color(u8 y, u8 u, u8 v) +{ + return (y << 24) | (u << 16) | (v << 8); +} + +void tcon_get_pc(struct dpu_tcon *tcon, void *data); + +static const unsigned int cf_ids[] = {0, 1, 4, 5}; +static const unsigned int dec_ids[] = {0, 1}; +static const unsigned int ed_ids[] = {0, 1, 4, 5}; +static const unsigned int fd_ids[] = {0, 1}; +static const unsigned int fe_ids[] = {0, 1, 2, 9}; +static const unsigned int fg_ids[] = {0, 1}; +static const unsigned int fl_ids[] = {0}; +static const unsigned int fw_ids[] = {2}; +static const unsigned int hs_ids[] = {4, 5, 9}; +static const unsigned int lb_ids[] = {0, 1, 2, 3}; +static const unsigned int st_ids[] = {9}; +static const unsigned int tcon_ids[] = {0, 1}; +static const unsigned int vs_ids[] = {4, 5, 9}; + +static const unsigned int fd_dprc_ids[] = {3, 4}; +static const unsigned 
int fl_dprc_ids[] = {2}; +static const unsigned int fw_dprc_ids[] = {5}; + +struct dpu_pixel_format { + u32 pixel_format; + u32 bits; + u32 shift; +}; + +static const struct dpu_pixel_format dpu_pixel_format_matrix[] = { + { + DRM_FORMAT_ARGB8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8), + R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(24), + }, { + DRM_FORMAT_XRGB8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_ABGR8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8), + R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(24), + }, { + DRM_FORMAT_XBGR8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(0), + }, { + DRM_FORMAT_RGBA8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8), + R_SHIFT(24) | G_SHIFT(16) | B_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_RGBX8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(24) | G_SHIFT(16) | B_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_BGRA8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8), + R_SHIFT(8) | G_SHIFT(16) | B_SHIFT(24) | A_SHIFT(0), + }, { + DRM_FORMAT_BGRX8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(8) | G_SHIFT(16) | B_SHIFT(24) | A_SHIFT(0), + }, { + DRM_FORMAT_RGB888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_BGR888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(0), + }, { + DRM_FORMAT_RGB565, + R_BITS(5) | G_BITS(6) | B_BITS(5) | A_BITS(0), + R_SHIFT(11) | G_SHIFT(5) | B_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_YUYV, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_UYVY, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(8) | U_SHIFT(0) | V_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_NV12, + Y_BITS(8) | U_BITS(8) | V_BITS(8) 
| A_BITS(0), + Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_NV21, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_NV16, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_NV61, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_NV24, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_NV42, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0), + }, +}; + +int dpu_sc_misc_get_handle(struct dpu_soc *dpu); +int dpu_pxlink_set_mst_addr(struct dpu_soc *dpu, int disp_id, u32 val); +int dpu_pxlink_set_mst_enable(struct dpu_soc *dpu, int disp_id, bool enable); +int dpu_pxlink_set_mst_valid(struct dpu_soc *dpu, int disp_id, bool enable); +int dpu_pxlink_set_sync_ctrl(struct dpu_soc *dpu, int disp_id, bool enable); +int dpu_pxlink_set_dc_sync_mode(struct dpu_soc *dpu, bool enable); +int dpu_sc_misc_init(struct dpu_soc *dpu); +#endif /* __DPU_PRV_H__ */ diff --git a/drivers/gpu/imx/dpu/dpu-sc-misc.c b/drivers/gpu/imx/dpu/dpu-sc-misc.c new file mode 100644 index 000000000000..20f600cb5a68 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-sc-misc.c @@ -0,0 +1,93 @@ +/* + * Copyright 2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include "dpu-prv.h"
+
+static inline int
+dpu_sc_misc_set_ctrl(struct dpu_soc *dpu, u32 rsc, u8 ctrl, u32 val)
+{
+	return imx_sc_misc_set_control(dpu->dpu_ipc_handle, rsc, ctrl, val);
+}
+
+int dpu_sc_misc_get_handle(struct dpu_soc *dpu)
+{
+	return imx_scu_get_handle(&dpu->dpu_ipc_handle);
+}
+
+int dpu_pxlink_set_mst_addr(struct dpu_soc *dpu, int disp_id, u32 val)
+{
+	u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+	u8 ctrl = disp_id ?
+		IMX_SC_C_PXL_LINK_MST2_ADDR : IMX_SC_C_PXL_LINK_MST1_ADDR;
+
+	return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, val);
+}
+
+int dpu_pxlink_set_mst_enable(struct dpu_soc *dpu, int disp_id, bool enable)
+{
+	u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+	u8 ctrl = disp_id ?
+		IMX_SC_C_PXL_LINK_MST2_ENB : IMX_SC_C_PXL_LINK_MST1_ENB;
+
+	return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
+}
+
+int dpu_pxlink_set_mst_valid(struct dpu_soc *dpu, int disp_id, bool enable)
+{
+	u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+	u8 ctrl = disp_id ?
+		IMX_SC_C_PXL_LINK_MST2_VLD : IMX_SC_C_PXL_LINK_MST1_VLD;
+
+	return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
+}
+
+int dpu_pxlink_set_sync_ctrl(struct dpu_soc *dpu, int disp_id, bool enable)
+{
+	u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+	u8 ctrl = disp_id ? IMX_SC_C_SYNC_CTRL1 : IMX_SC_C_SYNC_CTRL0;
+
+	return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
+}
+
+int dpu_pxlink_set_dc_sync_mode(struct dpu_soc *dpu, bool enable)
+{
+	u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+
+	return dpu_sc_misc_set_ctrl(dpu, rsc, IMX_SC_C_MODE, enable);
+}
+
+/* KACHUNK_CNT is needed for blit engine */
+int dpu_sc_misc_set_kachunk_cnt(struct dpu_soc *dpu, u32 cnt)
+{
+	u32 rsc = dpu->id ?
IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+
+	return dpu_sc_misc_set_ctrl(dpu, rsc, IMX_SC_C_KACHUNK_CNT, cnt);
+}
+
+int dpu_sc_misc_init(struct dpu_soc *dpu)
+{
+	int disp_id, err, ret = 0;
+
+	for (disp_id = 0; disp_id < 2; disp_id++) {
+		err = dpu_pxlink_set_mst_addr(dpu, disp_id, 0);
+		err = err ?: dpu_pxlink_set_mst_enable(dpu, disp_id, false);
+		err = err ?: dpu_pxlink_set_mst_valid(dpu, disp_id, false);
+		err = err ?: dpu_pxlink_set_sync_ctrl(dpu, disp_id, false);
+		ret = ret ?: err;	/* keep the first error, don't OR errnos */
+	}
+
+	err = dpu_sc_misc_set_kachunk_cnt(dpu, 32);
+	return ret ?: err;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-store.c b/drivers/gpu/imx/dpu/dpu-store.c
new file mode 100644
index 000000000000..cbd06b83581b
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-store.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2018-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include "dpu-prv.h" + +#define PIXENGCFG_STATIC 0x8 +#define DIV(n) (((n) & 0xFF) << 16) +#define DIV_RESET 0x80 + +struct dpu_store { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_pec_st_read(struct dpu_store *st, unsigned int offset) +{ + return readl(st->pec_base + offset); +} + +static inline void dpu_pec_st_write(struct dpu_store *st, + unsigned int offset, u32 value) +{ + writel(value, st->pec_base + offset); +} + +void store_pixengcfg_syncmode_fixup(struct dpu_store *st, bool enable) +{ + struct dpu_soc *dpu; + u32 val; + + if (!st) + return; + + dpu = st->dpu; + + mutex_lock(&st->mutex); + val = dpu_pec_st_read(st, PIXENGCFG_STATIC); + if (enable) + val |= BIT(16); + else + val &= ~BIT(16); + dpu_pec_st_write(st, PIXENGCFG_STATIC, val); + mutex_unlock(&st->mutex); +} +EXPORT_SYMBOL_GPL(store_pixengcfg_syncmode_fixup); + +struct dpu_store *dpu_st_get(struct dpu_soc *dpu, int id) +{ + struct dpu_store *st; + int i; + + for (i = 0; i < ARRAY_SIZE(st_ids); i++) + if (st_ids[i] == id) + break; + + if (i == ARRAY_SIZE(st_ids)) + return ERR_PTR(-EINVAL); + + st = dpu->st_priv[i]; + + mutex_lock(&st->mutex); + + if (st->inuse) { + mutex_unlock(&st->mutex); + return ERR_PTR(-EBUSY); + } + + st->inuse = true; + + mutex_unlock(&st->mutex); + + return st; +} +EXPORT_SYMBOL_GPL(dpu_st_get); + +void dpu_st_put(struct dpu_store *st) +{ + mutex_lock(&st->mutex); + + st->inuse = false; + + mutex_unlock(&st->mutex); +} +EXPORT_SYMBOL_GPL(dpu_st_put); + +void _dpu_st_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_store *st; + int i; + + for (i = 0; i < ARRAY_SIZE(st_ids); i++) + if (st_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(st_ids))) + return; + + st = dpu->st_priv[i]; + + dpu_pec_st_write(st, 
PIXENGCFG_STATIC, SHDEN | DIV(DIV_RESET)); +} + +int dpu_st_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_store *st; + int i; + + st = devm_kzalloc(dpu->dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(st_ids); i++) + if (st_ids[i] == id) + break; + + if (i == ARRAY_SIZE(st_ids)) + return -EINVAL; + + dpu->st_priv[i] = st; + + st->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_32); + if (!st->pec_base) + return -ENOMEM; + + st->base = devm_ioremap(dpu->dev, base, SZ_256); + if (!st->base) + return -ENOMEM; + + st->dpu = dpu; + st->id = id; + mutex_init(&st->mutex); + + _dpu_st_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-tcon.c b/drivers/gpu/imx/dpu/dpu-tcon.c new file mode 100644 index 000000000000..fe046520b13a --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-tcon.c @@ -0,0 +1,329 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */
+
+#include <linux/io.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include <video/imx8-pc.h>
+#include "dpu-prv.h"
+
+/* TCON register offsets and bit fields */
+#define SSQCNTS			0
+#define SSQCYCLE		0x8
+#define SWRESET			0xC
+#define TCON_CTRL		0x10
+#define BYPASS			BIT(3)
+#define RSDSINVCTRL		0x14
+#define MAPBIT3_0		0x18
+#define MAPBIT7_4		0x1C
+#define MAPBIT11_8		0x20
+#define MAPBIT15_12		0x24
+#define MAPBIT19_16		0x28
+#define MAPBIT23_20		0x2C
+#define MAPBIT27_24		0x30
+#define MAPBIT31_28		0x34
+#define MAPBIT34_32		0x38
+#define MAPBIT3_0_DUAL		0x3C
+#define MAPBIT7_4_DUAL		0x40
+#define MAPBIT11_8_DUAL		0x44
+#define MAPBIT15_12_DUAL	0x48
+#define MAPBIT19_16_DUAL	0x4C
+#define MAPBIT23_20_DUAL	0x50
+#define MAPBIT27_24_DUAL	0x54
+#define MAPBIT31_28_DUAL	0x58
+#define MAPBIT34_32_DUAL	0x5C
+#define SPGPOSON(n)		(0x60 + (n) * 16)
+#define X(n)			(((n) & 0x7FFF) << 16)
+#define Y(n)			((n) & 0x7FFF)
+#define SPGMASKON(n)		(0x64 + (n) * 16)
+#define SPGPOSOFF(n)		(0x68 + (n) * 16)
+#define SPGMASKOFF(n)		(0x6C + (n) * 16)
+#define SMXSIGS(n)		(0x120 + (n) * 8)
+#define SMXFCTTABLE(n)		(0x124 + (n) * 8)
+#define RESET_OVER_UNFERFLOW	0x180
+#define DUAL_DEBUG		0x184
+
+struct dpu_tcon {
+	void __iomem *base;
+	struct mutex mutex;
+	int id;
+	bool inuse;
+	struct dpu_soc *dpu;
+	struct pc *pc;	/* attached pixel combiner, set via tcon_get_pc() */
+};
+
+static inline u32 dpu_tcon_read(struct dpu_tcon *tcon, unsigned int offset)
+{
+	return readl(tcon->base + offset);
+}
+
+static inline void dpu_tcon_write(struct dpu_tcon *tcon,
+				  unsigned int offset, u32 value)
+{
+	writel(value, tcon->base + offset);
+}
+
+/*
+ * tcon_set_fmt - program the TCON output bit mapping for @bus_format
+ *
+ * Returns 0 on success or -EINVAL for an unsupported bus format.
+ */
+int tcon_set_fmt(struct dpu_tcon *tcon, u32 bus_format)
+{
+	switch (bus_format) {
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		dpu_tcon_write(tcon, MAPBIT3_0, 0x19181716);
+		dpu_tcon_write(tcon, MAPBIT7_4, 0x1d1c1b1a);
+		dpu_tcon_write(tcon, MAPBIT11_8, 0x0f0e0d0c);
+		dpu_tcon_write(tcon, MAPBIT15_12, 0x13121110);
+		dpu_tcon_write(tcon, MAPBIT19_16, 0x05040302);
+		dpu_tcon_write(tcon, MAPBIT23_20, 0x09080706);
+		break;
+	case MEDIA_BUS_FMT_RGB101010_1X30:
+	case MEDIA_BUS_FMT_RGB888_1X30_PADLO:
+	case MEDIA_BUS_FMT_RGB666_1X30_PADLO:
+		dpu_tcon_write(tcon, MAPBIT3_0, 0x17161514);
+		dpu_tcon_write(tcon, MAPBIT7_4, 0x1b1a1918);
+		dpu_tcon_write(tcon, MAPBIT11_8, 0x0b0a1d1c);
+		dpu_tcon_write(tcon, MAPBIT15_12, 0x0f0e0d0c);
+		dpu_tcon_write(tcon, MAPBIT19_16, 0x13121110);
+		dpu_tcon_write(tcon, MAPBIT23_20, 0x03020100);
+		dpu_tcon_write(tcon, MAPBIT27_24, 0x07060504);
+		dpu_tcon_write(tcon, MAPBIT31_28, 0x00000908);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tcon_set_fmt);
+
+/* This function is used to workaround TKT320590 which is related to DPR/PRG. */
+void tcon_set_operation_mode(struct dpu_tcon *tcon)
+{
+	u32 val;
+
+	/* clear BYPASS - leave dumb/bypass mode and enter operation mode */
+	val = dpu_tcon_read(tcon, TCON_CTRL);
+	val &= ~BYPASS;
+	dpu_tcon_write(tcon, TCON_CTRL, val);
+}
+EXPORT_SYMBOL_GPL(tcon_set_operation_mode);
+
+/*
+ * tcon_cfg_videomode - program the sync/blank signal generators from @m
+ * @side_by_side: true when two TCONs each drive half of one display;
+ *                horizontal timings are then halved for this TCON.
+ */
+void tcon_cfg_videomode(struct dpu_tcon *tcon,
+			struct drm_display_mode *m, bool side_by_side)
+{
+	u32 val;
+	int hdisplay, hsync_start, hsync_end;
+	int vdisplay, vsync_start, vsync_end;
+	int y;
+
+	hdisplay = m->hdisplay;
+	vdisplay = m->vdisplay;
+	hsync_start = m->hsync_start;
+	vsync_start = m->vsync_start;
+	hsync_end = m->hsync_end;
+	vsync_end = m->vsync_end;
+
+	if (side_by_side) {
+		hdisplay /= 2;
+		hsync_start /= 2;
+		hsync_end /= 2;
+	}
+
+	/*
+	 * TKT320590:
+	 * Turn TCON into operation mode later after the first dumb frame is
+	 * generated by DPU. This makes DPR/PRG be able to evade the frame.
+	 */
+	val = dpu_tcon_read(tcon, TCON_CTRL);
+	val |= BYPASS;
+	dpu_tcon_write(tcon, TCON_CTRL, val);
+
+	/* dsp_control[0]: hsync */
+	dpu_tcon_write(tcon, SPGPOSON(0), X(hsync_start));
+	dpu_tcon_write(tcon, SPGMASKON(0), 0xffff);
+
+	dpu_tcon_write(tcon, SPGPOSOFF(0), X(hsync_end));
+	dpu_tcon_write(tcon, SPGMASKOFF(0), 0xffff);
+
+	dpu_tcon_write(tcon, SMXSIGS(0), 0x2);
+	dpu_tcon_write(tcon, SMXFCTTABLE(0), 0x1);
+
+	/* dsp_control[1]: vsync */
+	dpu_tcon_write(tcon, SPGPOSON(1), X(hsync_start) | Y(vsync_start - 1));
+	dpu_tcon_write(tcon, SPGMASKON(1), 0x0);
+
+	dpu_tcon_write(tcon, SPGPOSOFF(1), X(hsync_start) | Y(vsync_end - 1));
+	dpu_tcon_write(tcon, SPGMASKOFF(1), 0x0);
+
+	dpu_tcon_write(tcon, SMXSIGS(1), 0x3);
+	dpu_tcon_write(tcon, SMXFCTTABLE(1), 0x1);
+
+	/* dsp_control[2]: data enable */
+	/* horizontal */
+	dpu_tcon_write(tcon, SPGPOSON(2), 0x0);
+	dpu_tcon_write(tcon, SPGMASKON(2), 0xffff);
+
+	dpu_tcon_write(tcon, SPGPOSOFF(2), X(hdisplay));
+	dpu_tcon_write(tcon, SPGMASKOFF(2), 0xffff);
+
+	/* vertical */
+	dpu_tcon_write(tcon, SPGPOSON(3), 0x0);
+	dpu_tcon_write(tcon, SPGMASKON(3), 0x7fff0000);
+
+	dpu_tcon_write(tcon, SPGPOSOFF(3), Y(vdisplay));
+	dpu_tcon_write(tcon, SPGMASKOFF(3), 0x7fff0000);
+
+	dpu_tcon_write(tcon, SMXSIGS(2), 0x2c);
+	dpu_tcon_write(tcon, SMXFCTTABLE(2), 0x8);
+
+	/* dsp_control[3]: kachuck */
+	y = vdisplay + 1;
+	/*
+	 * If sync mode fixup is present, the kachuck signal from slave tcon
+	 * should be one line later than the one from master tcon.
+	 */
+	if (side_by_side && tcon_is_slave(tcon))
+		y++;
+
+	dpu_tcon_write(tcon, SPGPOSON(4), X(0x0) | Y(y));
+	dpu_tcon_write(tcon, SPGMASKON(4), 0x0);
+
+	dpu_tcon_write(tcon, SPGPOSOFF(4), X(0x20) | Y(y));
+	dpu_tcon_write(tcon, SPGMASKOFF(4), 0x0);
+
+	dpu_tcon_write(tcon, SMXSIGS(3), 0x6);
+	dpu_tcon_write(tcon, SMXFCTTABLE(3), 0x2);
+}
+EXPORT_SYMBOL_GPL(tcon_cfg_videomode);
+
+bool tcon_is_master(struct dpu_tcon *tcon)
+{
+	const struct dpu_data *data = tcon->dpu->data;
+
+	return tcon->id == data->master_stream_id;
+}
+EXPORT_SYMBOL_GPL(tcon_is_master);
+
+bool tcon_is_slave(struct dpu_tcon *tcon)
+{
+	return !tcon_is_master(tcon);
+}
+EXPORT_SYMBOL_GPL(tcon_is_slave);
+
+void tcon_configure_pc(struct dpu_tcon *tcon, unsigned int di,
+		       unsigned int frame_width, u32 mode, u32 format)
+{
+	if (WARN_ON(!tcon || !tcon->pc))
+		return;
+
+	pc_configure(tcon->pc, di, frame_width, mode, format);
+}
+EXPORT_SYMBOL_GPL(tcon_configure_pc);
+
+void tcon_enable_pc(struct dpu_tcon *tcon)
+{
+	if (WARN_ON(!tcon || !tcon->pc))
+		return;
+
+	pc_enable(tcon->pc);
+}
+EXPORT_SYMBOL_GPL(tcon_enable_pc);
+
+void tcon_disable_pc(struct dpu_tcon *tcon)
+{
+	if (WARN_ON(!tcon || !tcon->pc))
+		return;
+
+	pc_disable(tcon->pc);
+}
+EXPORT_SYMBOL_GPL(tcon_disable_pc);
+
+/*
+ * dpu_tcon_get - acquire exclusive use of the TCON with ID @id
+ *
+ * Returns the TCON on success, -EINVAL for an unknown @id or -EBUSY if
+ * it is already in use. Release with dpu_tcon_put().
+ */
+struct dpu_tcon *dpu_tcon_get(struct dpu_soc *dpu, int id)
+{
+	struct dpu_tcon *tcon;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tcon_ids); i++)
+		if (tcon_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(tcon_ids))
+		return ERR_PTR(-EINVAL);
+
+	tcon = dpu->tcon_priv[i];
+
+	mutex_lock(&tcon->mutex);
+
+	if (tcon->inuse) {
+		mutex_unlock(&tcon->mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	tcon->inuse = true;
+
+	mutex_unlock(&tcon->mutex);
+
+	return tcon;
+}
+EXPORT_SYMBOL_GPL(dpu_tcon_get);
+
+void dpu_tcon_put(struct dpu_tcon *tcon)
+{
+	mutex_lock(&tcon->mutex);
+
+	tcon->inuse = false;
+
+	mutex_unlock(&tcon->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_tcon_put);
+
+/* peek at the companion TCON (ID 0 <-> 1) without claiming it */
+struct dpu_tcon *dpu_aux_tcon_peek(struct dpu_tcon *tcon)
+{
+	return tcon->dpu->tcon_priv[tcon->id ^ 1];
+}
+EXPORT_SYMBOL_GPL(dpu_aux_tcon_peek);
+
+/* intentionally empty: TCON needs no one-time hardware init */
+void _dpu_tcon_init(struct dpu_soc *dpu, unsigned int id)
+{
+}
+
+/*
+ * dpu_tcon_init - allocate and initialize the TCON with ID @id
+ *
+ * NOTE(review): @id indexes tcon_priv[] directly here, unlike sibling
+ * units (dpu_st_init/dpu_vs_init) which translate the ID through an ID
+ * table first - presumably TCON IDs are 0-based and contiguous; verify
+ * against the tcon_ids[] table.
+ */
+int dpu_tcon_init(struct dpu_soc *dpu, unsigned int id,
+		  unsigned long unused, unsigned long base)
+{
+	struct dpu_tcon *tcon;
+
+	tcon = devm_kzalloc(dpu->dev, sizeof(*tcon), GFP_KERNEL);
+	if (!tcon)
+		return -ENOMEM;
+
+	dpu->tcon_priv[id] = tcon;
+
+	tcon->base = devm_ioremap(dpu->dev, base, SZ_512);
+	if (!tcon->base)
+		return -ENOMEM;
+
+	tcon->dpu = dpu;
+	mutex_init(&tcon->mutex);
+
+	return 0;
+}
+
+/* record the pixel combiner handle used by the tcon_*_pc() helpers */
+void tcon_get_pc(struct dpu_tcon *tcon, void *data)
+{
+	if (WARN_ON(!tcon))
+		return;
+
+	tcon->pc = data;
+}
diff --git a/drivers/gpu/imx/dpu/dpu-vscaler.c b/drivers/gpu/imx/dpu/dpu-vscaler.c
new file mode 100644
index 000000000000..b1bdcd596392
--- /dev/null
+++ b/drivers/gpu/imx/dpu/dpu-vscaler.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-prv.h"
+
+/* VScaler register offsets and bit fields */
+#define PIXENGCFG_DYNAMIC		0x8
+#define PIXENGCFG_DYNAMIC_SRC_SEL_MASK	0x3F
+
+#define SETUP1				0xC
+#define SCALE_FACTOR_MASK		0xFFFFF
+#define SCALE_FACTOR(n)			((n) & 0xFFFFF)
+#define SETUP2				0x10
+#define SETUP3				0x14
+#define SETUP4				0x18
+#define SETUP5				0x1C
+#define PHASE_OFFSET_MASK		0x1FFFFF
+#define PHASE_OFFSET(n)			((n) & 0x1FFFFF)
+#define CONTROL				0x20
+#define OUTPUT_SIZE_MASK		0x3FFF0000
+#define OUTPUT_SIZE(n)			((((n) - 1) << 16) & OUTPUT_SIZE_MASK)
+#define FIELD_MODE			0x3000
+#define FILTER_MODE			0x100
+#define SCALE_MODE			0x10
+#define MODE				0x1
+
+/* valid input sources per instance: row 0 -> VS4, row 1 -> VS5, row 2 -> VS9 */
+static const vs_src_sel_t src_sels[3][6] = {
+	{
+		VS_SRC_SEL__DISABLE,
+		VS_SRC_SEL__FETCHDECODE0,
+		VS_SRC_SEL__MATRIX4,
+		VS_SRC_SEL__HSCALER4,
+	}, {
+		VS_SRC_SEL__DISABLE,
+		VS_SRC_SEL__FETCHDECODE1,
+		VS_SRC_SEL__MATRIX5,
+		VS_SRC_SEL__HSCALER5,
+	}, {
+		VS_SRC_SEL__DISABLE,
+		VS_SRC_SEL__MATRIX9,
+		VS_SRC_SEL__HSCALER9,
+	},
+};
+
+struct dpu_vscaler {
+	void __iomem *pec_base;
+	void __iomem *base;
+	struct mutex mutex;
+	int id;
+	bool inuse;
+	struct dpu_soc *dpu;
+	/* see DPU_PLANE_SRC_xxx */
+	unsigned int stream_id;
+};
+
+static inline u32 dpu_pec_vs_read(struct dpu_vscaler *vs,
+				  unsigned int offset)
+{
+	return readl(vs->pec_base + offset);
+}
+
+static inline void dpu_pec_vs_write(struct dpu_vscaler *vs,
+				    unsigned int offset, u32 value)
+{
+	writel(value, vs->pec_base + offset);
+}
+
+static inline u32 dpu_vs_read(struct dpu_vscaler *vs, unsigned int offset)
+{
+	return readl(vs->base + offset);
+}
+
+static inline void dpu_vs_write(struct dpu_vscaler *vs,
+				unsigned int offset, u32 value)
+{
+	writel(value, vs->base + offset);
+}
+
+/*
+ * vscaler_pixengcfg_dynamic_src_sel - select the input source of @vs
+ *
+ * Only sources listed for this instance in src_sels[] are accepted.
+ * Returns 0 on success or -EINVAL for an invalid source.
+ */
+int vscaler_pixengcfg_dynamic_src_sel(struct dpu_vscaler *vs, vs_src_sel_t src)
+{
+	struct dpu_soc *dpu = vs->dpu;
+	const unsigned int vs_id_array[] = {4, 5, 9};
+	int i, j;
+	u32 val;
+
+	/* map the instance ID to its row in src_sels[] */
+	for (i = 0; i < ARRAY_SIZE(vs_id_array); i++)
+		if (vs_id_array[i] == vs->id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(vs_id_array)))
+		return -EINVAL;
+
+	mutex_lock(&vs->mutex);
+	for (j = 0; j < ARRAY_SIZE(src_sels[0]); j++) {
+		if (src_sels[i][j] == src) {
+			val = dpu_pec_vs_read(vs, PIXENGCFG_DYNAMIC);
+			val &= ~PIXENGCFG_DYNAMIC_SRC_SEL_MASK;
+			val |= src;
+			dpu_pec_vs_write(vs, PIXENGCFG_DYNAMIC, val);
+			mutex_unlock(&vs->mutex);
+			return 0;
+		}
+	}
+	mutex_unlock(&vs->mutex);
+
+	dev_err(dpu->dev, "Invalid source for VScaler%d\n", vs->id);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vscaler_pixengcfg_dynamic_src_sel);
+
+void vscaler_pixengcfg_clken(struct dpu_vscaler *vs, pixengcfg_clken_t clken)
+{
+	u32 val;
+
+	mutex_lock(&vs->mutex);
+	val = dpu_pec_vs_read(vs, PIXENGCFG_DYNAMIC);
+	val &= ~CLKEN_MASK;
+	val |= clken << CLKEN_MASK_SHIFT;
+	dpu_pec_vs_write(vs, PIXENGCFG_DYNAMIC, val);
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_pixengcfg_clken);
+
+/* enable/disable shadow registers for this unit */
+void vscaler_shden(struct dpu_vscaler *vs, bool enable)
+{
+	u32 val;
+
+	mutex_lock(&vs->mutex);
+	val = dpu_vs_read(vs, STATICCONTROL);
+	if (enable)
+		val |= SHDEN;
+	else
+		val &= ~SHDEN;
+	dpu_vs_write(vs, STATICCONTROL, val);
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_shden);
+
+/*
+ * vscaler_setup1 - program the vertical scale factor from @src and @dst
+ * line counts
+ *
+ * 0x80000 represents a factor of 1.0; the smaller dimension is always
+ * divided by the larger one, so the programmed factor is <= 1.0.
+ * Deinterlacing doubles the destination height first.
+ */
+void vscaler_setup1(struct dpu_vscaler *vs, u32 src, u32 dst, bool deinterlace)
+{
+	struct dpu_soc *dpu = vs->dpu;
+	u32 scale_factor;
+	u64 tmp64;
+
+	if (deinterlace)
+		dst *= 2;
+
+	if (src == dst) {
+		scale_factor = 0x80000;
+	} else {
+		if (src > dst) {
+			tmp64 = (u64)((u64)dst * 0x80000);
+			do_div(tmp64, src);
+
+		} else {
+			tmp64 = (u64)((u64)src * 0x80000);
+			do_div(tmp64, dst);
+		}
+		scale_factor = (u32)tmp64;
+	}
+
+	WARN_ON(scale_factor > 0x80000);
+
+	mutex_lock(&vs->mutex);
+	dpu_vs_write(vs, SETUP1, SCALE_FACTOR(scale_factor));
+	mutex_unlock(&vs->mutex);
+
+	dev_dbg(dpu->dev, "Vscaler%d scale factor 0x%08x\n",
+		vs->id, scale_factor);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup1);
+
+void vscaler_setup2(struct dpu_vscaler *vs, bool deinterlace)
+{
+	/* 0x20000: +0.25 phase offset for deinterlace */
+	u32 phase_offset = deinterlace ? 0x20000 : 0;
+
+	mutex_lock(&vs->mutex);
+	dpu_vs_write(vs, SETUP2, PHASE_OFFSET(phase_offset));
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup2);
+
+void vscaler_setup3(struct dpu_vscaler *vs, bool deinterlace)
+{
+	/* 0x1e0000: -0.25 phase offset for deinterlace */
+	u32 phase_offset = deinterlace ? 0x1e0000 : 0;
+
+	mutex_lock(&vs->mutex);
+	dpu_vs_write(vs, SETUP3, PHASE_OFFSET(phase_offset));
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup3);
+
+void vscaler_setup4(struct dpu_vscaler *vs, u32 phase_offset)
+{
+	mutex_lock(&vs->mutex);
+	dpu_vs_write(vs, SETUP4, PHASE_OFFSET(phase_offset));
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup4);
+
+void vscaler_setup5(struct dpu_vscaler *vs, u32 phase_offset)
+{
+	mutex_lock(&vs->mutex);
+	dpu_vs_write(vs, SETUP5, PHASE_OFFSET(phase_offset));
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_setup5);
+
+void vscaler_output_size(struct dpu_vscaler *vs, u32 line_num)
+{
+	u32 val;
+
+	mutex_lock(&vs->mutex);
+	val = dpu_vs_read(vs, CONTROL);
+	val &= ~OUTPUT_SIZE_MASK;
+	val |= OUTPUT_SIZE(line_num);
+	dpu_vs_write(vs, CONTROL, val);
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_output_size);
+
+void vscaler_field_mode(struct dpu_vscaler *vs, scaler_field_mode_t m)
+{
+	u32 val;
+
+	mutex_lock(&vs->mutex);
+	val = dpu_vs_read(vs, CONTROL);
+	val &= ~FIELD_MODE;
+	val |= m;
+	dpu_vs_write(vs, CONTROL, val);
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_field_mode);
+
+void vscaler_filter_mode(struct dpu_vscaler *vs, scaler_filter_mode_t m)
+{
+	u32 val;
+
+	mutex_lock(&vs->mutex);
+	val = dpu_vs_read(vs, CONTROL);
+	val &= ~FILTER_MODE;
+	val |= m;
+	dpu_vs_write(vs, CONTROL, val);
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_filter_mode);
+
+void vscaler_scale_mode(struct dpu_vscaler *vs, scaler_scale_mode_t m)
+{
+	u32 val;
+
+	mutex_lock(&vs->mutex);
+	val = dpu_vs_read(vs, CONTROL);
+	val &= ~SCALE_MODE;
+	val |= m;
+	dpu_vs_write(vs, CONTROL, val);
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_scale_mode);
+
+void vscaler_mode(struct dpu_vscaler *vs, scaler_mode_t m)
+{
+	u32 val;
+
+	mutex_lock(&vs->mutex);
+	val = dpu_vs_read(vs, CONTROL);
+	val &= ~MODE;
+	val |= m;
+	dpu_vs_write(vs, CONTROL, val);
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(vscaler_mode);
+
+bool vscaler_is_enabled(struct dpu_vscaler *vs)
+{
+	u32 val;
+
+	mutex_lock(&vs->mutex);
+	val = dpu_vs_read(vs, CONTROL);
+	mutex_unlock(&vs->mutex);
+
+	return (val & MODE) == SCALER_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(vscaler_is_enabled);
+
+dpu_block_id_t vscaler_get_block_id(struct dpu_vscaler *vs)
+{
+	switch (vs->id) {
+	case 4:
+		return ID_VSCALER4;
+	case 5:
+		return ID_VSCALER5;
+	case 9:
+		return ID_VSCALER9;
+	default:
+		WARN_ON(1);
+	}
+
+	return ID_NONE;
+}
+EXPORT_SYMBOL_GPL(vscaler_get_block_id);
+
+unsigned int vscaler_get_stream_id(struct dpu_vscaler *vs)
+{
+	return vs->stream_id;
+}
+EXPORT_SYMBOL_GPL(vscaler_get_stream_id);
+
+void vscaler_set_stream_id(struct dpu_vscaler *vs, unsigned int id)
+{
+	switch (id) {
+	case DPU_PLANE_SRC_TO_DISP_STREAM0:
+	case DPU_PLANE_SRC_TO_DISP_STREAM1:
+	case DPU_PLANE_SRC_DISABLED:
+		vs->stream_id = id;
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+EXPORT_SYMBOL_GPL(vscaler_set_stream_id);
+
+/*
+ * dpu_vs_get - acquire exclusive use of the VScaler with ID @id
+ *
+ * Returns the VScaler on success, -EINVAL for an unknown @id or -EBUSY
+ * if it is already in use. Release with dpu_vs_put().
+ */
+struct dpu_vscaler *dpu_vs_get(struct dpu_soc *dpu, int id)
+{
+	struct dpu_vscaler *vs;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
+		if (vs_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(vs_ids))
+		return ERR_PTR(-EINVAL);
+
+	vs = dpu->vs_priv[i];
+
+	mutex_lock(&vs->mutex);
+
+	if (vs->inuse) {
+		mutex_unlock(&vs->mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	vs->inuse = true;
+
+	mutex_unlock(&vs->mutex);
+
+	return vs;
+}
+EXPORT_SYMBOL_GPL(dpu_vs_get);
+
+void dpu_vs_put(struct dpu_vscaler *vs)
+{
+	mutex_lock(&vs->mutex);
+
+	vs->inuse = false;
+
+	mutex_unlock(&vs->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_vs_put);
+
+/* one-time hardware init: shadowing on, neutral phase offsets, source off */
+void _dpu_vs_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_vscaler *vs;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
+		if (vs_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(vs_ids)))
+		return;
+
+	vs = dpu->vs_priv[i];
+
+	vscaler_shden(vs, true);
+	vscaler_setup2(vs, false);
+	vscaler_setup3(vs, false);
+	vscaler_setup4(vs, 0);
+	vscaler_setup5(vs, 0);
+	vscaler_pixengcfg_dynamic_src_sel(vs, VS_SRC_SEL__DISABLE);
+}
+
+/*
+ * dpu_vs_init - allocate and initialize the VScaler with ID @id
+ *
+ * Maps the unit's pixengcfg and control register windows and runs the
+ * one-time hardware init. All resources are device-managed.
+ * Returns 0 on success, -EINVAL for an unknown @id, or -ENOMEM on
+ * allocation/remap failure.
+ */
+int dpu_vs_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long pec_base, unsigned long base)
+{
+	struct dpu_vscaler *vs;
+	int i;
+
+	vs = devm_kzalloc(dpu->dev, sizeof(*vs), GFP_KERNEL);
+	if (!vs)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
+		if (vs_ids[i] == id)
+			break;
+
+	if (i == ARRAY_SIZE(vs_ids))
+		return -EINVAL;
+
+	dpu->vs_priv[i] = vs;
+
+	vs->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_8);
+	if (!vs->pec_base)
+		return -ENOMEM;
+
+	vs->base = devm_ioremap(dpu->dev, base, SZ_1K);
+	if (!vs->base)
+		return -ENOMEM;
+
+	vs->dpu = dpu;
+	vs->id = id;
+
+	mutex_init(&vs->mutex);
+
+	_dpu_vs_init(dpu, id);
+
+	return 0;
+}
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/imx/ipu-v3/Kconfig
index 061fb990c120..061fb990c120 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/imx/ipu-v3/Kconfig
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/imx/ipu-v3/Makefile
index 5fe5ef20701a..5fe5ef20701a 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/imx/ipu-v3/Makefile
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/imx/ipu-v3/ipu-common.c
index ee2a025e54cf..ee2a025e54cf 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/imx/ipu-v3/ipu-common.c
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/imx/ipu-v3/ipu-cpmem.c
index a1c85d1521f5..a1c85d1521f5 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ 
b/drivers/gpu/imx/ipu-v3/ipu-cpmem.c diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/imx/ipu-v3/ipu-csi.c index 8ae301eef643..8ae301eef643 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/imx/ipu-v3/ipu-csi.c diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/imx/ipu-v3/ipu-dc.c index dbcc16721931..dbcc16721931 100644 --- a/drivers/gpu/ipu-v3/ipu-dc.c +++ b/drivers/gpu/imx/ipu-v3/ipu-dc.c diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/imx/ipu-v3/ipu-di.c index b4a31d506fcc..b4a31d506fcc 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/imx/ipu-v3/ipu-di.c diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/imx/ipu-v3/ipu-dmfc.c index ae682084a10a..ae682084a10a 100644 --- a/drivers/gpu/ipu-v3/ipu-dmfc.c +++ b/drivers/gpu/imx/ipu-v3/ipu-dmfc.c diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/imx/ipu-v3/ipu-dp.c index 8f67e985f26a..8f67e985f26a 100644 --- a/drivers/gpu/ipu-v3/ipu-dp.c +++ b/drivers/gpu/imx/ipu-v3/ipu-dp.c diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/imx/ipu-v3/ipu-ic.c index 846461bac70d..846461bac70d 100644 --- a/drivers/gpu/ipu-v3/ipu-ic.c +++ b/drivers/gpu/imx/ipu-v3/ipu-ic.c diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/imx/ipu-v3/ipu-image-convert.c index eeca50d9a1ee..eeca50d9a1ee 100644 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/imx/ipu-v3/ipu-image-convert.c diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/imx/ipu-v3/ipu-pre.c index ad82c9e0252f..ad82c9e0252f 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/imx/ipu-v3/ipu-pre.c diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/imx/ipu-v3/ipu-prg.c index 196797c1b4b3..196797c1b4b3 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/imx/ipu-v3/ipu-prg.c diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/imx/ipu-v3/ipu-prv.h index 291ac1bab66d..291ac1bab66d 100644 --- a/drivers/gpu/ipu-v3/ipu-prv.h +++ b/drivers/gpu/imx/ipu-v3/ipu-prv.h diff --git 
a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/imx/ipu-v3/ipu-smfc.c index 46ffc0a5906d..46ffc0a5906d 100644 --- a/drivers/gpu/ipu-v3/ipu-smfc.c +++ b/drivers/gpu/imx/ipu-v3/ipu-smfc.c diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/imx/ipu-v3/ipu-vdi.c index a593b232b6d3..a593b232b6d3 100644 --- a/drivers/gpu/ipu-v3/ipu-vdi.c +++ b/drivers/gpu/imx/ipu-v3/ipu-vdi.c diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 427a993c7f57..df9bd4b452f9 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -15,7 +15,7 @@ source "drivers/char/agp/Kconfig" source "drivers/gpu/vga/Kconfig" source "drivers/gpu/host1x/Kconfig" -source "drivers/gpu/ipu-v3/Kconfig" +source "drivers/gpu/imx/Kconfig" source "drivers/gpu/drm/Kconfig" diff --git a/include/video/dpu.h b/include/video/dpu.h new file mode 100644 index 000000000000..b541d07513ef --- /dev/null +++ b/include/video/dpu.h @@ -0,0 +1,718 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#ifndef __DRM_DPU_H__ +#define __DRM_DPU_H__ + +#include <drm/drm_crtc.h> +#include <drm/drm_modes.h> +#include <video/videomode.h> + +struct dpu_soc; + +enum dpu_irq { + IRQ_STORE9_SHDLOAD = 0, + IRQ_STORE9_FRAMECOMPLETE = 1, + IRQ_STORE9_SEQCOMPLETE = 2, + IRQ_EXTDST0_SHDLOAD = 3, + IRQ_EXTDST0_FRAMECOMPLETE = 4, + IRQ_EXTDST0_SEQCOMPLETE = 5, + IRQ_EXTDST4_SHDLOAD = 6, + IRQ_EXTDST4_FRAMECOMPLETE = 7, + IRQ_EXTDST4_SEQCOMPLETE = 8, + IRQ_EXTDST1_SHDLOAD = 9, + IRQ_EXTDST1_FRAMECOMPLETE = 10, + IRQ_EXTDST1_SEQCOMPLETE = 11, + IRQ_EXTDST5_SHDLOAD = 12, + IRQ_EXTDST5_FRAMECOMPLETE = 13, + IRQ_EXTDST5_SEQCOMPLETE = 14, + IRQ_DISENGCFG_SHDLOAD0 = 15, + IRQ_DISENGCFG_FRAMECOMPLETE0 = 16, + IRQ_DISENGCFG_SEQCOMPLETE0 = 17, + IRQ_FRAMEGEN0_INT0 = 18, + IRQ_FRAMEGEN0_INT1 = 19, + IRQ_FRAMEGEN0_INT2 = 20, + IRQ_FRAMEGEN0_INT3 = 21, + IRQ_SIG0_SHDLOAD = 22, + IRQ_SIG0_VALID = 23, + IRQ_SIG0_ERROR = 24, + IRQ_DISENGCFG_SHDLOAD1 = 25, + IRQ_DISENGCFG_FRAMECOMPLETE1 = 26, + IRQ_DISENGCFG_SEQCOMPLETE1 = 27, + IRQ_FRAMEGEN1_INT0 = 28, + IRQ_FRAMEGEN1_INT1 = 29, + IRQ_FRAMEGEN1_INT2 = 30, + IRQ_FRAMEGEN1_INT3 = 31, + IRQ_SIG1_SHDLOAD = 32, + IRQ_SIG1_VALID = 33, + IRQ_SIG1_ERROR = 34, + IRQ_RESERVED = 35, + IRQ_CMDSEQ_ERROR = 36, + IRQ_COMCTRL_SW0 = 37, + IRQ_COMCTRL_SW1 = 38, + IRQ_COMCTRL_SW2 = 39, + IRQ_COMCTRL_SW3 = 40, + IRQ_FRAMEGEN0_PRIMSYNC_ON = 41, + IRQ_FRAMEGEN0_PRIMSYNC_OFF = 42, + IRQ_FRAMEGEN0_SECSYNC_ON = 43, + IRQ_FRAMEGEN0_SECSYNC_OFF = 44, + IRQ_FRAMEGEN1_PRIMSYNC_ON = 45, + IRQ_FRAMEGEN1_PRIMSYNC_OFF = 46, + IRQ_FRAMEGEN1_SECSYNC_ON = 47, + IRQ_FRAMEGEN1_SECSYNC_OFF = 48, +}; + +typedef enum { + ID_NONE = 0x00, /* 0 */ + ID_FETCHDECODE9 = 0x01, /* 1 */ + ID_FETCHPERSP9 = 0x02, /* 2 */ + ID_FETCHECO9 = 0x03, /* 3 */ + ID_ROP9 = 0x04, /* 4 */ + ID_CLUT9 = 0x05, /* 5 */ + ID_MATRIX9 = 0x06, /* 6 */ + ID_HSCALER9 = 0x07, /* 7 */ + ID_VSCALER9 = 0x08, /* 8 */ + ID_FILTER9 = 0x09, /* 9 */ + ID_BLITBLEND9 = 0x0A, /* 10 */ + ID_CONSTFRAME0 = 0x0C, /* 12 */ + 
ID_CONSTFRAME4 = 0x0E, /* 14 */ + ID_CONSTFRAME1 = 0x10, /* 16 */ + ID_CONSTFRAME5 = 0x12, /* 18 */ + ID_FETCHWARP2 = 0x14, /* 20 */ + ID_FETCHECO2 = 0x15, /* 21 */ + ID_FETCHDECODE0 = 0x16, /* 22 */ + ID_FETCHECO0 = 0x17, /* 23 */ + ID_FETCHDECODE1 = 0x18, /* 24 */ + ID_FETCHECO1 = 0x19, /* 25 */ + ID_FETCHLAYER0 = 0x1a, /* 26 */ + ID_MATRIX4 = 0x1B, /* 27 */ + ID_HSCALER4 = 0x1C, /* 28 */ + ID_VSCALER4 = 0x1D, /* 29 */ + ID_MATRIX5 = 0x1E, /* 30 */ + ID_HSCALER5 = 0x1F, /* 31 */ + ID_VSCALER5 = 0x20, /* 32 */ + ID_LAYERBLEND0 = 0x21, /* 33 */ + ID_LAYERBLEND1 = 0x22, /* 34 */ + ID_LAYERBLEND2 = 0x23, /* 35 */ + ID_LAYERBLEND3 = 0x24, /* 36 */ +} dpu_block_id_t; + +typedef enum { + ED_SRC_DISABLE = ID_NONE, + ED_SRC_BLITBLEND9 = ID_BLITBLEND9, + ED_SRC_CONSTFRAME0 = ID_CONSTFRAME0, + ED_SRC_CONSTFRAME1 = ID_CONSTFRAME1, + ED_SRC_CONSTFRAME4 = ID_CONSTFRAME4, + ED_SRC_CONSTFRAME5 = ID_CONSTFRAME5, + ED_SRC_MATRIX4 = ID_MATRIX4, + ED_SRC_HSCALER4 = ID_HSCALER4, + ED_SRC_VSCALER4 = ID_VSCALER4, + /* content stream(extdst 0/1) only */ + ED_SRC_MATRIX5 = ID_MATRIX5, + ED_SRC_HSCALER5 = ID_HSCALER5, + ED_SRC_VSCALER5 = ID_VSCALER5, + /* content stream(extdst 0/1) only */ + ED_SRC_LAYERBLEND3 = ID_LAYERBLEND3, + ED_SRC_LAYERBLEND2 = ID_LAYERBLEND2, + ED_SRC_LAYERBLEND1 = ID_LAYERBLEND1, + ED_SRC_LAYERBLEND0 = ID_LAYERBLEND0, +} extdst_src_sel_t; + +typedef enum { + SINGLE, /* Reconfig pipeline after explicit trigger */ + AUTO, /* Reconfig pipeline after every kick when idle */ +} ed_sync_mode_t; + +typedef enum { + PSTATUS_EMPTY, + PSTATUS_RUNNING, + PSTATUS_RUNNING_RETRIGGERED, + PSTATUS_RESERVED +} ed_pipeline_status_t; + +typedef enum { + SOFTWARE = 0, /* kick generation by KICK field only */ + EXTERNAL = BIT(8), /* kick signal from external allowed */ +} ed_kick_mode_t; + +typedef enum { + FD_SRC_DISABLE = ID_NONE, + FD_SRC_FETCHECO0 = ID_FETCHECO0, + FD_SRC_FETCHECO1 = ID_FETCHECO1, + FD_SRC_FETCHECO2 = ID_FETCHECO2, + FD_SRC_FETCHDECODE0 = ID_FETCHDECODE0, + 
FD_SRC_FETCHDECODE1 = ID_FETCHDECODE1, + FD_SRC_FETCHWARP2 = ID_FETCHWARP2, +} fd_dynamic_src_sel_t; + +typedef enum { + /* RL and RLAD decoder */ + FETCHTYPE__DECODE, + /* fractional plane(8 layers) */ + FETCHTYPE__LAYER, + /* arbitrary warping and fractional plane(8 layers) */ + FETCHTYPE__WARP, + /* minimum feature set for alpha, chroma and coordinate planes */ + FETCHTYPE__ECO, + /* affine, perspective and arbitrary warping */ + FETCHTYPE__PERSP, + /* affine and arbitrary warping */ + FETCHTYPE__ROT, + /* RL and RLAD decoder, reduced feature set */ + FETCHTYPE__DECODEL, + /* fractional plane(8 layers), reduced feature set */ + FETCHTYPE__LAYERL, + /* affine and arbitrary warping, reduced feature set */ + FETCHTYPE__ROTL, +} fetchtype_t; + +typedef enum { + /* No side-by-side synchronization. */ + FGSYNCMODE__OFF = 0, + /* Framegen is master. */ + FGSYNCMODE__MASTER = 1 << 1, + /* Runs in cyclic synchronization mode. */ + FGSYNCMODE__SLAVE_CYC = 2 << 1, + /* Runs in one time synchronization mode. */ + FGSYNCMODE__SLAVE_ONCE = 3 << 1, +} fgsyncmode_t; + +typedef enum { + FGDM__BLACK, + /* Constant Color Background is shown. */ + FGDM__CONSTCOL, + FGDM__PRIM, + FGDM__SEC, + FGDM__PRIM_ON_TOP, + FGDM__SEC_ON_TOP, + /* White color background with test pattern is shown. 
*/ + FGDM__TEST, +} fgdm_t; + +typedef enum { + HS_SRC_SEL__DISABLE = ID_NONE, + HS_SRC_SEL__MATRIX9 = ID_MATRIX9, + HS_SRC_SEL__VSCALER9 = ID_VSCALER9, + HS_SRC_SEL__FILTER9 = ID_FILTER9, + HS_SRC_SEL__FETCHDECODE0 = ID_FETCHDECODE0, + HS_SRC_SEL__FETCHDECODE1 = ID_FETCHDECODE1, + HS_SRC_SEL__MATRIX4 = ID_MATRIX4, + HS_SRC_SEL__VSCALER4 = ID_VSCALER4, + HS_SRC_SEL__MATRIX5 = ID_MATRIX5, + HS_SRC_SEL__VSCALER5 = ID_VSCALER5, +} hs_src_sel_t; + +typedef enum { + /* common options */ + LB_PRIM_SEL__DISABLE = ID_NONE, + LB_PRIM_SEL__BLITBLEND9 = ID_BLITBLEND9, + LB_PRIM_SEL__CONSTFRAME0 = ID_CONSTFRAME0, + LB_PRIM_SEL__CONSTFRAME1 = ID_CONSTFRAME1, + LB_PRIM_SEL__CONSTFRAME4 = ID_CONSTFRAME4, + LB_PRIM_SEL__CONSTFRAME5 = ID_CONSTFRAME5, + LB_PRIM_SEL__MATRIX4 = ID_MATRIX4, + LB_PRIM_SEL__HSCALER4 = ID_HSCALER4, + LB_PRIM_SEL__VSCALER4 = ID_VSCALER4, + LB_PRIM_SEL__MATRIX5 = ID_MATRIX5, + LB_PRIM_SEL__HSCALER5 = ID_HSCALER5, + LB_PRIM_SEL__VSCALER5 = ID_VSCALER5, + /* + * special options: + * layerblend(n) has n special options, + * from layerblend0 to layerblend(n - 1), e.g., + * layerblend3 has 3 special options - + * layerblend0/1/2. 
+ */ + LB_PRIM_SEL__LAYERBLEND3 = ID_LAYERBLEND3, + LB_PRIM_SEL__LAYERBLEND2 = ID_LAYERBLEND2, + LB_PRIM_SEL__LAYERBLEND1 = ID_LAYERBLEND1, + LB_PRIM_SEL__LAYERBLEND0 = ID_LAYERBLEND0, +} lb_prim_sel_t; + +typedef enum { + LB_SEC_SEL__DISABLE = ID_NONE, + LB_SEC_SEL__FETCHWARP2 = ID_FETCHWARP2, + LB_SEC_SEL__FETCHDECODE0 = ID_FETCHDECODE0, + LB_SEC_SEL__FETCHDECODE1 = ID_FETCHDECODE1, + LB_SEC_SEL__MATRIX4 = ID_MATRIX4, + LB_SEC_SEL__HSCALER4 = ID_HSCALER4, + LB_SEC_SEL__VSCALER4 = ID_VSCALER4, + LB_SEC_SEL__MATRIX5 = ID_MATRIX5, + LB_SEC_SEL__HSCALER5 = ID_HSCALER5, + LB_SEC_SEL__VSCALER5 = ID_VSCALER5, + LB_SEC_SEL__FETCHLAYER0 = ID_FETCHLAYER0, +} lb_sec_sel_t; + +typedef enum { + PRIMARY, /* background plane */ + SECONDARY, /* foreground plane */ + BOTH, +} lb_shadow_sel_t; + +typedef enum { + LB_NEUTRAL, /* Output is same as primary input. */ + LB_BLEND, +} lb_mode_t; + +typedef enum { + /* Constant 0 indicates frame or top field. */ + SCALER_ALWAYS0 = 0x0, + /* Constant 1 indicates bottom field. */ + SCALER_ALWAYS1 = 0x1 << 12, + /* Output field polarity is taken from input field polarity. */ + SCALER_INPUT = 0x2 << 12, + /* Output field polarity toggles, starting with 0 after reset. */ + SCALER_TOGGLE = 0x3 << 12, +} scaler_field_mode_t; + +typedef enum { + /* pointer-sampling */ + SCALER_NEAREST = 0x0, + /* box filter */ + SCALER_LINEAR = 0x100, +} scaler_filter_mode_t; + +typedef enum { + SCALER_DOWNSCALE = 0x0, + SCALER_UPSCALE = 0x10, +} scaler_scale_mode_t; + +typedef enum { + /* Pixel by-pass the scaler, all other settings are ignored. */ + SCALER_NEUTRAL = 0x0, + /* Scaler is active. 
*/ + SCALER_ACTIVE = 0x1, +} scaler_mode_t; + +typedef enum { + VS_SRC_SEL__DISABLE = ID_NONE, + VS_SRC_SEL__MATRIX9 = ID_MATRIX9, + VS_SRC_SEL__HSCALER9 = ID_HSCALER9, + VS_SRC_SEL__FETCHDECODE0 = ID_FETCHDECODE0, + VS_SRC_SEL__FETCHDECODE1 = ID_FETCHDECODE1, + VS_SRC_SEL__MATRIX4 = ID_MATRIX4, + VS_SRC_SEL__HSCALER4 = ID_HSCALER4, + VS_SRC_SEL__MATRIX5 = ID_MATRIX5, + VS_SRC_SEL__HSCALER5 = ID_HSCALER5, +} vs_src_sel_t; + +#define CLKEN_MASK (0x3 << 24) +#define CLKEN_MASK_SHIFT 24 +typedef enum { + CLKEN__DISABLE = 0x0, + CLKEN__AUTOMATIC = 0x1, + CLKEN__FULL = 0x3, +} pixengcfg_clken_t; + +/* fetch unit types */ +enum { + FU_T_NA, + FU_T_FD, + FU_T_FE, + FU_T_FL, + FU_T_FW, +}; + +struct dpu_fetchunit; + +struct dpu_fetchunit_ops { + void (*set_burstlength)(struct dpu_fetchunit *fu, + unsigned int x_offset, unsigned int mt_w, + int bpp, dma_addr_t baddr, bool use_prefetch); + + void (*set_baseaddress)(struct dpu_fetchunit *fu, unsigned int width, + unsigned int x_offset, unsigned int y_offset, + unsigned int mt_w, unsigned int mt_h, + int bpp, dma_addr_t baddr); + + void (*set_src_bpp)(struct dpu_fetchunit *fu, int bpp); + + void (*set_src_stride)(struct dpu_fetchunit *fu, + unsigned int width, unsigned int x_offset, + unsigned int mt_w, int bpp, unsigned int stride, + dma_addr_t baddr, bool use_prefetch); + + void (*set_src_buf_dimensions)(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, u32 fmt, + bool deinterlace); + + void (*set_fmt)(struct dpu_fetchunit *fu, u32 fmt, + enum drm_color_encoding color_encoding, + enum drm_color_range color_range, + bool deinterlace); + + void (*set_pixel_blend_mode)(struct dpu_fetchunit *fu, + unsigned int pixel_blend_mode, u16 alpha, + u32 fb_format); + + void (*enable_src_buf)(struct dpu_fetchunit *fu); + void (*disable_src_buf)(struct dpu_fetchunit *fu); + bool (*is_enabled)(struct dpu_fetchunit *fu); + + void (*set_framedimensions)(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + bool deinterlace); 
+ + void (*set_controltrigger)(struct dpu_fetchunit *fu); + + unsigned int (*get_stream_id)(struct dpu_fetchunit *fu); + void (*set_stream_id)(struct dpu_fetchunit *fu, unsigned int id); + + void (*pin_off)(struct dpu_fetchunit *fu); + void (*unpin_off)(struct dpu_fetchunit *fu); + bool (*is_pinned_off)(struct dpu_fetchunit *fu); +}; + +struct dpu_fetchunit { + void __iomem *pec_base; + void __iomem *base; + char *name; + struct mutex mutex; + int id; + int sub_id; /* for fractional fetch units */ + int type; + bool inuse; + struct dpu_soc *dpu; + /* see DPU_PLANE_SRC_xxx */ + unsigned int stream_id; + bool pin_off; + struct dprc *dprc; + const struct dpu_fetchunit_ops *ops; +}; + +int dpu_map_irq(struct dpu_soc *dpu, int irq); + +/* Constant Frame Unit */ +struct dpu_constframe; +void constframe_shden(struct dpu_constframe *cf, bool enable); +void constframe_framedimensions(struct dpu_constframe *cf, unsigned int w, + unsigned int h); +void constframe_framedimensions_copy_prim(struct dpu_constframe *cf); +void constframe_constantcolor(struct dpu_constframe *cf, unsigned int r, + unsigned int g, unsigned int b, unsigned int a); +void constframe_controltrigger(struct dpu_constframe *cf, bool trigger); +struct dpu_constframe *dpu_cf_get(struct dpu_soc *dpu, int id); +void dpu_cf_put(struct dpu_constframe *cf); +struct dpu_constframe *dpu_aux_cf_peek(struct dpu_constframe *cf); + +/* Display Engine Configuration Unit */ +struct dpu_disengcfg; +struct dpu_disengcfg *dpu_dec_get(struct dpu_soc *dpu, int id); +void dpu_dec_put(struct dpu_disengcfg *dec); +struct dpu_disengcfg *dpu_aux_dec_peek(struct dpu_disengcfg *dec); + +/* External Destination Unit */ +struct dpu_extdst; +void extdst_pixengcfg_shden(struct dpu_extdst *ed, bool enable); +void extdst_pixengcfg_powerdown(struct dpu_extdst *ed, bool powerdown); +void extdst_pixengcfg_sync_mode(struct dpu_extdst *ed, ed_sync_mode_t mode); +void extdst_pixengcfg_reset(struct dpu_extdst *ed, bool reset); +void 
extdst_pixengcfg_div(struct dpu_extdst *ed, u16 div); +void extdst_pixengcfg_syncmode_master(struct dpu_extdst *ed, bool enable); +int extdst_pixengcfg_src_sel(struct dpu_extdst *ed, extdst_src_sel_t src); +void extdst_pixengcfg_sel_shdldreq(struct dpu_extdst *ed); +void extdst_pixengcfg_shdldreq(struct dpu_extdst *ed, u32 req_mask); +void extdst_pixengcfg_sync_trigger(struct dpu_extdst *ed); +void extdst_pixengcfg_trigger_sequence_complete(struct dpu_extdst *ed); +bool extdst_pixengcfg_is_sync_busy(struct dpu_extdst *ed); +ed_pipeline_status_t extdst_pixengcfg_pipeline_status(struct dpu_extdst *ed); +void extdst_shden(struct dpu_extdst *ed, bool enable); +void extdst_kick_mode(struct dpu_extdst *ed, ed_kick_mode_t mode); +void extdst_perfcountmode(struct dpu_extdst *ed, bool enable); +void extdst_gamma_apply_enable(struct dpu_extdst *ed, bool enable); +void extdst_kick(struct dpu_extdst *ed); +void extdst_cnt_err_clear(struct dpu_extdst *ed); +bool extdst_cnt_err_status(struct dpu_extdst *ed); +u32 extdst_last_control_word(struct dpu_extdst *ed); +void extdst_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y); +void extdst_last_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y); +u32 extdst_perfresult(struct dpu_extdst *ed); +bool extdst_is_master(struct dpu_extdst *ed); +struct dpu_extdst *dpu_ed_get(struct dpu_soc *dpu, int id); +void dpu_ed_put(struct dpu_extdst *ed); +struct dpu_extdst *dpu_aux_ed_peek(struct dpu_extdst *ed); + +/* Fetch Decode Unit */ +int fetchdecode_pixengcfg_dynamic_src_sel(struct dpu_fetchunit *fu, + fd_dynamic_src_sel_t src); +void fetchdecode_layeroffset(struct dpu_fetchunit *fd, unsigned int x, + unsigned int y); +void fetchdecode_clipoffset(struct dpu_fetchunit *fd, unsigned int x, + unsigned int y); +void fetchdecode_clipdimensions(struct dpu_fetchunit *fd, unsigned int w, + unsigned int h); +void fetchdecode_rgb_constantcolor(struct dpu_fetchunit *fd, + u8 r, u8 g, u8 b, u8 a); +void fetchdecode_yuv_constantcolor(struct dpu_fetchunit 
*fd, + u8 y, u8 u, u8 v); +int fetchdecode_fetchtype(struct dpu_fetchunit *fd, fetchtype_t *type); +u32 fetchdecode_get_vproc_mask(struct dpu_fetchunit *fd); +bool fetchdecode_need_fetcheco(struct dpu_fetchunit *fd, u32 fmt); +struct dpu_fetchunit *dpu_fd_get(struct dpu_soc *dpu, int id); +void dpu_fd_put(struct dpu_fetchunit *fu); + +/* Fetch ECO Unit */ +void fetcheco_layeroffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y); +void fetcheco_clipoffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y); +void fetcheco_clipdimensions(struct dpu_fetchunit *fu, unsigned int w, + unsigned int h); +void fetcheco_frameresampling(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y); +int fetcheco_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type); +dpu_block_id_t fetcheco_get_block_id(struct dpu_fetchunit *fu); +struct dpu_fetchunit *dpu_fe_get(struct dpu_soc *dpu, int id); +void dpu_fe_put(struct dpu_fetchunit *fu); + +/* Fetch Layer Unit */ +void fetchlayer_rgb_constantcolor(struct dpu_fetchunit *fu, + u8 r, u8 g, u8 b, u8 a); +void fetchlayer_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v); +int fetchlayer_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type); +struct dpu_fetchunit *dpu_fl_get(struct dpu_soc *dpu, int id); +void dpu_fl_put(struct dpu_fetchunit *fu); + +/* Fetch Warp Unit */ +void fetchwarp_rgb_constantcolor(struct dpu_fetchunit *fu, + u8 r, u8 g, u8 b, u8 a); +void fetchwarp_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v); +int fetchwarp_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type); +struct dpu_fetchunit *dpu_fw_get(struct dpu_soc *dpu, int id); +void dpu_fw_put(struct dpu_fetchunit *fu); + +/* Frame Generator Unit */ +struct dpu_framegen; +void framegen_enable(struct dpu_framegen *fg); +void framegen_disable(struct dpu_framegen *fg); +void framegen_enable_pixel_link(struct dpu_framegen *fg); +void framegen_disable_pixel_link(struct dpu_framegen *fg); +void 
framegen_shdtokgen(struct dpu_framegen *fg); +void framegen_syncmode(struct dpu_framegen *fg, fgsyncmode_t mode); +void framegen_cfg_videomode(struct dpu_framegen *fg, struct drm_display_mode *m, + bool side_by_side, unsigned int encoder_type); +void framegen_pkickconfig(struct dpu_framegen *fg, bool enable); +void framegen_syncmode_fixup(struct dpu_framegen *fg, bool enable); +void framegen_displaymode(struct dpu_framegen *fg, fgdm_t mode); +void framegen_panic_displaymode(struct dpu_framegen *fg, fgdm_t mode); +void framegen_wait_done(struct dpu_framegen *fg, struct drm_display_mode *m); +void framegen_read_timestamp(struct dpu_framegen *fg, + u32 *frame_index, u32 *line_index); +void framegen_wait_for_frame_counter_moving(struct dpu_framegen *fg); +bool framegen_secondary_requests_to_read_empty_fifo(struct dpu_framegen *fg); +void framegen_secondary_clear_channel_status(struct dpu_framegen *fg); +bool framegen_secondary_is_syncup(struct dpu_framegen *fg); +void framegen_wait_for_secondary_syncup(struct dpu_framegen *fg); +void framegen_enable_clock(struct dpu_framegen *fg); +void framegen_disable_clock(struct dpu_framegen *fg); +bool framegen_is_master(struct dpu_framegen *fg); +bool framegen_is_slave(struct dpu_framegen *fg); +struct dpu_framegen *dpu_fg_get(struct dpu_soc *dpu, int id); +void dpu_fg_put(struct dpu_framegen *fg); +struct dpu_framegen *dpu_aux_fg_peek(struct dpu_framegen *fg); + +/* Horizontal Scaler Unit */ +struct dpu_hscaler; +int hscaler_pixengcfg_dynamic_src_sel(struct dpu_hscaler *hs, hs_src_sel_t src); +void hscaler_pixengcfg_clken(struct dpu_hscaler *hs, pixengcfg_clken_t clken); +void hscaler_shden(struct dpu_hscaler *hs, bool enable); +void hscaler_setup1(struct dpu_hscaler *hs, unsigned int src, unsigned int dst); +void hscaler_setup2(struct dpu_hscaler *hs, u32 phase_offset); +void hscaler_output_size(struct dpu_hscaler *hs, u32 line_num); +void hscaler_filter_mode(struct dpu_hscaler *hs, scaler_filter_mode_t m); +void 
hscaler_scale_mode(struct dpu_hscaler *hs, scaler_scale_mode_t m); +void hscaler_mode(struct dpu_hscaler *hs, scaler_mode_t m); +bool hscaler_is_enabled(struct dpu_hscaler *hs); +dpu_block_id_t hscaler_get_block_id(struct dpu_hscaler *hs); +unsigned int hscaler_get_stream_id(struct dpu_hscaler *hs); +void hscaler_set_stream_id(struct dpu_hscaler *hs, unsigned int id); +struct dpu_hscaler *dpu_hs_get(struct dpu_soc *dpu, int id); +void dpu_hs_put(struct dpu_hscaler *hs); + +/* Layer Blend Unit */ +struct dpu_layerblend; +int layerblend_pixengcfg_dynamic_prim_sel(struct dpu_layerblend *lb, + lb_prim_sel_t prim); +void layerblend_pixengcfg_dynamic_sec_sel(struct dpu_layerblend *lb, + lb_sec_sel_t sec); +void layerblend_pixengcfg_clken(struct dpu_layerblend *lb, + pixengcfg_clken_t clken); +void layerblend_shden(struct dpu_layerblend *lb, bool enable); +void layerblend_shdtoksel(struct dpu_layerblend *lb, lb_shadow_sel_t sel); +void layerblend_shdldsel(struct dpu_layerblend *lb, lb_shadow_sel_t sel); +void layerblend_control(struct dpu_layerblend *lb, lb_mode_t mode); +void layerblend_blendcontrol(struct dpu_layerblend *lb, unsigned int zpos, + unsigned int pixel_blend_mode, u16 alpha); +void layerblend_position(struct dpu_layerblend *lb, int x, int y); +struct dpu_layerblend *dpu_lb_get(struct dpu_soc *dpu, int id); +void dpu_lb_put(struct dpu_layerblend *lb); + +/* Store Unit */ +struct dpu_store; +void store_pixengcfg_syncmode_fixup(struct dpu_store *st, bool enable); +struct dpu_store *dpu_st_get(struct dpu_soc *dpu, int id); +void dpu_st_put(struct dpu_store *st); + +/* Timing Controller Unit */ +struct dpu_tcon; +int tcon_set_fmt(struct dpu_tcon *tcon, u32 bus_format); +void tcon_set_operation_mode(struct dpu_tcon *tcon); +void tcon_cfg_videomode(struct dpu_tcon *tcon, + struct drm_display_mode *m, bool side_by_side); +bool tcon_is_master(struct dpu_tcon *tcon); +bool tcon_is_slave(struct dpu_tcon *tcon); +void tcon_configure_pc(struct dpu_tcon *tcon, unsigned 
int di, + unsigned int frame_width, u32 mode, u32 format); +void tcon_enable_pc(struct dpu_tcon *tcon); +void tcon_disable_pc(struct dpu_tcon *tcon); +struct dpu_tcon *dpu_tcon_get(struct dpu_soc *dpu, int id); +void dpu_tcon_put(struct dpu_tcon *tcon); +struct dpu_tcon *dpu_aux_tcon_peek(struct dpu_tcon *tcon); + +/* Vertical Scaler Unit */ +struct dpu_vscaler; +int vscaler_pixengcfg_dynamic_src_sel(struct dpu_vscaler *vs, vs_src_sel_t src); +void vscaler_pixengcfg_clken(struct dpu_vscaler *vs, pixengcfg_clken_t clken); +void vscaler_shden(struct dpu_vscaler *vs, bool enable); +void vscaler_setup1(struct dpu_vscaler *vs, u32 src, u32 dst, bool deinterlace); +void vscaler_setup2(struct dpu_vscaler *vs, bool deinterlace); +void vscaler_setup3(struct dpu_vscaler *vs, bool deinterlace); +void vscaler_setup4(struct dpu_vscaler *vs, u32 phase_offset); +void vscaler_setup5(struct dpu_vscaler *vs, u32 phase_offset); +void vscaler_output_size(struct dpu_vscaler *vs, u32 line_num); +void vscaler_field_mode(struct dpu_vscaler *vs, scaler_field_mode_t m); +void vscaler_filter_mode(struct dpu_vscaler *vs, scaler_filter_mode_t m); +void vscaler_scale_mode(struct dpu_vscaler *vs, scaler_scale_mode_t m); +void vscaler_mode(struct dpu_vscaler *vs, scaler_mode_t m); +bool vscaler_is_enabled(struct dpu_vscaler *vs); +dpu_block_id_t vscaler_get_block_id(struct dpu_vscaler *vs); +unsigned int vscaler_get_stream_id(struct dpu_vscaler *vs); +void vscaler_set_stream_id(struct dpu_vscaler *vs, unsigned int id); +struct dpu_vscaler *dpu_vs_get(struct dpu_soc *dpu, int id); +void dpu_vs_put(struct dpu_vscaler *vs); + +struct dpu_fetchunit *fetchdecode_get_fetcheco(struct dpu_fetchunit *fu); +struct dpu_hscaler *fetchdecode_get_hscaler(struct dpu_fetchunit *fu); +struct dpu_vscaler *fetchdecode_get_vscaler(struct dpu_fetchunit *fu); + +unsigned int dpu_get_syncmode_min_prate(struct dpu_soc *dpu); +unsigned int dpu_get_singlemode_max_width(struct dpu_soc *dpu); +unsigned int 
dpu_get_master_stream_id(struct dpu_soc *dpu); + +bool dpu_vproc_has_fetcheco_cap(u32 cap_mask); +bool dpu_vproc_has_hscale_cap(u32 cap_mask); +bool dpu_vproc_has_vscale_cap(u32 cap_mask); + +u32 dpu_vproc_get_fetcheco_cap(u32 cap_mask); +u32 dpu_vproc_get_hscale_cap(u32 cap_mask); +u32 dpu_vproc_get_vscale_cap(u32 cap_mask); + +unsigned int fetchunit_burst_size_fixup_tkt343664(dma_addr_t baddr); +unsigned int +fetchunit_stride_fixup_tkt339017(unsigned int stride, unsigned int burst_size, + dma_addr_t baddr, bool nonzero_mod); +void fetchunit_get_dprc(struct dpu_fetchunit *fu, void *data); +void fetchunit_shden(struct dpu_fetchunit *fu, bool enable); +void fetchunit_baddr_autoupdate(struct dpu_fetchunit *fu, u8 layer_mask); +void fetchunit_shdldreq_sticky(struct dpu_fetchunit *fu, u8 layer_mask); +void fetchunit_set_burstlength(struct dpu_fetchunit *fu, + unsigned int x_offset, unsigned int mt_w, + int bpp, dma_addr_t baddr, bool use_prefetch); +void fetchunit_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width, + unsigned int x_offset, unsigned int y_offset, + unsigned int mt_w, unsigned int mt_h, + int bpp, dma_addr_t baddr); +void fetchunit_set_src_bpp(struct dpu_fetchunit *fu, int bpp); +void fetchunit_set_src_stride(struct dpu_fetchunit *fu, + unsigned int width, unsigned int x_offset, + unsigned int mt_w, int bpp, unsigned int stride, + dma_addr_t baddr, bool use_prefetch); +void fetchunit_set_pixel_blend_mode(struct dpu_fetchunit *fu, + unsigned int pixel_blend_mode, u16 alpha, + u32 fb_format); +void fetchunit_enable_src_buf(struct dpu_fetchunit *fu); +void fetchunit_disable_src_buf(struct dpu_fetchunit *fu); +bool fetchunit_is_enabled(struct dpu_fetchunit *fu); +unsigned int fetchunit_get_stream_id(struct dpu_fetchunit *fu); +void fetchunit_set_stream_id(struct dpu_fetchunit *fu, unsigned int id); +void fetchunit_pin_off(struct dpu_fetchunit *fu); +void fetchunit_unpin_off(struct dpu_fetchunit *fu); +bool fetchunit_is_pinned_off(struct 
dpu_fetchunit *fu); +bool fetchunit_is_fetchdecode(struct dpu_fetchunit *fu); +bool fetchunit_is_fetcheco(struct dpu_fetchunit *fu); +bool fetchunit_is_fetchlayer(struct dpu_fetchunit *fu); +bool fetchunit_is_fetchwarp(struct dpu_fetchunit *fu); + +/* + * to avoid on-the-fly/hot plane resource migration + * between two display interfaces + */ +#define DPU_PLANE_SRC_TO_DISP_STREAM0 BIT(0) +#define DPU_PLANE_SRC_TO_DISP_STREAM1 BIT(1) +#define DPU_PLANE_SRC_DISABLED 0 + +struct dpu_plane_res { + struct dpu_extdst *ed[2]; + struct dpu_fetchunit *fd[2]; + struct dpu_fetchunit *fe[2]; + struct dpu_fetchunit *fl[1]; + struct dpu_fetchunit *fw[1]; + struct dpu_framegen *fg[2]; + struct dpu_hscaler *hs[2]; + struct dpu_layerblend *lb[4]; + struct dpu_vscaler *vs[2]; +}; + +/* + * Each DPU plane can be a primary plane or an overlay plane + * of one of the DPU's two CRTCs. + */ +#define DPU_PLANE_SRC_FL0_ID BIT(0) +#define DPU_PLANE_SRC_FW2_ID BIT(1) +#define DPU_PLANE_SRC_FD0_ID BIT(2) +#define DPU_PLANE_SRC_FD1_ID BIT(3) + +struct dpu_plane_grp { + struct dpu_plane_res res; + unsigned int hw_plane_num; + unsigned int hw_plane_fetcheco_num; + unsigned int hw_plane_hscaler_num; + unsigned int hw_plane_vscaler_num; + unsigned int id; + bool has_vproc; + + /* used when assigning plane source */ + struct mutex mutex; + u32 src_mask; + u32 src_a_mask; + u32 src_use_vproc_mask; +}; + +static inline struct dpu_plane_grp *plane_res_to_grp(struct dpu_plane_res *res) +{ + return container_of(res, struct dpu_plane_grp, res); +} + +struct dpu_client_platformdata { + const unsigned int stream_id; + unsigned int di_grp_id; + struct dpu_plane_grp *plane_grp; + + /* Store9 could be shared bewteen display engine and blit engine */ + struct dpu_store *st9; + + struct device_node *of_node; +}; +#endif /* __DRM_DPU_H__ */ |