author    Sandor Yu <R01008@freescale.com>  2015-08-26 16:42:02 +0800
committer Nitin Garg <nitin.garg@nxp.com>   2016-01-14 11:01:22 -0600
commit    ec97772b4eac12a6c45a948c7c71f14eba8152c2 (patch)
tree      d6b18c8cdac6b5ef847fc18ce3a5157a35c695dd /drivers
parent    ccee321c973448caf76ec4a916475c3c204b0ad8 (diff)
MLK-11431-1: IPU: forward IPU display drivers to 4.1.y kernel
Forward the imx_3.14.y IPU and display drivers to the 4.1 kernel. This includes the IPU core driver, the display driver, and the LDB and HDMI drivers.

Signed-off-by: Sandor Yu <R01008@freescale.com>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/mfd/Kconfig | 7
-rw-r--r-- drivers/mfd/Makefile | 1
-rw-r--r-- drivers/mfd/mxc-hdmi-core.c | 808
-rw-r--r-- drivers/mxc/Kconfig | 9
-rw-r--r-- drivers/mxc/Makefile | 2
-rw-r--r-- drivers/mxc/hdmi-cec/Kconfig | 11
-rw-r--r-- drivers/mxc/hdmi-cec/Makefile | 1
-rw-r--r-- drivers/mxc/hdmi-cec/mxc_hdmi-cec.c | 664
-rw-r--r-- drivers/mxc/hdmi-cec/mxc_hdmi-cec.h | 38
-rw-r--r-- drivers/mxc/ipu3/Kconfig | 21
-rw-r--r-- drivers/mxc/ipu3/Makefile | 7
-rw-r--r-- drivers/mxc/ipu3/ipu_calc_stripes_sizes.c | 495
-rw-r--r-- drivers/mxc/ipu3/ipu_capture.c | 816
-rw-r--r-- drivers/mxc/ipu3/ipu_common.c | 3458
-rw-r--r-- drivers/mxc/ipu3/ipu_device.c | 3739
-rw-r--r-- drivers/mxc/ipu3/ipu_disp.c | 1959
-rw-r--r-- drivers/mxc/ipu3/ipu_ic.c | 924
-rw-r--r-- drivers/mxc/ipu3/ipu_param_mem.h | 996
-rw-r--r-- drivers/mxc/ipu3/ipu_pixel_clk.c | 317
-rw-r--r-- drivers/mxc/ipu3/ipu_prv.h | 368
-rw-r--r-- drivers/mxc/ipu3/ipu_regs.h | 702
-rw-r--r-- drivers/mxc/ipu3/pre-regs.h | 480
-rw-r--r-- drivers/mxc/ipu3/pre.c | 973
-rw-r--r-- drivers/mxc/ipu3/prg-regs.h | 70
-rw-r--r-- drivers/mxc/ipu3/prg.c | 506
-rw-r--r-- drivers/mxc/ipu3/vdoa.c | 536
-rw-r--r-- drivers/mxc/ipu3/vdoa.h | 62
-rw-r--r-- drivers/video/fbdev/mxc/Kconfig | 72
-rw-r--r-- drivers/video/fbdev/mxc/Makefile | 7
-rw-r--r-- drivers/video/fbdev/mxc/hannstar_cabc.c | 82
-rw-r--r-- drivers/video/fbdev/mxc/ldb.c | 913
-rw-r--r-- drivers/video/fbdev/mxc/mxc_dispdrv.c | 2
-rw-r--r-- drivers/video/fbdev/mxc/mxc_edid.c | 769
-rw-r--r-- drivers/video/fbdev/mxc/mxc_hdmi.c | 2948
-rw-r--r-- drivers/video/fbdev/mxc/mxc_ipuv3_fb.c | 3563
-rw-r--r-- drivers/video/fbdev/mxc/mxc_lcdif.c | 237
36 files changed, 26539 insertions, 24 deletions
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index d5ad04dad081..5ad5945f06de 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -244,6 +244,13 @@ config MFD_MC13XXX_I2C
help
Select this if your MC13xxx is connected via an I2C bus.
+config MFD_MXC_HDMI
+ tristate "Freescale HDMI Core"
+ select MFD_CORE
+ help
+ This is the core driver for the Freescale i.MX6 on-chip HDMI.
+ This MFD driver connects with the video and audio drivers for HDMI.
+
config MFD_HI6421_PMIC
tristate "HiSilicon Hi6421 PMU/Codec IC"
depends on OF
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0e5cfeba107c..9e36d0e8d99b 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -181,6 +181,7 @@ obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
obj-$(CONFIG_MFD_DLN2) += dln2.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
obj-$(CONFIG_MFD_SKY81452) += sky81452.o
+obj-$(CONFIG_MFD_MXC_HDMI) += mxc-hdmi-core.o
intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
diff --git a/drivers/mfd/mxc-hdmi-core.c b/drivers/mfd/mxc-hdmi-core.c
new file mode 100644
index 000000000000..ca79e7e4325a
--- /dev/null
+++ b/drivers/mfd/mxc-hdmi-core.c
@@ -0,0 +1,808 @@
+/*
+ * Copyright (C) 2011-2015 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <linux/platform_device.h>
+#include <linux/regulator/machine.h>
+#include <asm/mach-types.h>
+
+#include <video/mxc_hdmi.h>
+#include <linux/ipu-v3.h>
+#include <video/mxc_edid.h>
+#include "../mxc/ipu3/ipu_prv.h"
+#include <linux/mfd/mxc-hdmi-core.h>
+#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mfd/mxc-hdmi-core.h>
+
+struct mxc_hdmi_data {
+ struct platform_device *pdev;
+ unsigned long __iomem *reg_base;
+ unsigned long reg_phys_base;
+ struct device *dev;
+};
+
+static void __iomem *hdmi_base;
+static struct clk *isfr_clk;
+static struct clk *iahb_clk;
+static struct clk *mipi_core_clk;
+static spinlock_t irq_spinlock;
+static spinlock_t edid_spinlock;
+static unsigned int sample_rate;
+static unsigned long pixel_clk_rate;
+static struct clk *pixel_clk;
+static int hdmi_ratio;
+int mxc_hdmi_ipu_id;
+int mxc_hdmi_disp_id;
+static struct mxc_edid_cfg hdmi_core_edid_cfg;
+static int hdmi_core_init;
+static unsigned int hdmi_dma_running;
+static struct snd_pcm_substream *hdmi_audio_stream_playback;
+static unsigned int hdmi_cable_state;
+static unsigned int hdmi_blank_state;
+static unsigned int hdmi_abort_state;
+static spinlock_t hdmi_audio_lock, hdmi_blank_state_lock, hdmi_cable_state_lock;
+
+unsigned int hdmi_set_cable_state(unsigned int state)
+{
+ unsigned long flags;
+ struct snd_pcm_substream *substream = hdmi_audio_stream_playback;
+
+ spin_lock_irqsave(&hdmi_cable_state_lock, flags);
+ hdmi_cable_state = state;
+ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);
+
+#ifndef CONFIG_MFD_MXC_HDMI_ANDROID
+ if (check_hdmi_state() && substream && hdmi_abort_state) {
+ hdmi_abort_state = 0;
+ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_set_cable_state);
+
+unsigned int hdmi_set_blank_state(unsigned int state)
+{
+ unsigned long flags;
+ struct snd_pcm_substream *substream = hdmi_audio_stream_playback;
+
+ spin_lock_irqsave(&hdmi_blank_state_lock, flags);
+ hdmi_blank_state = state;
+ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);
+
+#ifndef CONFIG_MFD_MXC_HDMI_ANDROID
+ if (check_hdmi_state() && substream && hdmi_abort_state) {
+ hdmi_abort_state = 0;
+ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
+ }
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_set_blank_state);
+
+static void hdmi_audio_abort_stream(struct snd_pcm_substream *substream)
+{
+ unsigned long flags;
+
+ snd_pcm_stream_lock_irqsave(substream, flags);
+
+#ifndef CONFIG_MFD_MXC_HDMI_ANDROID
+ if (snd_pcm_running(substream)) {
+ hdmi_abort_state = 1;
+ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
+ }
+#else
+ if (snd_pcm_running(substream))
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+#endif
+
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
+}
+
+int mxc_hdmi_abort_stream(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&hdmi_audio_lock, flags);
+ if (hdmi_audio_stream_playback)
+ hdmi_audio_abort_stream(hdmi_audio_stream_playback);
+ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(mxc_hdmi_abort_stream);
+
+int check_hdmi_state(void)
+{
+ unsigned long flags1, flags2;
+ unsigned int ret;
+
+ spin_lock_irqsave(&hdmi_cable_state_lock, flags1);
+ spin_lock_irqsave(&hdmi_blank_state_lock, flags2);
+
+ ret = hdmi_cable_state && hdmi_blank_state;
+
+ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags2);
+ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags1);
+
+ return ret;
+}
+EXPORT_SYMBOL(check_hdmi_state);
+
+int mxc_hdmi_register_audio(struct snd_pcm_substream *substream)
+{
+ unsigned long flags, flags1;
+ int ret = 0;
+
+ snd_pcm_stream_lock_irqsave(substream, flags);
+
+ if (substream && check_hdmi_state()) {
+ spin_lock_irqsave(&hdmi_audio_lock, flags1);
+ if (hdmi_audio_stream_playback) {
+ pr_err("%s: inconsistent hdmi audio stream!\n", __func__);
+ ret = -EINVAL;
+ }
+ hdmi_audio_stream_playback = substream;
+ hdmi_abort_state = 0;
+ spin_unlock_irqrestore(&hdmi_audio_lock, flags1);
+ } else
+ ret = -EINVAL;
+
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(mxc_hdmi_register_audio);
+
+void mxc_hdmi_unregister_audio(struct snd_pcm_substream *substream)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi_audio_lock, flags);
+ hdmi_audio_stream_playback = NULL;
+ hdmi_abort_state = 0;
+ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
+}
+EXPORT_SYMBOL(mxc_hdmi_unregister_audio);
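The register/unregister/abort helpers exported above define the handshake between this core and the HDMI audio driver: audio registers its substream only while check_hdmi_state() says the link is usable, and the core aborts or restarts the stream as the cable and blanking state change. A minimal sketch of a consumer follows (illustrative only; the callback names are hypothetical, and only the mxc_hdmi_* and check_hdmi_state() calls come from this file):

	/* Illustrative only -- not part of the patch. */
	static int example_hdmi_audio_prepare(struct snd_pcm_substream *substream)
	{
		/* refuse to start unless the cable is plugged and the display
		 * is unblanked; both conditions are tracked by mxc-hdmi-core */
		if (!check_hdmi_state())
			return -ENODEV;

		return mxc_hdmi_register_audio(substream);
	}

	static int example_hdmi_audio_hw_free(struct snd_pcm_substream *substream)
	{
		mxc_hdmi_unregister_audio(substream);
		return 0;
	}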
+
+u8 hdmi_readb(unsigned int reg)
+{
+ u8 value;
+
+ value = __raw_readb(hdmi_base + reg);
+
+ return value;
+}
+EXPORT_SYMBOL(hdmi_readb);
+
+#ifdef DEBUG
+static bool overflow_lo;
+static bool overflow_hi;
+
+bool hdmi_check_overflow(void)
+{
+ u8 val, lo, hi;
+
+ val = hdmi_readb(HDMI_IH_FC_STAT2);
+ lo = (val & HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW) != 0;
+ hi = (val & HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW) != 0;
+
+ if ((lo != overflow_lo) || (hi != overflow_hi)) {
+ pr_debug("%s LowPriority=%d HighPriority=%d <=======================\n",
+ __func__, lo, hi);
+ overflow_lo = lo;
+ overflow_hi = hi;
+ return true;
+ }
+ return false;
+}
+#else
+bool hdmi_check_overflow(void)
+{
+ return false;
+}
+#endif
+EXPORT_SYMBOL(hdmi_check_overflow);
+
+void hdmi_writeb(u8 value, unsigned int reg)
+{
+ hdmi_check_overflow();
+ __raw_writeb(value, hdmi_base + reg);
+ hdmi_check_overflow();
+}
+EXPORT_SYMBOL(hdmi_writeb);
+
+void hdmi_mask_writeb(u8 data, unsigned int reg, u8 shift, u8 mask)
+{
+ u8 value = hdmi_readb(reg) & ~mask;
+ value |= (data << shift) & mask;
+ hdmi_writeb(value, reg);
+}
+EXPORT_SYMBOL(hdmi_mask_writeb);
+
+unsigned int hdmi_read4(unsigned int reg)
+{
+ /* read a four byte address from registers */
+ return (hdmi_readb(reg + 3) << 24) |
+ (hdmi_readb(reg + 2) << 16) |
+ (hdmi_readb(reg + 1) << 8) |
+ hdmi_readb(reg);
+}
+EXPORT_SYMBOL(hdmi_read4);
+
+void hdmi_write4(unsigned int value, unsigned int reg)
+{
+ /* write a four byte address to hdmi regs */
+ hdmi_writeb(value & 0xff, reg);
+ hdmi_writeb((value >> 8) & 0xff, reg + 1);
+ hdmi_writeb((value >> 16) & 0xff, reg + 2);
+ hdmi_writeb((value >> 24) & 0xff, reg + 3);
+}
+EXPORT_SYMBOL(hdmi_write4);
+
+static void initialize_hdmi_ih_mutes(void)
+{
+ u8 ih_mute;
+
+ /*
+ * Boot up defaults are:
+ * HDMI_IH_MUTE = 0x03 (disabled)
+ * HDMI_IH_MUTE_* = 0x00 (enabled)
+ */
+
+ /* Disable top level interrupt bits in HDMI block */
+ ih_mute = hdmi_readb(HDMI_IH_MUTE) |
+ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
+ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT;
+
+ hdmi_writeb(ih_mute, HDMI_IH_MUTE);
+
+ /* by default mask all interrupts */
+ hdmi_writeb(0xff, HDMI_VP_MASK);
+ hdmi_writeb(0xff, HDMI_FC_MASK0);
+ hdmi_writeb(0xff, HDMI_FC_MASK1);
+ hdmi_writeb(0xff, HDMI_FC_MASK2);
+ hdmi_writeb(0xff, HDMI_PHY_MASK0);
+ hdmi_writeb(0xff, HDMI_PHY_I2CM_INT_ADDR);
+ hdmi_writeb(0xff, HDMI_PHY_I2CM_CTLINT_ADDR);
+ hdmi_writeb(0xff, HDMI_AUD_INT);
+ hdmi_writeb(0xff, HDMI_AUD_SPDIFINT);
+ hdmi_writeb(0xff, HDMI_AUD_HBR_MASK);
+ hdmi_writeb(0xff, HDMI_GP_MASK);
+ hdmi_writeb(0xff, HDMI_A_APIINTMSK);
+ hdmi_writeb(0xff, HDMI_CEC_MASK);
+ hdmi_writeb(0xff, HDMI_I2CM_INT);
+ hdmi_writeb(0xff, HDMI_I2CM_CTLINT);
+
+ /* Disable interrupts in the IH_MUTE_* registers */
+ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT0);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT1);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT2);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_AS_STAT0);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_PHY_STAT0);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_I2CM_STAT0);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_CEC_STAT0);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_VP_STAT0);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_I2CMPHY_STAT0);
+ hdmi_writeb(0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
+
+ /* Enable top level interrupt bits in HDMI block */
+ ih_mute &= ~(HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
+ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT);
+ hdmi_writeb(ih_mute, HDMI_IH_MUTE);
+}
+
+static void hdmi_set_clock_regenerator_n(unsigned int value)
+{
+ u8 val;
+
+ if (!hdmi_dma_running) {
+ hdmi_writeb(value & 0xff, HDMI_AUD_N1);
+ hdmi_writeb(0, HDMI_AUD_N2);
+ hdmi_writeb(0, HDMI_AUD_N3);
+ }
+
+ hdmi_writeb(value & 0xff, HDMI_AUD_N1);
+ hdmi_writeb((value >> 8) & 0xff, HDMI_AUD_N2);
+ hdmi_writeb((value >> 16) & 0x0f, HDMI_AUD_N3);
+
+ /* nshift factor = 0 */
+ val = hdmi_readb(HDMI_AUD_CTS3);
+ val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK;
+ hdmi_writeb(val, HDMI_AUD_CTS3);
+}
+
+static void hdmi_set_clock_regenerator_cts(unsigned int cts)
+{
+ u8 val;
+
+ if (!hdmi_dma_running) {
+ hdmi_writeb(cts & 0xff, HDMI_AUD_CTS1);
+ hdmi_writeb(0, HDMI_AUD_CTS2);
+ hdmi_writeb(0, HDMI_AUD_CTS3);
+ }
+
+ /* Must be set/cleared first */
+ val = hdmi_readb(HDMI_AUD_CTS3);
+ val &= ~HDMI_AUD_CTS3_CTS_MANUAL;
+ hdmi_writeb(val, HDMI_AUD_CTS3);
+
+ hdmi_writeb(cts & 0xff, HDMI_AUD_CTS1);
+ hdmi_writeb((cts >> 8) & 0xff, HDMI_AUD_CTS2);
+ hdmi_writeb(((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
+ HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
+}
+
+static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
+ unsigned int ratio)
+{
+ unsigned int n = (128 * freq) / 1000;
+
+ switch (freq) {
+ case 32000:
+ if (pixel_clk == 25174000)
+ n = (ratio == 150) ? 9152 : 4576;
+ else if (pixel_clk == 27020000)
+ n = (ratio == 150) ? 8192 : 4096;
+ else if (pixel_clk == 74170000 || pixel_clk == 148350000)
+ n = 11648;
+ else if (pixel_clk == 297000000)
+ n = (ratio == 150) ? 6144 : 3072;
+ else
+ n = 4096;
+ break;
+
+ case 44100:
+ if (pixel_clk == 25174000)
+ n = 7007;
+ else if (pixel_clk == 74170000)
+ n = 17836;
+ else if (pixel_clk == 148350000)
+ n = (ratio == 150) ? 17836 : 8918;
+ else if (pixel_clk == 297000000)
+ n = (ratio == 150) ? 9408 : 4704;
+ else
+ n = 6272;
+ break;
+
+ case 48000:
+ if (pixel_clk == 25174000)
+ n = (ratio == 150) ? 9152 : 6864;
+ else if (pixel_clk == 27020000)
+ n = (ratio == 150) ? 8192 : 6144;
+ else if (pixel_clk == 74170000)
+ n = 11648;
+ else if (pixel_clk == 148350000)
+ n = (ratio == 150) ? 11648 : 5824;
+ else if (pixel_clk == 297000000)
+ n = (ratio == 150) ? 10240 : 5120;
+ else
+ n = 6144;
+ break;
+
+ case 88200:
+ n = hdmi_compute_n(44100, pixel_clk, ratio) * 2;
+ break;
+
+ case 96000:
+ n = hdmi_compute_n(48000, pixel_clk, ratio) * 2;
+ break;
+
+ case 176400:
+ n = hdmi_compute_n(44100, pixel_clk, ratio) * 4;
+ break;
+
+ case 192000:
+ n = hdmi_compute_n(48000, pixel_clk, ratio) * 4;
+ break;
+
+ default:
+ break;
+ }
+
+ return n;
+}
+
+static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
+ unsigned int ratio)
+{
+ unsigned int cts = 0;
+ switch (freq) {
+ case 32000:
+ if (pixel_clk == 297000000) {
+ cts = 222750;
+ break;
+ } else if (pixel_clk == 25174000) {
+ cts = 28125;
+ break;
+ }
+ case 48000:
+ case 96000:
+ case 192000:
+ switch (pixel_clk) {
+ case 25200000:
+ case 27000000:
+ case 54000000:
+ case 74250000:
+ case 148500000:
+ cts = pixel_clk / 1000;
+ break;
+ case 297000000:
+ cts = 247500;
+ break;
+ case 25174000:
+ cts = 28125l;
+ break;
+ /*
+ * All other TMDS clocks are not supported by
+ * DWC_hdmi_tx. The TMDS clocks divided or
+ * multiplied by 1,001 coefficients are not
+ * supported.
+ */
+ default:
+ break;
+ }
+ break;
+ case 44100:
+ case 88200:
+ case 176400:
+ switch (pixel_clk) {
+ case 25200000:
+ cts = 28000;
+ break;
+ case 25174000:
+ cts = 31250;
+ break;
+ case 27000000:
+ cts = 30000;
+ break;
+ case 54000000:
+ cts = 60000;
+ break;
+ case 74250000:
+ cts = 82500;
+ break;
+ case 148500000:
+ cts = 165000;
+ break;
+ case 297000000:
+ cts = 247500;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (ratio == 100)
+ return cts;
+ else
+ return (cts * ratio) / 100;
+}
+
+static void hdmi_set_clk_regenerator(void)
+{
+ unsigned int clk_n, clk_cts;
+
+ clk_n = hdmi_compute_n(sample_rate, pixel_clk_rate, hdmi_ratio);
+ clk_cts = hdmi_compute_cts(sample_rate, pixel_clk_rate, hdmi_ratio);
+
+ if (clk_cts == 0) {
+ pr_debug("%s: pixel clock not supported: %d\n",
+ __func__, (int)pixel_clk_rate);
+ return;
+ }
+
+ pr_debug("%s: samplerate=%d ratio=%d pixelclk=%d N=%d cts=%d\n",
+ __func__, sample_rate, hdmi_ratio, (int)pixel_clk_rate,
+ clk_n, clk_cts);
+
+ hdmi_set_clock_regenerator_cts(clk_cts);
+ hdmi_set_clock_regenerator_n(clk_n);
+}
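For reference, the N/CTS tables above are consistent with the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * fs). A small cross-check helper (not part of the patch, shown only to make the relation concrete) reproduces the table entries:

	/* Illustrative cross-check, not part of the patch. */
	static unsigned int acr_expected_cts(unsigned long long f_tmds,
					     unsigned int fs, unsigned int n)
	{
		return (unsigned int)(f_tmds * n / (128ULL * fs));
	}

	/*
	 * acr_expected_cts(74250000, 48000, 6144) == 74250  -> the
	 *     "cts = pixel_clk / 1000" entry in hdmi_compute_cts()
	 * acr_expected_cts(74250000, 44100, 6272) == 82500  -> the
	 *     44.1 kHz table entry for a 74.25 MHz TMDS clock
	 */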
+
+static int hdmi_core_get_of_property(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int err;
+ int ipu_id, disp_id;
+
+ err = of_property_read_u32(np, "ipu_id", &ipu_id);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
+ return err;
+ }
+ err = of_property_read_u32(np, "disp_id", &disp_id);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
+ return err;
+ }
+
+ mxc_hdmi_ipu_id = ipu_id;
+ mxc_hdmi_disp_id = disp_id;
+
+ return err;
+}
+
+/* Need to run this before phy is enabled the first time to prevent
+ * overflow condition in HDMI_IH_FC_STAT2 */
+void hdmi_init_clk_regenerator(void)
+{
+ if (pixel_clk_rate == 0) {
+ pixel_clk_rate = 74250000;
+ hdmi_set_clk_regenerator();
+ }
+}
+EXPORT_SYMBOL(hdmi_init_clk_regenerator);
+
+void hdmi_clk_regenerator_update_pixel_clock(u32 pixclock)
+{
+
+ if (!pixclock)
+ return;
+ /* Translate pixel clock in ps (pico seconds) to Hz */
+ pixel_clk_rate = PICOS2KHZ(pixclock) * 1000UL;
+ hdmi_set_clk_regenerator();
+}
+EXPORT_SYMBOL(hdmi_clk_regenerator_update_pixel_clock);
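PICOS2KHZ() turns the fbdev pixclock period (picoseconds) into a frequency in kHz, so pixel_clk_rate ends up in Hz here. A worked example, using a common 720p fbdev timing purely as an illustration:

	/*
	 * Illustrative arithmetic, not part of the patch: a typical 720p mode
	 * advertises pixclock = 13468 ps, so
	 *   PICOS2KHZ(13468) * 1000UL = (1000000000 / 13468) * 1000 = 74250000 Hz
	 * i.e. the 74.25 MHz TMDS clock used by the N/CTS tables above.
	 */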
+
+void hdmi_set_dma_mode(unsigned int dma_running)
+{
+ hdmi_dma_running = dma_running;
+ hdmi_set_clk_regenerator();
+}
+EXPORT_SYMBOL(hdmi_set_dma_mode);
+
+void hdmi_set_sample_rate(unsigned int rate)
+{
+ sample_rate = rate;
+}
+EXPORT_SYMBOL(hdmi_set_sample_rate);
+
+void hdmi_set_edid_cfg(struct mxc_edid_cfg *cfg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&edid_spinlock, flags);
+ memcpy(&hdmi_core_edid_cfg, cfg, sizeof(struct mxc_edid_cfg));
+ spin_unlock_irqrestore(&edid_spinlock, flags);
+}
+EXPORT_SYMBOL(hdmi_set_edid_cfg);
+
+void hdmi_get_edid_cfg(struct mxc_edid_cfg *cfg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&edid_spinlock, flags);
+ memcpy(cfg, &hdmi_core_edid_cfg, sizeof(struct mxc_edid_cfg));
+ spin_unlock_irqrestore(&edid_spinlock, flags);
+}
+EXPORT_SYMBOL(hdmi_get_edid_cfg);
+
+void hdmi_set_registered(int registered)
+{
+ hdmi_core_init = registered;
+}
+EXPORT_SYMBOL(hdmi_set_registered);
+
+int hdmi_get_registered(void)
+{
+ return hdmi_core_init;
+}
+EXPORT_SYMBOL(hdmi_get_registered);
+
+static int mxc_hdmi_core_probe(struct platform_device *pdev)
+{
+ struct mxc_hdmi_data *hdmi_data;
+ struct resource *res;
+ unsigned long flags;
+ int ret = 0;
+
+#ifdef DEBUG
+ overflow_lo = false;
+ overflow_hi = false;
+#endif
+
+ hdmi_core_init = 0;
+ hdmi_dma_running = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ ret = hdmi_core_get_of_property(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "get hdmi of property fail\n");
+ return -ENOENT;
+ }
+
+ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(struct mxc_hdmi_data), GFP_KERNEL);
+ if (!hdmi_data) {
+ dev_err(&pdev->dev, "Couldn't allocate mxc hdmi mfd device\n");
+ return -ENOMEM;
+ }
+ hdmi_data->pdev = pdev;
+
+ pixel_clk = NULL;
+ sample_rate = 48000;
+ pixel_clk_rate = 0;
+ hdmi_ratio = 100;
+
+ spin_lock_init(&irq_spinlock);
+ spin_lock_init(&edid_spinlock);
+
+
+ spin_lock_init(&hdmi_cable_state_lock);
+ spin_lock_init(&hdmi_blank_state_lock);
+ spin_lock_init(&hdmi_audio_lock);
+
+ spin_lock_irqsave(&hdmi_cable_state_lock, flags);
+ hdmi_cable_state = 0;
+ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);
+
+ spin_lock_irqsave(&hdmi_blank_state_lock, flags);
+ hdmi_blank_state = 0;
+ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);
+
+ spin_lock_irqsave(&hdmi_audio_lock, flags);
+ hdmi_audio_stream_playback = NULL;
+ hdmi_abort_state = 0;
+ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
+
+ mipi_core_clk = clk_get(&hdmi_data->pdev->dev, "mipi_core");
+ if (IS_ERR(mipi_core_clk)) {
+ ret = PTR_ERR(mipi_core_clk);
+ dev_err(&hdmi_data->pdev->dev,
+ "Unable to get mipi core clk: %d\n", ret);
+ goto eclkg;
+ }
+
+ ret = clk_prepare_enable(mipi_core_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot enable mipi core clock: %d\n", ret);
+ goto eclke;
+ }
+
+ isfr_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_isfr");
+ if (IS_ERR(isfr_clk)) {
+ ret = PTR_ERR(isfr_clk);
+ dev_err(&hdmi_data->pdev->dev,
+ "Unable to get HDMI isfr clk: %d\n", ret);
+ goto eclkg1;
+ }
+
+ ret = clk_prepare_enable(isfr_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
+ goto eclke1;
+ }
+
+ pr_debug("%s isfr_clk:%d\n", __func__,
+ (int)clk_get_rate(isfr_clk));
+
+ iahb_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_iahb");
+ if (IS_ERR(iahb_clk)) {
+ ret = PTR_ERR(iahb_clk);
+ dev_err(&hdmi_data->pdev->dev,
+ "Unable to get HDMI iahb clk: %d\n", ret);
+ goto eclkg2;
+ }
+
+ ret = clk_prepare_enable(iahb_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
+ goto eclke2;
+ }
+
+ hdmi_data->reg_phys_base = res->start;
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ ret = -EBUSY;
+ goto emem;
+ }
+
+ hdmi_data->reg_base = ioremap(res->start, resource_size(res));
+ if (!hdmi_data->reg_base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto eirq;
+ }
+ hdmi_base = hdmi_data->reg_base;
+
+ pr_debug("\n%s hdmi hw base = 0x%08x\n\n", __func__, (int)res->start);
+
+ initialize_hdmi_ih_mutes();
+
+ /* Disable HDMI clocks until video/audio sub-drivers are initialized */
+ clk_disable_unprepare(isfr_clk);
+ clk_disable_unprepare(iahb_clk);
+ clk_disable_unprepare(mipi_core_clk);
+
+ /* Replace platform data coming in with a local struct */
+ platform_set_drvdata(pdev, hdmi_data);
+
+ return ret;
+
+eirq:
+ release_mem_region(res->start, resource_size(res));
+emem:
+ clk_disable_unprepare(iahb_clk);
+eclke2:
+ clk_put(iahb_clk);
+eclkg2:
+ clk_disable_unprepare(isfr_clk);
+eclke1:
+ clk_put(isfr_clk);
+eclkg1:
+ clk_disable_unprepare(mipi_core_clk);
+eclke:
+ clk_put(mipi_core_clk);
+eclkg:
+ return ret;
+}
+
+
+static int __exit mxc_hdmi_core_remove(struct platform_device *pdev)
+{
+ struct mxc_hdmi_data *hdmi_data = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ iounmap(hdmi_data->reg_base);
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+static const struct of_device_id imx_hdmi_dt_ids[] = {
+ { .compatible = "fsl,imx6q-hdmi-core", },
+ { .compatible = "fsl,imx6dl-hdmi-core", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mxc_hdmi_core_driver = {
+ .driver = {
+ .name = "mxc_hdmi_core",
+ .of_match_table = imx_hdmi_dt_ids,
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(mxc_hdmi_core_remove),
+};
+
+static int __init mxc_hdmi_core_init(void)
+{
+ return platform_driver_probe(&mxc_hdmi_core_driver,
+ mxc_hdmi_core_probe);
+}
+
+static void __exit mxc_hdmi_core_exit(void)
+{
+ platform_driver_unregister(&mxc_hdmi_core_driver);
+}
+
+subsys_initcall(mxc_hdmi_core_init);
+module_exit(mxc_hdmi_core_exit);
+
+MODULE_DESCRIPTION("Core driver for Freescale i.MX on-chip HDMI");
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mxc/Kconfig b/drivers/mxc/Kconfig
index d52baf16682b..a81314e8db44 100644
--- a/drivers/mxc/Kconfig
+++ b/drivers/mxc/Kconfig
@@ -4,9 +4,18 @@ if ARCH_MXC
menu "MXC support drivers"
+config MXC_IPU
+ bool "Image Processing Unit Driver"
+ select MXC_IPU_V3
+ help
+ If you plan to use the Image Processing Unit, say
+ Y here. The IPU is needed by the framebuffer and V4L2 drivers.
+
source "drivers/mxc/mlb/Kconfig"
+source "drivers/mxc/ipu3/Kconfig"
source "drivers/mxc/sim/Kconfig"
source "drivers/mxc/vpu/Kconfig"
+source "drivers/mxc/hdmi-cec/Kconfig"
endmenu
diff --git a/drivers/mxc/Makefile b/drivers/mxc/Makefile
index 060dc06d5f7b..170e681c1c5f 100644
--- a/drivers/mxc/Makefile
+++ b/drivers/mxc/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_MXC_MLB) += mlb/
obj-$(CONFIG_MXC_SIM) += sim/
obj-$(CONFIG_MXC_VPU) += vpu/
+obj-$(CONFIG_MXC_IPU_V3) += ipu3/
+obj-$(CONFIG_MXC_HDMI_CEC) += hdmi-cec/
diff --git a/drivers/mxc/hdmi-cec/Kconfig b/drivers/mxc/hdmi-cec/Kconfig
new file mode 100644
index 000000000000..c0be06854e27
--- /dev/null
+++ b/drivers/mxc/hdmi-cec/Kconfig
@@ -0,0 +1,11 @@
+
+menu "MXC HDMI CEC (Consumer Electronics Control) support"
+
+config MXC_HDMI_CEC
+ tristate "Support for MXC HDMI CEC (Consumer Electronics Control)"
+ depends on MFD_MXC_HDMI
+ depends on FB_MXC_HDMI
+ help
+ The HDMI CEC device implements the low-level CEC protocol on i.MX6x platforms.
+
+endmenu
diff --git a/drivers/mxc/hdmi-cec/Makefile b/drivers/mxc/hdmi-cec/Makefile
new file mode 100644
index 000000000000..07248b25b033
--- /dev/null
+++ b/drivers/mxc/hdmi-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MXC_HDMI_CEC) += mxc_hdmi-cec.o
diff --git a/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c b/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c
new file mode 100644
index 000000000000..b89139ff666f
--- /dev/null
+++ b/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright (C) 2012-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file mxc_hdmi-cec.c
+ *
+ * @brief HDMI CEC system initialization and file operation implementation
+ *
+ * @ingroup HDMI
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/fsl_devices.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/sizes.h>
+
+#include <linux/console.h>
+#include <linux/types.h>
+#include <linux/mfd/mxc-hdmi-core.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <video/mxc_hdmi.h>
+
+#include "mxc_hdmi-cec.h"
+
+
+#define MAX_MESSAGE_LEN 17
+
+#define MESSAGE_TYPE_RECEIVE_SUCCESS 1
+#define MESSAGE_TYPE_NOACK 2
+#define MESSAGE_TYPE_DISCONNECTED 3
+#define MESSAGE_TYPE_CONNECTED 4
+#define MESSAGE_TYPE_SEND_SUCCESS 5
+
+
+struct hdmi_cec_priv {
+ int receive_error;
+ int send_error;
+ u8 Logical_address;
+ bool cec_state;
+ u8 last_msg[MAX_MESSAGE_LEN];
+ u8 msg_len;
+ u8 latest_cec_stat;
+ spinlock_t irq_lock;
+ struct delayed_work hdmi_cec_work;
+ struct mutex lock;
+};
+
+struct hdmi_cec_event {
+ int event_type;
+ int msg_len;
+ u8 msg[MAX_MESSAGE_LEN];
+ struct list_head list;
+};
+
+static LIST_HEAD(head);
+
+static int hdmi_cec_major;
+static struct class *hdmi_cec_class;
+static struct hdmi_cec_priv hdmi_cec_data;
+static u8 open_count;
+
+static wait_queue_head_t hdmi_cec_queue;
+static irqreturn_t mxc_hdmi_cec_isr(int irq, void *data)
+{
+ struct hdmi_cec_priv *hdmi_cec = data;
+ u8 cec_stat = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi_cec->irq_lock, flags);
+
+ hdmi_writeb(0x7f, HDMI_IH_MUTE_CEC_STAT0);
+
+ cec_stat = hdmi_readb(HDMI_IH_CEC_STAT0);
+ hdmi_writeb(cec_stat, HDMI_IH_CEC_STAT0);
+
+ if ((cec_stat & (HDMI_IH_CEC_STAT0_ERROR_INIT | \
+ HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM | \
+ HDMI_IH_CEC_STAT0_DONE)) == 0) {
+ spin_unlock_irqrestore(&hdmi_cec->irq_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ pr_debug("HDMI CEC interrupt received\n");
+ hdmi_cec->latest_cec_stat = cec_stat;
+
+ schedule_delayed_work(&(hdmi_cec->hdmi_cec_work), msecs_to_jiffies(20));
+
+ spin_unlock_irqrestore(&hdmi_cec->irq_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+void mxc_hdmi_cec_handle(u16 cec_stat)
+{
+ u8 val = 0, i = 0;
+ struct hdmi_cec_event *event = NULL;
+
+ if (!open_count)
+ return;
+
+ /* The current transmission is successful (for initiator only). */
+ if (cec_stat & HDMI_IH_CEC_STAT0_DONE) {
+
+ event = vmalloc(sizeof(struct hdmi_cec_event));
+ if (NULL == event) {
+ pr_err("%s: Not enough memory!\n", __func__);
+ return;
+ }
+
+ memset(event, 0, sizeof(struct hdmi_cec_event));
+ event->event_type = MESSAGE_TYPE_SEND_SUCCESS;
+
+ mutex_lock(&hdmi_cec_data.lock);
+ list_add_tail(&event->list, &head);
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ wake_up(&hdmi_cec_queue);
+ }
+
+ /* EOM is detected so that the received data is ready
+ * in the receiver data buffer
+ */
+ if (cec_stat & HDMI_IH_CEC_STAT0_EOM) {
+
+ hdmi_writeb(0x02, HDMI_IH_CEC_STAT0);
+
+ event = vmalloc(sizeof(struct hdmi_cec_event));
+ if (NULL == event) {
+ pr_err("%s: Not enough memory!\n", __func__);
+ return;
+ }
+ memset(event, 0, sizeof(struct hdmi_cec_event));
+
+ event->msg_len = hdmi_readb(HDMI_CEC_RX_CNT);
+ if (!event->msg_len) {
+ pr_err("%s: Invalid CEC message length!\n", __func__);
+ vfree(event);
+ return;
+ }
+ event->event_type = MESSAGE_TYPE_RECEIVE_SUCCESS;
+
+ for (i = 0; i < event->msg_len; i++)
+ event->msg[i] = hdmi_readb(HDMI_CEC_RX_DATA0+i);
+ hdmi_writeb(0x0, HDMI_CEC_LOCK);
+
+ mutex_lock(&hdmi_cec_data.lock);
+ list_add_tail(&event->list, &head);
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ wake_up(&hdmi_cec_queue);
+ }
+
+ /* An error is detected on cec line (for initiator only). */
+ if (cec_stat & HDMI_IH_CEC_STAT0_ERROR_INIT) {
+
+ mutex_lock(&hdmi_cec_data.lock);
+ hdmi_cec_data.send_error++;
+ if (hdmi_cec_data.send_error > 5) {
+ pr_err("%s:Re-transmission is attempted more than 5 times!\n",
+ __func__);
+ hdmi_cec_data.send_error = 0;
+ mutex_unlock(&hdmi_cec_data.lock);
+ return;
+ }
+
+ for (i = 0; i < hdmi_cec_data.msg_len; i++) {
+ hdmi_writeb(hdmi_cec_data.last_msg[i],
+ HDMI_CEC_TX_DATA0 + i);
+ }
+ hdmi_writeb(hdmi_cec_data.msg_len, HDMI_CEC_TX_CNT);
+
+ val = hdmi_readb(HDMI_CEC_CTRL);
+ val |= 0x01;
+ hdmi_writeb(val, HDMI_CEC_CTRL);
+ mutex_unlock(&hdmi_cec_data.lock);
+ }
+
+ /* A frame is not acknowledged in a directly addressed message.
+ * Or a frame is negatively acknowledged in
+ * a broadcast message (for initiator only).
+ */
+ if (cec_stat & HDMI_IH_CEC_STAT0_NACK) {
+ event = vmalloc(sizeof(struct hdmi_cec_event));
+ if (NULL == event) {
+ pr_err("%s: Not enough memory\n", __func__);
+ return;
+ }
+ memset(event, 0, sizeof(struct hdmi_cec_event));
+ event->event_type = MESSAGE_TYPE_NOACK;
+
+ mutex_lock(&hdmi_cec_data.lock);
+ list_add_tail(&event->list, &head);
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ wake_up(&hdmi_cec_queue);
+ }
+
+ /* An error is notified by a follower.
+ * Abnormal logic data bit error (for follower).
+ */
+ if (cec_stat & HDMI_IH_CEC_STAT0_ERROR_FOLL) {
+ hdmi_cec_data.receive_error++;
+ }
+
+ /* HDMI cable connected */
+ if (cec_stat & 0x80) {
+ event = vmalloc(sizeof(struct hdmi_cec_event));
+ if (NULL == event) {
+ pr_err("%s: Not enough memory\n", __func__);
+ return;
+ }
+ memset(event, 0, sizeof(struct hdmi_cec_event));
+ event->event_type = MESSAGE_TYPE_CONNECTED;
+
+ mutex_lock(&hdmi_cec_data.lock);
+ list_add_tail(&event->list, &head);
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ wake_up(&hdmi_cec_queue);
+ }
+
+ /* HDMI cable disconnected */
+ if (cec_stat & 0x100) {
+ event = vmalloc(sizeof(struct hdmi_cec_event));
+ if (NULL == event) {
+ pr_err("%s: Not enough memory!\n", __func__);
+ return;
+ }
+ memset(event, 0, sizeof(struct hdmi_cec_event));
+ event->event_type = MESSAGE_TYPE_DISCONNECTED;
+
+ mutex_lock(&hdmi_cec_data.lock);
+ list_add_tail(&event->list, &head);
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ wake_up(&hdmi_cec_queue);
+ }
+
+ return;
+}
+EXPORT_SYMBOL(mxc_hdmi_cec_handle);
+
+static void mxc_hdmi_cec_worker(struct work_struct *work)
+{
+ u8 val;
+
+ mxc_hdmi_cec_handle(hdmi_cec_data.latest_cec_stat);
+ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL |
+ HDMI_IH_CEC_STAT0_ARB_LOST;
+ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
+}
+
+/*!
+ * @brief open function for the HDMI CEC device file
+ *
+ * @return 0 on success or negative error code on error
+ */
+static int hdmi_cec_open(struct inode *inode, struct file *filp)
+{
+ mutex_lock(&hdmi_cec_data.lock);
+ if (open_count) {
+ mutex_unlock(&hdmi_cec_data.lock);
+ return -EBUSY;
+ }
+
+ open_count = 1;
+ filp->private_data = (void *)(&hdmi_cec_data);
+ hdmi_cec_data.Logical_address = 15;
+ hdmi_cec_data.cec_state = false;
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ return 0;
+}
+
+static ssize_t hdmi_cec_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct hdmi_cec_event *event = NULL;
+
+ pr_debug("function : %s\n", __func__);
+ if (!open_count)
+ return -ENODEV;
+
+ mutex_lock(&hdmi_cec_data.lock);
+ if (false == hdmi_cec_data.cec_state) {
+ mutex_unlock(&hdmi_cec_data.lock);
+ return -EACCES;
+ }
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ /* delete from list */
+ mutex_lock(&hdmi_cec_data.lock);
+ if (list_empty(&head)) {
+ mutex_unlock(&hdmi_cec_data.lock);
+ return -EACCES;
+ }
+ event = list_first_entry(&head, struct hdmi_cec_event, list);
+ list_del(&event->list);
+ mutex_unlock(&hdmi_cec_data.lock);
+
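+ /* hand the event to user space without the trailing list_head link */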
+ if (copy_to_user(buf, event,
+ sizeof(struct hdmi_cec_event) - sizeof(struct list_head))) {
+ vfree(event);
+ return -EFAULT;
+ }
+ vfree(event);
+
+ return sizeof(struct hdmi_cec_event);
+}
+
+static ssize_t hdmi_cec_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret = 0 , i = 0;
+ u8 msg[MAX_MESSAGE_LEN];
+ u8 msg_len = 0, val = 0;
+
+ pr_debug("function : %s\n", __func__);
+ if (!open_count)
+ return -ENODEV;
+
+ mutex_lock(&hdmi_cec_data.lock);
+ if (false == hdmi_cec_data.cec_state) {
+ mutex_unlock(&hdmi_cec_data.lock);
+ return -EACCES;
+ }
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ if (count > MAX_MESSAGE_LEN)
+ return -EINVAL;
+
+ mutex_lock(&hdmi_cec_data.lock);
+ hdmi_cec_data.send_error = 0;
+ memset(&msg, 0, MAX_MESSAGE_LEN);
+ ret = copy_from_user(&msg, buf, count);
+ if (ret) {
+ ret = -EACCES;
+ goto end;
+ }
+
+ msg_len = count;
+ hdmi_writeb(msg_len, HDMI_CEC_TX_CNT);
+ for (i = 0; i < msg_len; i++) {
+ hdmi_writeb(msg[i], HDMI_CEC_TX_DATA0+i);
+ }
+
+ val = hdmi_readb(HDMI_CEC_CTRL);
+ val |= 0x01;
+ hdmi_writeb(val, HDMI_CEC_CTRL);
+ memcpy(hdmi_cec_data.last_msg, msg, msg_len);
+ hdmi_cec_data.msg_len = msg_len;
+
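+ /* wait for the CEC_CTRL send bit to clear (frame sent); give up
+ * after roughly 200 ms */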
+ i = 0;
+ val = hdmi_readb(HDMI_CEC_CTRL);
+ while ((val & 0x01) == 0x1) {
+ msleep(50);
+ i++;
+ if (i > 3) {
+ ret = -EIO;
+ goto end;
+ }
+ val = hdmi_readb(HDMI_CEC_CTRL);
+ }
+
+end:
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ return ret;
+}
+
+/*!
+ * @brief ioctl function for the HDMI CEC device file
+ * @param cmd IO ctrl command
+ * @return 0 on success or negative error code on error
+ */
+static long hdmi_cec_ioctl(struct file *filp, u_int cmd,
+ u_long arg)
+{
+ int ret = 0, status = 0;
+ u8 val = 0, msg = 0;
+ struct mxc_edid_cfg hdmi_edid_cfg;
+
+ pr_debug("function : %s\n", __func__);
+ if (!open_count)
+ return -ENODEV;
+
+ switch (cmd) {
+ case HDMICEC_IOC_SETLOGICALADDRESS:
+ mutex_lock(&hdmi_cec_data.lock);
+ if (false == hdmi_cec_data.cec_state) {
+ mutex_unlock(&hdmi_cec_data.lock);
+ return -EACCES;
+ }
+
+ hdmi_cec_data.Logical_address = (u8)arg;
+
+ if (hdmi_cec_data.Logical_address <= 7) {
+ val = 1 << hdmi_cec_data.Logical_address;
+ hdmi_writeb(val, HDMI_CEC_ADDR_L);
+ hdmi_writeb(0, HDMI_CEC_ADDR_H);
+ } else if (hdmi_cec_data.Logical_address > 7 &&
+ hdmi_cec_data.Logical_address <= 15) {
+ val = 1 << (hdmi_cec_data.Logical_address - 8);
+ hdmi_writeb(val, HDMI_CEC_ADDR_H);
+ hdmi_writeb(0, HDMI_CEC_ADDR_L);
+ } else {
+ ret = -EINVAL;
+ }
+
+ /* Send Polling message with same source
+ * and destination address
+ */
+ if (0 == ret && 15 != hdmi_cec_data.Logical_address) {
+ msg = (hdmi_cec_data.Logical_address << 4) |
+ hdmi_cec_data.Logical_address;
+ hdmi_writeb(1, HDMI_CEC_TX_CNT);
+ hdmi_writeb(msg, HDMI_CEC_TX_DATA0);
+
+ val = hdmi_readb(HDMI_CEC_CTRL);
+ val |= 0x01;
+ hdmi_writeb(val, HDMI_CEC_CTRL);
+ }
+
+ mutex_unlock(&hdmi_cec_data.lock);
+ break;
+
+ case HDMICEC_IOC_STARTDEVICE:
+ val = hdmi_readb(HDMI_MC_CLKDIS);
+ val &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
+ hdmi_writeb(val, HDMI_MC_CLKDIS);
+
+ hdmi_writeb(0x02, HDMI_CEC_CTRL);
+
+ val = HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_NACK |
+ HDMI_IH_CEC_STAT0_EOM | HDMI_IH_CEC_STAT0_DONE;
+ hdmi_writeb(val, HDMI_CEC_POLARITY);
+
+ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL |
+ HDMI_IH_CEC_STAT0_ARB_LOST;
+ hdmi_writeb(val, HDMI_CEC_MASK);
+ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
+
+ mutex_lock(&hdmi_cec_data.lock);
+ hdmi_cec_data.cec_state = true;
+ mutex_unlock(&hdmi_cec_data.lock);
+ break;
+
+ case HDMICEC_IOC_STOPDEVICE:
+ hdmi_writeb(0x10, HDMI_CEC_CTRL);
+
+ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL |
+ HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_ARB_LOST |
+ HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM |
+ HDMI_IH_CEC_STAT0_DONE;
+ hdmi_writeb(val, HDMI_CEC_MASK);
+ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
+
+ hdmi_writeb(0x0, HDMI_CEC_POLARITY);
+
+ val = hdmi_readb(HDMI_MC_CLKDIS);
+ val |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
+ hdmi_writeb(val, HDMI_MC_CLKDIS);
+
+ mutex_lock(&hdmi_cec_data.lock);
+ hdmi_cec_data.cec_state = false;
+ mutex_unlock(&hdmi_cec_data.lock);
+ break;
+
+ case HDMICEC_IOC_GETPHYADDRESS:
+ hdmi_get_edid_cfg(&hdmi_edid_cfg);
+ status = copy_to_user((void __user *)arg,
+ &hdmi_edid_cfg.physical_address,
+ 4*sizeof(u8));
+ if (status)
+ ret = -EFAULT;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*!
+ * @brief Release function for the HDMI CEC device file
+ * @return 0 on success or negative error code on error
+ */
+static int hdmi_cec_release(struct inode *inode, struct file *filp)
+{
+ mutex_lock(&hdmi_cec_data.lock);
+
+ if (open_count) {
+ open_count = 0;
+ hdmi_cec_data.cec_state = false;
+ hdmi_cec_data.Logical_address = 15;
+ }
+
+ mutex_unlock(&hdmi_cec_data.lock);
+
+ return 0;
+}
+
+static unsigned int hdmi_cec_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+
+ pr_debug("function : %s\n", __func__);
+
+ if (!open_count)
+ return -ENODEV;
+
+ if (false == hdmi_cec_data.cec_state)
+ return -EACCES;
+
+ poll_wait(file, &hdmi_cec_queue, wait);
+
+ if (!list_empty(&head))
+ mask |= (POLLIN | POLLRDNORM);
+
+ return mask;
+}
+
+const struct file_operations hdmi_cec_fops = {
+ .owner = THIS_MODULE,
+ .read = hdmi_cec_read,
+ .write = hdmi_cec_write,
+ .open = hdmi_cec_open,
+ .unlocked_ioctl = hdmi_cec_ioctl,
+ .release = hdmi_cec_release,
+ .poll = hdmi_cec_poll,
+};
+
+static int hdmi_cec_dev_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct device *temp_class;
+ struct resource *res;
+ struct pinctrl *pinctrl;
+ int irq = platform_get_irq(pdev, 0);
+
+ hdmi_cec_major = register_chrdev(hdmi_cec_major,
+ "mxc_hdmi_cec", &hdmi_cec_fops);
+ if (hdmi_cec_major < 0) {
+ dev_err(&pdev->dev, "hdmi_cec: unable to get a major for HDMI CEC\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (unlikely(res == NULL)) {
+ dev_err(&pdev->dev, "hdmi_cec: no HDMI irq line provided\n");
+ err = -ENOENT;
+ goto err_out_chrdev;
+ }
+
+ spin_lock_init(&hdmi_cec_data.irq_lock);
+
+ err = devm_request_irq(&pdev->dev, irq, mxc_hdmi_cec_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), &hdmi_cec_data);
+ if (err < 0) {
+ dev_err(&pdev->dev, "hdmi_cec:Unable to request irq: %d\n", err);
+ goto err_out_chrdev;
+ }
+
+ hdmi_cec_class = class_create(THIS_MODULE, "mxc_hdmi_cec");
+ if (IS_ERR(hdmi_cec_class)) {
+ err = PTR_ERR(hdmi_cec_class);
+ goto err_out_chrdev;
+ }
+
+ temp_class = device_create(hdmi_cec_class, NULL,
+ MKDEV(hdmi_cec_major, 0), NULL, "mxc_hdmi_cec");
+ if (IS_ERR(temp_class)) {
+ err = PTR_ERR(temp_class);
+ goto err_out_class;
+ }
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ err = PTR_ERR(pinctrl);
+ dev_err(&pdev->dev, "can't get/select CEC pinctrl\n");
+ goto err_out_class;
+ }
+
+ init_waitqueue_head(&hdmi_cec_queue);
+
+ INIT_LIST_HEAD(&head);
+
+ mutex_init(&hdmi_cec_data.lock);
+
+ hdmi_cec_data.Logical_address = 15;
+
+ platform_set_drvdata(pdev, &hdmi_cec_data);
+
+ INIT_DELAYED_WORK(&hdmi_cec_data.hdmi_cec_work, mxc_hdmi_cec_worker);
+
+ dev_info(&pdev->dev, "HDMI CEC initialized\n");
+ goto out;
+
+err_out_class:
+ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
+ class_destroy(hdmi_cec_class);
+err_out_chrdev:
+ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
+out:
+ return err;
+}
+
+static int hdmi_cec_dev_remove(struct platform_device *pdev)
+{
+ if (hdmi_cec_major > 0) {
+ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
+ class_destroy(hdmi_cec_class);
+ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
+ hdmi_cec_major = 0;
+ }
+ return 0;
+}
+
+static const struct of_device_id imx_hdmi_cec_match[] = {
+ { .compatible = "fsl,imx6q-hdmi-cec", },
+ { .compatible = "fsl,imx6dl-hdmi-cec", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mxc_hdmi_cec_driver = {
+ .probe = hdmi_cec_dev_probe,
+ .remove = hdmi_cec_dev_remove,
+ .driver = {
+ .name = "mxc_hdmi_cec",
+ .of_match_table = imx_hdmi_cec_match,
+ },
+};
+
+module_platform_driver(mxc_hdmi_cec_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Linux HDMI CEC driver for Freescale i.MX/MXC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxc_hdmi_cec");
+
diff --git a/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h b/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h
new file mode 100644
index 000000000000..5c402af4d0c3
--- /dev/null
+++ b/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef _HDMICEC_H_
+#define _HDMICEC_H_
+#include <linux/ioctl.h>
+
+/*
+ * Ioctl definitions
+ */
+
+/* Use 'k' as magic number */
+#define HDMICEC_IOC_MAGIC 'H'
+/*
+ * S means "Set" through a ptr,
+ * T means "Tell" directly with the argument value
+ * G means "Get": reply by setting through a pointer
+ * Q means "Query": response is on the return value
+ * X means "eXchange": G and S atomically
+ * H means "sHift": T and Q atomically
+ */
+#define HDMICEC_IOC_SETLOGICALADDRESS \
+ _IOW(HDMICEC_IOC_MAGIC, 1, unsigned char)
+#define HDMICEC_IOC_STARTDEVICE _IO(HDMICEC_IOC_MAGIC, 2)
+#define HDMICEC_IOC_STOPDEVICE _IO(HDMICEC_IOC_MAGIC, 3)
+#define HDMICEC_IOC_GETPHYADDRESS \
+ _IOR(HDMICEC_IOC_MAGIC, 4, unsigned char[4])
+
+#endif /* !_HDMICEC_H_ */
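Taken together with the character device registered by mxc_hdmi-cec.c, these ioctls give user space a thin CEC transport: start the device, claim a logical address, then write() raw CEC frames whose first byte is (initiator << 4) | destination, as in the polling message the driver builds itself. A minimal user-space sketch follows; it is illustrative only, assuming the node appears as /dev/mxc_hdmi_cec and using the CEC "Give Device Power Status" opcode (0x8F) merely as an example payload:

	/* Illustrative user-space sketch, not part of the patch. */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "mxc_hdmi-cec.h"

	int main(void)
	{
		/* header byte: initiator 4 ("Playback 1"), destination 0 (TV) */
		unsigned char frame[2] = { (4 << 4) | 0, 0x8F };
		int fd = open("/dev/mxc_hdmi_cec", O_RDWR);

		if (fd < 0)
			return 1;

		if (ioctl(fd, HDMICEC_IOC_STARTDEVICE, 0) == 0 &&
		    ioctl(fd, HDMICEC_IOC_SETLOGICALADDRESS, 4) == 0)
			write(fd, frame, sizeof(frame));

		ioctl(fd, HDMICEC_IOC_STOPDEVICE, 0);
		close(fd);
		return 0;
	}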
diff --git a/drivers/mxc/ipu3/Kconfig b/drivers/mxc/ipu3/Kconfig
new file mode 100644
index 000000000000..c702efed2b55
--- /dev/null
+++ b/drivers/mxc/ipu3/Kconfig
@@ -0,0 +1,21 @@
+config MXC_IPU_V3
+ bool
+
+config MXC_IPU_V3_PRG
+ tristate "i.MX IPUv3 prefetch gasket engine"
+ depends on MXC_IPU_V3 && MXC_IPU_V3_PRE
+ help
+ This enables support for the IPUv3 prefetch gasket engine to
+ support double buffer handshake control bewteen IPUv3 and
+ prefetch engine(PRE), snoop the AXI interface for display
+ refresh requests to memory and modify the request address to
+ fetch the double buffered row of blocks in OCRAM.
+
+config MXC_IPU_V3_PRE
+ tristate "i.MX IPUv3 prefetch engine"
+ depends on MXC_IPU_V3
+ select MXC_IPU_V3_PRG
+ help
+ This enables support for the IPUv3 prefetch engine to improve
+ the system memory performance. The engine has the capability
+ to resolve framebuffers in tile pixel format to linear.
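With the symbols introduced by this Kconfig (and the MFD/CEC entries earlier in the patch), a board configuration that wants the IPU path plus HDMI might carry a fragment like the following; this is only an illustration of how the select/depends relationships fit together (MXC_IPU selects MXC_IPU_V3, MXC_IPU_V3_PRE selects MXC_IPU_V3_PRG, and MXC_HDMI_CEC depends on MFD_MXC_HDMI and FB_MXC_HDMI):

	CONFIG_MXC_IPU=y
	CONFIG_MXC_IPU_V3_PRE=y
	CONFIG_MFD_MXC_HDMI=y
	CONFIG_FB_MXC_HDMI=y
	CONFIG_MXC_HDMI_CEC=m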
diff --git a/drivers/mxc/ipu3/Makefile b/drivers/mxc/ipu3/Makefile
new file mode 100644
index 000000000000..d48f11244073
--- /dev/null
+++ b/drivers/mxc/ipu3/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MXC_IPU_V3) = mxc_ipu.o
+
+obj-$(CONFIG_MXC_IPU_V3_PRG) += prg.o
+obj-$(CONFIG_MXC_IPU_V3_PRE) += pre.o
+
+mxc_ipu-objs := ipu_common.o ipu_ic.o ipu_disp.o ipu_capture.o ipu_device.o \
+ ipu_calc_stripes_sizes.o vdoa.o ipu_pixel_clk.o
diff --git a/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c b/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c
new file mode 100644
index 000000000000..512404d4101c
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2009-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*
+ * @file ipu_calc_stripes_sizes.c
+ *
+ * @brief IPU IC functions
+ *
+ * @ingroup IPU
+ */
+
+#include <linux/ipu-v3.h>
+#include <linux/module.h>
+#include <linux/math64.h>
+
+#define BPP_32 0
+#define BPP_16 3
+#define BPP_8 5
+#define BPP_24 1
+#define BPP_12 4
+#define BPP_18 2
+
+static u32 truncate(u32 up, /* 0: down; else: up */
+ u64 a, /* must be non-negative */
+ u32 b)
+{
+ u32 d;
+ u64 div;
+ div = div_u64(a, b);
+ d = b * (div >> 32);
+ if (up && (a > (((u64)d) << 32)))
+ return d+b;
+ else
+ return d;
+}
+
+static unsigned int f_calc(unsigned int pfs, unsigned int bpp, unsigned int *write)
+{/* return input_f */
+ unsigned int f_calculated = 0;
+ switch (pfs) {
+ case IPU_PIX_FMT_YVU422P:
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_YUV444P:
+ f_calculated = 16;
+ break;
+
+ case IPU_PIX_FMT_RGB565:
+ case IPU_PIX_FMT_YUYV:
+ case IPU_PIX_FMT_UYVY:
+ f_calculated = 8;
+ break;
+
+ case IPU_PIX_FMT_NV12:
+ f_calculated = 8;
+ break;
+
+ default:
+ f_calculated = 0;
+ break;
+
+ }
+ if (!f_calculated) {
+ switch (bpp) {
+ case BPP_32:
+ f_calculated = 2;
+ break;
+
+ case BPP_16:
+ f_calculated = 4;
+ break;
+
+ case BPP_8:
+ case BPP_24:
+ f_calculated = 8;
+ break;
+
+ case BPP_12:
+ f_calculated = 16;
+ break;
+
+ case BPP_18:
+ f_calculated = 32;
+ break;
+
+ default:
+ f_calculated = 0;
+ break;
+ }
+ }
+ return f_calculated;
+}
+
+
+static unsigned int m_calc(unsigned int pfs)
+{
+ unsigned int m_calculated = 0;
+ switch (pfs) {
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YVU422P:
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_YUV444P:
+ m_calculated = 16;
+ break;
+
+ case IPU_PIX_FMT_NV12:
+ case IPU_PIX_FMT_YUYV:
+ case IPU_PIX_FMT_UYVY:
+ m_calculated = 8;
+ break;
+
+ default:
+ m_calculated = 8;
+ break;
+
+ }
+ return m_calculated;
+}
+
+static int calc_split_resize_coeffs(unsigned int inSize, unsigned int outSize,
+ unsigned int *resizeCoeff,
+ unsigned int *downsizeCoeff)
+{
+ uint32_t tempSize;
+ uint32_t tempDownsize;
+
+ if (inSize > 4096) {
+ pr_debug("IC input size(%d) cannot exceed 4096\n",
+ inSize);
+ return -EINVAL;
+ }
+
+ if (outSize > 1024) {
+ pr_debug("IC output size(%d) cannot exceed 1024\n",
+ outSize);
+ return -EINVAL;
+ }
+
+ if ((outSize << 3) < inSize) {
+ pr_debug("IC cannot downsize more than 8:1\n");
+ return -EINVAL;
+ }
+
+ /* Compute downsizing coefficient */
+ /* Output of downsizing unit cannot be more than 1024 */
+ tempDownsize = 0;
+ tempSize = inSize;
+ while (((tempSize > 1024) || (tempSize >= outSize * 2)) &&
+ (tempDownsize < 2)) {
+ tempSize >>= 1;
+ tempDownsize++;
+ }
+ *downsizeCoeff = tempDownsize;
+
+ /* compute resizing coefficient using the following equation:
+ resizeCoeff = M*(SI -1)/(SO - 1)
+ where M = 2^13, SI - input size, SO - output size */
+ *resizeCoeff = (8192L * (tempSize - 1)) / (outSize - 1);
+ if (*resizeCoeff >= 16384L) {
+ pr_debug("Overflow on IC resize coefficient.\n");
+ return -EINVAL;
+ }
+
+ pr_debug("resizing from %u -> %u pixels, "
+ "downsize=%u, resize=%u.%lu (reg=%u)\n", inSize, outSize,
+ *downsizeCoeff, (*resizeCoeff >= 8192L) ? 1 : 0,
+ ((*resizeCoeff & 0x1FFF) * 10000L) / 8192L, *resizeCoeff);
+
+ return 0;
+}
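As a worked example of the resize equation in the comment above (numbers purely illustrative), downscaling a 1920-pixel stripe to 1024 pixels goes through the downsizer once before the resizer:

	/* Worked example (illustrative): inSize = 1920, outSize = 1024
	 *   downsizer: 1920 -> 960                          => downsizeCoeff = 1
	 *   resizeCoeff = 8192 * (960 - 1) / (1024 - 1) = 7679   (< 16384, valid)
	 */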
+
+/* Stripe parameters calculator */
+/**************************************************************************
+Notes:
+MSW = the maximal width allowed for a stripe
+ i.MX31: 720, i.MX35: 800, i.MX37/51/53: 1024
+cirr = the maximal inverse resizing ratio for which overlap in the input
+ is requested; typically cirr~2
+flags
+ bit 0 - equal_stripes
+ 0 each stripe is allowed to have independent parameters
+ for maximal image quality
+ 1 the stripes are requested to have identical parameters
+ (except the base address), for maximal performance
+ bit 1 - vertical/horizontal
+ 0 horizontal
+ 1 vertical
+
+If performance is the top priority (above image quality)
+ Avoid overlap, by setting CIRR = 0
+ This will also force effectively identical_stripes = 1
+ Choose IF & OF that corresponds to the same IOX/SX for both stripes
+ Choose IFW & OFW such that
+ IFW/IM, IFW/IF, OFW/OM, OFW/OF are even integers
+ The function returns an error status:
+ 0: no error
+ 1: invalid input parameters -> aborted without result
+ Valid parameters should satisfy the following conditions
+ IFW <= OFW, otherwise downsizing is required
+ - which is not supported yet
+ 4 <= IFW,OFW, so some interpolation may be needed even without overlap
+ IM, OM, IF, OF should not vanish
+ 2*IF <= IFW
+ so the frame can be split to two equal stripes, even without overlap
+ 2*(OF+IF/irr_opt) <= OFW
+ so a valid positive INW exists even for equal stripes
+ OF <= MSW, otherwise, the left stripe cannot be sufficiently large
+ MSW < OFW, so splitting to stripes is required
+ OFW <= 2*MSW, so two stripes are sufficient
+ (this also implies that 2<=MSW)
+ 2: OF is not a multiple of OM - not fully-supported yet
+ Output is produced but OW is not guaranteed to be a multiple of OM
+ 4: OFW reduced to be a multiple of OM
+ 8: CIRR > 1: truncated to 1
+ Overlap is not supported (and is not needed for upsizing)
+**************************************************************************/
+int ipu_calc_stripes_sizes(const unsigned int input_frame_width,
+ /* input frame width;>1 */
+ unsigned int output_frame_width, /* output frame width; >1 */
+ const unsigned int maximal_stripe_width,
+ /* the maximal width allowed for a stripe */
+ const unsigned long long cirr, /* see above */
+ const unsigned int flags, /* see above */
+ u32 input_pixelformat,/* pixel format after of read channel*/
+ u32 output_pixelformat,/* pixel format after of write channel*/
+ struct stripe_param *left,
+ struct stripe_param *right)
+{
+ const unsigned int irr_frac_bits = 13;
+ const unsigned long irr_steps = 1 << irr_frac_bits;
+ const u64 dirr = ((u64)1) << (32 - 2);
+ /* The maximum relative difference allowed between the irrs */
+ const u64 cr = ((u64)4) << 32;
+ /* The importance ratio between the two terms in the cost function below */
+
+ unsigned int status;
+ unsigned int temp;
+ unsigned int onw_min;
+ unsigned int inw = 0, onw = 0, inw_best = 0;
+ /* number of pixels in the left stripe NOT hidden by the right stripe */
+ u64 irr_opt; /* the optimal inverse resizing ratio */
+ u64 rr_opt; /* the optimal resizing ratio = 1/irr_opt*/
+ u64 dinw; /* the misalignment between the stripes */
+ /* (measured in units of input columns) */
+ u64 difwl, difwr = 0;
+ /* The number of input columns not reflected in the output */
+ /* the resizing ratio used for the right stripe is */
+ /* left->irr and right->irr respectively */
+ u64 cost, cost_min;
+ u64 div; /* result of division */
+ bool equal_stripes = (flags & 0x1) != 0;
+ bool vertical = (flags & 0x2) != 0;
+
+ unsigned int input_m, input_f, output_m, output_f; /* parameters for upsizing by stripes */
+ unsigned int resize_coeff;
+ unsigned int downsize_coeff;
+
+ status = 0;
+
+ if (vertical) {
+ input_f = 2;
+ input_m = 8;
+ output_f = 8;
+ output_m = 2;
+ } else {
+ input_f = f_calc(input_pixelformat, 0, NULL);
+ input_m = m_calc(input_pixelformat);
+ output_f = input_m;
+ output_m = m_calc(output_pixelformat);
+ }
+ if ((input_frame_width < 4) || (output_frame_width < 4))
+ return 1;
+
+ irr_opt = div_u64((((u64)(input_frame_width - 1)) << 32),
+ (output_frame_width - 1));
+ rr_opt = div_u64((((u64)(output_frame_width - 1)) << 32),
+ (input_frame_width - 1));
+
+ if ((input_m == 0) || (output_m == 0) || (input_f == 0) || (output_f == 0)
+ || (input_frame_width < (2 * input_f))
+ || ((((u64)output_frame_width) << 32) <
+ (2 * ((((u64)output_f) << 32) + (input_f * rr_opt))))
+ || (maximal_stripe_width < output_f)
+ || ((output_frame_width <= maximal_stripe_width)
+ && (equal_stripes == 0))
+ || ((2 * maximal_stripe_width) < output_frame_width))
+ return 1;
+
+ if (output_f % output_m)
+ status += 2;
+
+ temp = truncate(0, (((u64)output_frame_width) << 32), output_m);
+ if (temp < output_frame_width) {
+ output_frame_width = temp;
+ status += 4;
+ }
+
+ pr_debug("---------------->\n"
+ "if = %d\n"
+ "im = %d\n"
+ "of = %d\n"
+ "om = %d\n"
+ "irr_opt = %llu\n"
+ "rr_opt = %llu\n"
+ "cirr = %llu\n"
+ "pixel in = %08x\n"
+ "pixel out = %08x\n"
+ "ifw = %d\n"
+ "ofwidth = %d\n",
+ input_f,
+ input_m,
+ output_f,
+ output_m,
+ irr_opt,
+ rr_opt,
+ cirr,
+ input_pixelformat,
+ output_pixelformat,
+ input_frame_width,
+ output_frame_width
+ );
+
+ if (equal_stripes) {
+ if ((irr_opt > cirr) /* overlap in the input is not requested */
+ && ((input_frame_width % (input_m << 1)) == 0)
+ && ((input_frame_width % (input_f << 1)) == 0)
+ && ((output_frame_width % (output_m << 1)) == 0)
+ && ((output_frame_width % (output_f << 1)) == 0)) {
+ /* without overlap */
+ left->input_width = right->input_width = right->input_column =
+ input_frame_width >> 1;
+ left->output_width = right->output_width = right->output_column =
+ output_frame_width >> 1;
+ left->input_column = 0;
+ left->output_column = 0;
+ div = div_u64(((((u64)irr_steps) << 32) *
+ (right->input_width - 1)), (right->output_width - 1));
+ left->irr = right->irr = truncate(0, div, 1);
+ } else { /* with overlap */
+ onw = truncate(0, (((u64)output_frame_width - 1) << 32) >> 1,
+ output_f);
+ inw = truncate(0, onw * irr_opt, input_f);
+ /* this is the maximal inw which allows the same resizing ratio */
+ /* in both stripes */
+ onw = truncate(1, (inw * rr_opt), output_f);
+ div = div_u64((((u64)(irr_steps * inw)) <<
+ 32), onw);
+ left->irr = right->irr = truncate(0, div, 1);
+ left->output_width = right->output_width =
+ output_frame_width - onw;
+ /* These are valid assignments for output_width, */
+ /* assuming output_f is a multiple of output_m */
+ div = (((u64)(left->output_width-1) * (left->irr)) << 32);
+ div = (((u64)1) << 32) + div_u64(div, irr_steps);
+
+ left->input_width = right->input_width = truncate(1, div, input_m);
+
+ div = div_u64((((u64)((right->output_width - 1) * right->irr)) <<
+ 32), irr_steps);
+ difwr = (((u64)(input_frame_width - 1 - inw)) << 32) - div;
+ div = div_u64((difwr + (((u64)input_f) << 32)), 2);
+ left->input_column = truncate(0, div, input_f);
+
+
+ /* This splits the truncated input columns evenly */
+ /* between the left and right margins */
+ right->input_column = left->input_column + inw;
+ left->output_column = 0;
+ right->output_column = onw;
+ }
+ if (left->input_width > left->output_width) {
+ if (calc_split_resize_coeffs(left->input_width,
+ left->output_width,
+ &resize_coeff,
+ &downsize_coeff) < 0)
+ return -EINVAL;
+
+ if (downsize_coeff > 0) {
+ left->irr = right->irr =
+ (downsize_coeff << 14) | resize_coeff;
+ }
+ }
+ pr_debug("inw %d, onw %d, ilw %d, ilc %d, olw %d,"
+ " irw %d, irc %d, orw %d, orc %d, "
+ "difwr %llu, lirr %u\n",
+ inw, onw, left->input_width,
+ left->input_column, left->output_width,
+ right->input_width, right->input_column,
+ right->output_width,
+ right->output_column, difwr, left->irr);
+ } else { /* independent stripes */
+ onw_min = output_frame_width - maximal_stripe_width;
+ /* onw is a multiple of output_f, in the range */
+ /* [max(output_f,output_frame_width-maximal_stripe_width),*/
+ /*min(output_frame_width-2,maximal_stripe_width)] */
+ /* definitely beyond the cost of any valid setting */
+ cost_min = (((u64)input_frame_width) << 32) + cr;
+ onw = truncate(0, ((u64)maximal_stripe_width), output_f);
+ if (output_frame_width - onw == 1)
+ onw -= output_f; /* => onw and output_frame_width-1-onw are positive */
+ inw = truncate(0, onw * irr_opt, input_f);
+ /* this is the maximal inw which allows the same resizing ratio */
+ /* in both stripes */
+ onw = truncate(1, inw * rr_opt, output_f);
+ do {
+ div = div_u64((((u64)(irr_steps * inw)) << 32), onw);
+ left->irr = truncate(0, div, 1);
+ div = div_u64((((u64)(onw * left->irr)) << 32),
+ irr_steps);
+ dinw = (((u64)inw) << 32) - div;
+
+ div = div_u64((((u64)((output_frame_width - 1 - onw) * left->irr)) <<
+ 32), irr_steps);
+
+ difwl = (((u64)(input_frame_width - 1 - inw)) << 32) - div;
+
+ cost = difwl + (((u64)(cr * dinw)) >> 32);
+
+ if (cost < cost_min) {
+ inw_best = inw;
+ cost_min = cost;
+ }
+
+ inw -= input_f;
+ onw = truncate(1, inw * rr_opt, output_f);
+ /* This is the minimal onw which allows the same resizing ratio */
+ /* in both stripes */
+ } while (onw >= onw_min);
+
+ inw = inw_best;
+ onw = truncate(1, inw * rr_opt, output_f);
+ div = div_u64((((u64)(irr_steps * inw)) << 32), onw);
+ left->irr = truncate(0, div, 1);
+
+ left->output_width = onw;
+ right->output_width = output_frame_width - onw;
+ /* These are valid assignments for output_width, */
+ /* assuming output_f is a multiple of output_m */
+ left->input_width = truncate(1, ((u64)(inw + 1)) << 32, input_m);
+ right->input_width = truncate(1, ((u64)(input_frame_width - inw)) <<
+ 32, input_m);
+
+ div = div_u64((((u64)(irr_steps * (input_frame_width - 1 - inw))) <<
+ 32), (right->output_width - 1));
+ right->irr = truncate(0, div, 1);
+ temp = truncate(0, ((u64)left->irr) * ((((u64)1) << 32) + dirr), 1);
+ if (temp < right->irr)
+ right->irr = temp;
+ div = div_u64(((u64)((right->output_width - 1) * right->irr) <<
+ 32), irr_steps);
+ difwr = (u64)(input_frame_width - 1 - inw) - div;
+
+
+ div = div_u64((difwr + (((u64)input_f) << 32)), 2);
+ left->input_column = truncate(0, div, input_f);
+
+ /* This splits the truncated input columns evenly */
+ /* between the left and right margins */
+ right->input_column = left->input_column + inw;
+ left->output_column = 0;
+ right->output_column = onw;
+ if (left->input_width > left->output_width) {
+ if (calc_split_resize_coeffs(left->input_width,
+ left->output_width,
+ &resize_coeff,
+ &downsize_coeff) < 0)
+ return -EINVAL;
+ left->irr = (downsize_coeff << 14) | resize_coeff;
+ }
+ if (right->input_width > right->output_width) {
+ if (calc_split_resize_coeffs(right->input_width,
+ right->output_width,
+ &resize_coeff,
+ &downsize_coeff) < 0)
+ return -EINVAL;
+ right->irr = (downsize_coeff << 14) | resize_coeff;
+ }
+ }
+ return status;
+}
+EXPORT_SYMBOL(ipu_calc_stripes_sizes);
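+
+/*
+ * Illustrative reading of the equal-stripes/no-overlap branch above (not
+ * part of this patch; the numbers are an assumption): splitting a
+ * 1024-pixel wide input that is resized to an 800-pixel wide output, with
+ * equal_stripes set and all of the input_m/input_f/output_m/output_f
+ * alignment checks passing, yields two 512-pixel input stripes mapped to
+ * two 400-pixel output stripes, with right->input_column = 512 and
+ * right->output_column = 400; both stripes share the same irr derived
+ * from (512 - 1) / (400 - 1).
+ */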
diff --git a/drivers/mxc/ipu3/ipu_capture.c b/drivers/mxc/ipu3/ipu_capture.c
new file mode 100644
index 000000000000..09bb6d98783b
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_capture.c
@@ -0,0 +1,816 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file ipu_capture.c
+ *
+ * @brief IPU capture base functions
+ *
+ * @ingroup IPU
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ipu-v3.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "ipu_prv.h"
+#include "ipu_regs.h"
+
+/*!
+ * _ipu_csi_mclk_set
+ *
+ * @param ipu ipu handler
+ * @param pixel_clk desired pixel clock frequency in Hz
+ * @param csi csi 0 or csi 1
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int _ipu_csi_mclk_set(struct ipu_soc *ipu, uint32_t pixel_clk, uint32_t csi)
+{
+ uint32_t temp;
+ uint32_t div_ratio;
+
+ div_ratio = (clk_get_rate(ipu->ipu_clk) / pixel_clk) - 1;
+
+	if (div_ratio > 0xFF) {
+		dev_dbg(ipu->dev, "value of pixel_clk exceeds the supported range\n");
+ return -EINVAL;
+ }
+
+ temp = ipu_csi_read(ipu, csi, CSI_SENS_CONF);
+ temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
+ ipu_csi_write(ipu, csi, temp |
+ (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
+ CSI_SENS_CONF);
+
+ return 0;
+}
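+
+/*
+ * Example of the divider math above (illustrative only, not part of this
+ * patch): assuming ipu_clk runs at 264 MHz (a typical HSP clock on i.MX6,
+ * an assumption here), requesting a 24 MHz sensor master clock gives
+ * div_ratio = 264000000 / 24000000 - 1 = 10, so the CSI divides ipu_clk
+ * by div_ratio + 1 = 11 and outputs 24 MHz on MCLK.
+ */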
+
+/*!
+ * ipu_csi_init_interface
+ * Sets initial values for the CSI registers.
+ * The width and height of the sensor and the actual frame size will be
+ * set to the same values.
+ * @param ipu ipu handler
+ * @param width Sensor width
+ * @param height Sensor height
+ * @param pixel_fmt pixel format
+ * @param cfg_param ipu_csi_signal_cfg_t structure
+ * @param csi csi 0 or csi 1
+ *
+ * @return 0 for success, -EINVAL for error
+ */
+int32_t
+ipu_csi_init_interface(struct ipu_soc *ipu, uint16_t width, uint16_t height,
+ uint32_t pixel_fmt, ipu_csi_signal_cfg_t cfg_param)
+{
+ uint32_t data = 0;
+ uint32_t csi = cfg_param.csi;
+
+	/*
+	 * Set the SENS_DATA_FORMAT bits (8, 9 and 10).
+	 * RGB or YUV444 is 0, which is already the value in 'data', so it is
+	 * not set explicitly. It is also the value used if an attempt is made
+	 * to set an invalid format.
+	 */
+ switch (pixel_fmt) {
+ case IPU_PIX_FMT_YUYV:
+ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ break;
+ case IPU_PIX_FMT_UYVY:
+ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+ break;
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_BGR24:
+ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
+ break;
+ case IPU_PIX_FMT_GENERIC:
+ case IPU_PIX_FMT_GENERIC_16:
+ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ break;
+ case IPU_PIX_FMT_RGB565:
+ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ break;
+ case IPU_PIX_FMT_RGB555:
+ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set the CSI_SENS_CONF register remaining fields */
+ data |= cfg_param.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
+ cfg_param.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
+ cfg_param.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
+ cfg_param.Vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
+ cfg_param.Hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
+ cfg_param.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
+ cfg_param.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
+ cfg_param.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
+ cfg_param.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
+ cfg_param.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
+ cfg_param.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
+
+ _ipu_get(ipu);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ ipu_csi_write(ipu, csi, data, CSI_SENS_CONF);
+
+ /* Setup sensor frame size */
+ ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_SENS_FRM_SIZE);
+
+ /* Set CCIR registers */
+ if (cfg_param.clk_mode == IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE) {
+ ipu_csi_write(ipu, csi, 0x40030, CSI_CCIR_CODE_1);
+ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
+ } else if (cfg_param.clk_mode == IPU_CSI_CLK_MODE_CCIR656_INTERLACED) {
+ if (width == 720 && height == 625) {
+ /* PAL case */
+ /*
+ * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
+ * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
+ */
+ ipu_csi_write(ipu, csi, 0x40596, CSI_CCIR_CODE_1);
+ /*
+ * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
+ * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
+ */
+ ipu_csi_write(ipu, csi, 0xD07DF, CSI_CCIR_CODE_2);
+
+ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
+
+ } else if (width == 720 && height == 525) {
+ /* NTSC case */
+ /*
+ * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
+ * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
+ */
+ ipu_csi_write(ipu, csi, 0xD07DF, CSI_CCIR_CODE_1);
+ /*
+ * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
+ * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
+ */
+ ipu_csi_write(ipu, csi, 0x40596, CSI_CCIR_CODE_2);
+ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
+ } else {
+ dev_err(ipu->dev, "Unsupported CCIR656 interlaced "
+ "video mode\n");
+ mutex_unlock(&ipu->mutex_lock);
+ _ipu_put(ipu);
+ return -EINVAL;
+ }
+ _ipu_csi_ccir_err_detection_enable(ipu, csi);
+ } else if ((cfg_param.clk_mode ==
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR) ||
+ (cfg_param.clk_mode ==
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR) ||
+ (cfg_param.clk_mode ==
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR) ||
+ (cfg_param.clk_mode ==
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR)) {
+ ipu_csi_write(ipu, csi, 0x40030, CSI_CCIR_CODE_1);
+ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
+ _ipu_csi_ccir_err_detection_enable(ipu, csi);
+ } else if ((cfg_param.clk_mode == IPU_CSI_CLK_MODE_GATED_CLK) ||
+ (cfg_param.clk_mode == IPU_CSI_CLK_MODE_NONGATED_CLK)) {
+ _ipu_csi_ccir_err_detection_disable(ipu, csi);
+ }
+
+ dev_dbg(ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
+ ipu_csi_read(ipu, csi, CSI_SENS_CONF));
+ dev_dbg(ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
+ ipu_csi_read(ipu, csi, CSI_ACT_FRM_SIZE));
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ _ipu_put(ipu);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_csi_init_interface);
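+
+/*
+ * Illustrative usage sketch for ipu_csi_init_interface() (not part of this
+ * patch). The sensor geometry, the IPU_CSI_DATA_WIDTH_8 enumerator name and
+ * the polarity values are assumptions for a hypothetical 640x480 gated-clock
+ * parallel sensor on CSI0:
+ *
+ *	ipu_csi_signal_cfg_t cfg = {
+ *		.csi		= 0,
+ *		.clk_mode	= IPU_CSI_CLK_MODE_GATED_CLK,
+ *		.data_width	= IPU_CSI_DATA_WIDTH_8,
+ *		.pixclk_pol	= 0,
+ *		.Vsync_pol	= 0,
+ *		.Hsync_pol	= 0,
+ *		.ext_vsync	= 0,
+ *	};
+ *	int ret = ipu_csi_init_interface(ipu, 640, 480, IPU_PIX_FMT_UYVY, cfg);
+ *	if (ret < 0)
+ *		return ret;
+ */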
+
+/*!
+ * ipu_csi_get_sensor_protocol
+ *
+ * @param ipu ipu handler
+ * @param csi csi 0 or csi 1
+ *
+ * @return Returns sensor protocol
+ */
+int32_t ipu_csi_get_sensor_protocol(struct ipu_soc *ipu, uint32_t csi)
+{
+ int ret;
+ _ipu_get(ipu);
+ ret = (ipu_csi_read(ipu, csi, CSI_SENS_CONF) &
+ CSI_SENS_CONF_SENS_PRTCL_MASK) >>
+ CSI_SENS_CONF_SENS_PRTCL_SHIFT;
+ _ipu_put(ipu);
+ return ret;
+}
+EXPORT_SYMBOL(ipu_csi_get_sensor_protocol);
+
+/*!
+ * ipu_csi_enable_mclk
+ *
+ * @param ipu ipu handler
+ * @param csi csi 0 or csi 1
+ * @param flag true to enable mclk, false to disable mclk
+ * @param	wait		true to wait 10ms for the clock to stabilize, false to skip the wait
+ *
+ * @return Returns 0 on success
+ */
+int ipu_csi_enable_mclk(struct ipu_soc *ipu, int csi, bool flag, bool wait)
+{
+ /* Return immediately if there is no csi_clk to manage */
+ if (ipu->csi_clk[csi] == NULL)
+ return 0;
+
+ if (flag) {
+ clk_enable(ipu->csi_clk[csi]);
+ if (wait == true)
+ msleep(10);
+ } else {
+ clk_disable(ipu->csi_clk[csi]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_csi_enable_mclk);
+
+/*!
+ * ipu_csi_get_window_size
+ *
+ * @param ipu ipu handler
+ * @param width pointer to window width
+ * @param height pointer to window height
+ * @param csi csi 0 or csi 1
+ */
+void ipu_csi_get_window_size(struct ipu_soc *ipu, uint32_t *width, uint32_t *height, uint32_t csi)
+{
+ uint32_t reg;
+
+ _ipu_get(ipu);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ reg = ipu_csi_read(ipu, csi, CSI_ACT_FRM_SIZE);
+ *width = (reg & 0xFFFF) + 1;
+ *height = (reg >> 16 & 0xFFFF) + 1;
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ _ipu_put(ipu);
+}
+EXPORT_SYMBOL(ipu_csi_get_window_size);
+
+/*!
+ * ipu_csi_set_window_size
+ *
+ * @param ipu ipu handler
+ * @param width window width
+ * @param height window height
+ * @param csi csi 0 or csi 1
+ */
+void ipu_csi_set_window_size(struct ipu_soc *ipu, uint32_t width, uint32_t height, uint32_t csi)
+{
+ _ipu_get(ipu);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_ACT_FRM_SIZE);
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ _ipu_put(ipu);
+}
+EXPORT_SYMBOL(ipu_csi_set_window_size);
+
+/*!
+ * ipu_csi_set_window_pos
+ *
+ * @param ipu ipu handler
+ * @param left uint32 window x start
+ * @param top uint32 window y start
+ * @param csi csi 0 or csi 1
+ */
+void ipu_csi_set_window_pos(struct ipu_soc *ipu, uint32_t left, uint32_t top, uint32_t csi)
+{
+ uint32_t temp;
+
+ _ipu_get(ipu);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
+ temp &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
+ temp |= ((top << CSI_VSC_SHIFT) | (left << CSI_HSC_SHIFT));
+ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ _ipu_put(ipu);
+}
+EXPORT_SYMBOL(ipu_csi_set_window_pos);
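+
+/*
+ * Illustrative crop example combining the two window helpers above (not part
+ * of this patch; the geometry is an assumption): to capture a 640x480 window
+ * starting 40 pixels into a 720x480 sensor frame on CSI0:
+ *
+ *	ipu_csi_set_window_size(ipu, 640, 480, 0);
+ *	ipu_csi_set_window_pos(ipu, 40, 0, 0);
+ */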
+
+/*!
+ * _ipu_csi_horizontal_downsize_enable
+ * Enable horizontal downsizing(decimation) by 2.
+ *
+ * @param ipu ipu handler
+ * @param csi csi 0 or csi 1
+ */
+void _ipu_csi_horizontal_downsize_enable(struct ipu_soc *ipu, uint32_t csi)
+{
+ uint32_t temp;
+
+ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
+ temp |= CSI_HORI_DOWNSIZE_EN;
+ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
+}
+
+/*!
+ * _ipu_csi_horizontal_downsize_disable
+ * Disable horizontal downsizing(decimation) by 2.
+ *
+ * @param ipu ipu handler
+ * @param csi csi 0 or csi 1
+ */
+void _ipu_csi_horizontal_downsize_disable(struct ipu_soc *ipu, uint32_t csi)
+{
+ uint32_t temp;
+
+ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
+ temp &= ~CSI_HORI_DOWNSIZE_EN;
+ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
+}
+
+/*!
+ * _ipu_csi_vertical_downsize_enable
+ * Enable vertical downsizing(decimation) by 2.
+ *
+ * @param ipu ipu handler
+ * @param csi csi 0 or csi 1
+ */
+void _ipu_csi_vertical_downsize_enable(struct ipu_soc *ipu, uint32_t csi)
+{
+ uint32_t temp;
+
+ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
+ temp |= CSI_VERT_DOWNSIZE_EN;
+ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
+}
+
+/*!
+ * _ipu_csi_vertical_downsize_disable
+ * Disable vertical downsizing(decimation) by 2.
+ *
+ * @param ipu ipu handler
+ * @param csi csi 0 or csi 1
+ */
+void _ipu_csi_vertical_downsize_disable(struct ipu_soc *ipu, uint32_t csi)
+{
+ uint32_t temp;
+
+ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
+ temp &= ~CSI_VERT_DOWNSIZE_EN;
+ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
+}
+
+/*!
+ * _ipu_csi_set_test_generator
+ *
+ * @param ipu ipu handler
+ * @param	active		true to enable the test generator, false to disable it
+ * @param r_value red value for the generated pattern of even pixel
+ * @param g_value green value for the generated pattern of even
+ * pixel
+ * @param b_value blue value for the generated pattern of even pixel
+ * @param	pix_clk		desired pixel clock frequency in Hz
+ * @param csi csi 0 or csi 1
+ */
+void _ipu_csi_set_test_generator(struct ipu_soc *ipu, bool active, uint32_t r_value,
+ uint32_t g_value, uint32_t b_value, uint32_t pix_clk, uint32_t csi)
+{
+ uint32_t temp;
+
+ temp = ipu_csi_read(ipu, csi, CSI_TST_CTRL);
+
+ if (active == false) {
+ temp &= ~CSI_TEST_GEN_MODE_EN;
+ ipu_csi_write(ipu, csi, temp, CSI_TST_CTRL);
+ } else {
+ /* Set sensb_mclk div_ratio*/
+ _ipu_csi_mclk_set(ipu, pix_clk, csi);
+
+ temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
+ CSI_TEST_GEN_B_MASK);
+ temp |= CSI_TEST_GEN_MODE_EN;
+ temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
+ (g_value << CSI_TEST_GEN_G_SHIFT) |
+ (b_value << CSI_TEST_GEN_B_SHIFT);
+ ipu_csi_write(ipu, csi, temp, CSI_TST_CTRL);
+ }
+}
+
+/*!
+ * _ipu_csi_ccir_err_detection_enable
+ * Enable error detection and correction for
+ * CCIR interlaced mode with protection bit.
+ *
+ * @param ipu ipu handler
+ * @param csi csi 0 or csi 1
+ */
+void _ipu_csi_ccir_err_detection_enable(struct ipu_soc *ipu, uint32_t csi)
+{
+ uint32_t temp;
+
+ temp = ipu_csi_read(ipu, csi, CSI_CCIR_CODE_1);
+ temp |= CSI_CCIR_ERR_DET_EN;
+ ipu_csi_write(ipu, csi, temp, CSI_CCIR_CODE_1);
+
+}
+
+/*!
+ * _ipu_csi_ccir_err_detection_disable
+ * Disable error detection and correction for
+ * CCIR interlaced mode with protection bit.
+ *
+ * @param ipu ipu handler
+ * @param csi csi 0 or csi 1
+ */
+void _ipu_csi_ccir_err_detection_disable(struct ipu_soc *ipu, uint32_t csi)
+{
+ uint32_t temp;
+
+ temp = ipu_csi_read(ipu, csi, CSI_CCIR_CODE_1);
+ temp &= ~CSI_CCIR_ERR_DET_EN;
+ ipu_csi_write(ipu, csi, temp, CSI_CCIR_CODE_1);
+
+}
+
+/*!
+ * _ipu_csi_set_mipi_di
+ *
+ * @param ipu ipu handler
+ * @param num MIPI data identifier 0-3 handled by CSI
+ * @param di_val data identifier value
+ * @param csi csi 0 or csi 1
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int _ipu_csi_set_mipi_di(struct ipu_soc *ipu, uint32_t num, uint32_t di_val, uint32_t csi)
+{
+ uint32_t temp;
+ int retval = 0;
+
+ if (di_val > 0xFFL) {
+ retval = -EINVAL;
+ goto err;
+ }
+
+ temp = ipu_csi_read(ipu, csi, CSI_MIPI_DI);
+
+ switch (num) {
+ case IPU_CSI_MIPI_DI0:
+ temp &= ~CSI_MIPI_DI0_MASK;
+ temp |= (di_val << CSI_MIPI_DI0_SHIFT);
+ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
+ break;
+ case IPU_CSI_MIPI_DI1:
+ temp &= ~CSI_MIPI_DI1_MASK;
+ temp |= (di_val << CSI_MIPI_DI1_SHIFT);
+ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
+ break;
+ case IPU_CSI_MIPI_DI2:
+ temp &= ~CSI_MIPI_DI2_MASK;
+ temp |= (di_val << CSI_MIPI_DI2_SHIFT);
+ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
+ break;
+ case IPU_CSI_MIPI_DI3:
+ temp &= ~CSI_MIPI_DI3_MASK;
+ temp |= (di_val << CSI_MIPI_DI3_SHIFT);
+ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+
+err:
+ return retval;
+}
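+
+/*
+ * Note on di_val (illustrative, not part of this patch): the MIPI CSI-2 data
+ * identifier byte carries the virtual channel in bits 7:6 and the data type
+ * in bits 5:0, so, assuming the MIPI gasket feeding the CSI uses exactly that
+ * encoding, virtual channel 0 with YUV422 8-bit (data type 0x1E) gives
+ * di_val = 0x1E, and virtual channel 1 with the same type gives 0x5E.
+ */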
+
+/*!
+ * _ipu_csi_set_skip_isp
+ *
+ * @param ipu ipu handler
+ * @param	skip		bitmask selecting the frames to be skipped;
+ *				set the corresponding bits to 1
+ * @param max_ratio number of frames in a skipping set and the
+ * maximum value of max_ratio is 5
+ * @param csi csi 0 or csi 1
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int _ipu_csi_set_skip_isp(struct ipu_soc *ipu, uint32_t skip, uint32_t max_ratio, uint32_t csi)
+{
+ uint32_t temp;
+ int retval = 0;
+
+ if (max_ratio > 5) {
+ retval = -EINVAL;
+ goto err;
+ }
+
+ temp = ipu_csi_read(ipu, csi, CSI_SKIP);
+ temp &= ~(CSI_MAX_RATIO_SKIP_ISP_MASK | CSI_SKIP_ISP_MASK);
+ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_ISP_SHIFT) |
+ (skip << CSI_SKIP_ISP_SHIFT);
+ ipu_csi_write(ipu, csi, temp, CSI_SKIP);
+
+err:
+ return retval;
+}
+
+/*!
+ * _ipu_csi_set_skip_smfc
+ *
+ * @param ipu ipu handler
+ * @param	skip		bitmask selecting the frames to be skipped;
+ *				set the corresponding bits to 1
+ * @param max_ratio number of frames in a skipping set and the
+ * maximum value of max_ratio is 5
+ * @param id csi to smfc skipping id
+ * @param csi csi 0 or csi 1
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int _ipu_csi_set_skip_smfc(struct ipu_soc *ipu, uint32_t skip,
+ uint32_t max_ratio, uint32_t id, uint32_t csi)
+{
+ uint32_t temp;
+ int retval = 0;
+
+ if (max_ratio > 5 || id > 3) {
+ retval = -EINVAL;
+ goto err;
+ }
+
+ temp = ipu_csi_read(ipu, csi, CSI_SKIP);
+ temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
+ CSI_SKIP_SMFC_MASK);
+ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
+ (id << CSI_ID_2_SKIP_SHIFT) |
+ (skip << CSI_SKIP_SMFC_SHIFT);
+ ipu_csi_write(ipu, csi, temp, CSI_SKIP);
+
+err:
+ return retval;
+}
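+
+/*
+ * Illustrative register view of the helper above (not part of this patch):
+ * _ipu_csi_set_skip_smfc(ipu, 0x2, 3, 0, 0) packs max_ratio = 3, id = 0 and
+ * skip = 0x2 into CSI_SKIP for CSI0; which frame of the skipping set each
+ * skip bit drops is hardware defined, so any frame-level interpretation here
+ * is an assumption.
+ */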
+
+/*!
+ * _ipu_smfc_init
+ * Map CSI frames to IDMAC channels.
+ *
+ * @param ipu ipu handler
+ * @param channel IDMAC channel 0-3
+ * @param mipi_id mipi id number 0-3
+ * @param csi csi0 or csi1
+ */
+void _ipu_smfc_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t mipi_id, uint32_t csi)
+{
+ uint32_t temp;
+
+ temp = ipu_smfc_read(ipu, SMFC_MAP);
+
+ switch (channel) {
+ case CSI_MEM0:
+ temp &= ~SMFC_MAP_CH0_MASK;
+ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH0_SHIFT;
+ break;
+ case CSI_MEM1:
+ temp &= ~SMFC_MAP_CH1_MASK;
+ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH1_SHIFT;
+ break;
+ case CSI_MEM2:
+ temp &= ~SMFC_MAP_CH2_MASK;
+ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH2_SHIFT;
+ break;
+ case CSI_MEM3:
+ temp &= ~SMFC_MAP_CH3_MASK;
+ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH3_SHIFT;
+ break;
+ default:
+ return;
+ }
+
+ ipu_smfc_write(ipu, temp, SMFC_MAP);
+}
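+
+/*
+ * Example of the SMFC_MAP encoding above (illustrative, not part of this
+ * patch): _ipu_smfc_init(ipu, CSI_MEM0, 2, 1) writes ((1 << 2) | 2) = 6
+ * into the SMFC_MAP_CH0 field, i.e. SMFC channel 0 captures MIPI ID 2
+ * from CSI1.
+ */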
+
+/*!
+ * _ipu_smfc_set_wmc
+ * Caution: the number of required channels, which channels are enabled
+ * and the FIFO size per channel are subject to hardware restrictions.
+ *
+ * @param ipu ipu handler
+ * @param channel IDMAC channel 0-3
+ * @param set set 1 or clear 0
+ * @param level water mark level when FIFO is on the
+ * relative size
+ */
+void _ipu_smfc_set_wmc(struct ipu_soc *ipu, ipu_channel_t channel, bool set, uint32_t level)
+{
+ uint32_t temp;
+
+ temp = ipu_smfc_read(ipu, SMFC_WMC);
+
+ switch (channel) {
+ case CSI_MEM0:
+ if (set == true) {
+ temp &= ~SMFC_WM0_SET_MASK;
+ temp |= level << SMFC_WM0_SET_SHIFT;
+ } else {
+ temp &= ~SMFC_WM0_CLR_MASK;
+ temp |= level << SMFC_WM0_CLR_SHIFT;
+ }
+ break;
+ case CSI_MEM1:
+ if (set == true) {
+ temp &= ~SMFC_WM1_SET_MASK;
+ temp |= level << SMFC_WM1_SET_SHIFT;
+ } else {
+ temp &= ~SMFC_WM1_CLR_MASK;
+ temp |= level << SMFC_WM1_CLR_SHIFT;
+ }
+ break;
+ case CSI_MEM2:
+ if (set == true) {
+ temp &= ~SMFC_WM2_SET_MASK;
+ temp |= level << SMFC_WM2_SET_SHIFT;
+ } else {
+ temp &= ~SMFC_WM2_CLR_MASK;
+ temp |= level << SMFC_WM2_CLR_SHIFT;
+ }
+ break;
+ case CSI_MEM3:
+ if (set == true) {
+ temp &= ~SMFC_WM3_SET_MASK;
+ temp |= level << SMFC_WM3_SET_SHIFT;
+ } else {
+ temp &= ~SMFC_WM3_CLR_MASK;
+ temp |= level << SMFC_WM3_CLR_SHIFT;
+ }
+ break;
+ default:
+ return;
+ }
+
+ ipu_smfc_write(ipu, temp, SMFC_WMC);
+}
+
+/*!
+ * _ipu_smfc_set_burst_size
+ *
+ * @param ipu ipu handler
+ * @param channel IDMAC channel 0-3
+ * @param bs burst size of IDMAC channel,
+ *				the value programmed here should be BURST_SIZE-1
+ */
+void _ipu_smfc_set_burst_size(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t bs)
+{
+ uint32_t temp;
+
+ temp = ipu_smfc_read(ipu, SMFC_BS);
+
+ switch (channel) {
+ case CSI_MEM0:
+ temp &= ~SMFC_BS0_MASK;
+ temp |= bs << SMFC_BS0_SHIFT;
+ break;
+ case CSI_MEM1:
+ temp &= ~SMFC_BS1_MASK;
+ temp |= bs << SMFC_BS1_SHIFT;
+ break;
+ case CSI_MEM2:
+ temp &= ~SMFC_BS2_MASK;
+ temp |= bs << SMFC_BS2_SHIFT;
+ break;
+ case CSI_MEM3:
+ temp &= ~SMFC_BS3_MASK;
+ temp |= bs << SMFC_BS3_SHIFT;
+ break;
+ default:
+ return;
+ }
+
+ ipu_smfc_write(ipu, temp, SMFC_BS);
+}
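+
+/*
+ * Example (illustrative, not part of this patch): to program 16-pixel bursts
+ * for the SMFC channel backing CSI_MEM0, pass BURST_SIZE - 1:
+ *
+ *	_ipu_smfc_set_burst_size(ipu, CSI_MEM0, 16 - 1);
+ */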
+
+/*!
+ * _ipu_csi_init
+ *
+ * @param ipu ipu handler
+ * @param channel IDMAC channel
+ * @param csi csi 0 or csi 1
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int _ipu_csi_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t csi)
+{
+ uint32_t csi_sens_conf, csi_dest;
+ int retval = 0;
+
+ switch (channel) {
+ case CSI_MEM0:
+ case CSI_MEM1:
+ case CSI_MEM2:
+ case CSI_MEM3:
+ csi_dest = CSI_DATA_DEST_IDMAC;
+ break;
+ case CSI_PRP_ENC_MEM:
+ case CSI_PRP_VF_MEM:
+ csi_dest = CSI_DATA_DEST_IC;
+ break;
+ default:
+ retval = -EINVAL;
+ goto err;
+ }
+
+ csi_sens_conf = ipu_csi_read(ipu, csi, CSI_SENS_CONF);
+ csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
+ ipu_csi_write(ipu, csi, csi_sens_conf | (csi_dest <<
+ CSI_SENS_CONF_DATA_DEST_SHIFT), CSI_SENS_CONF);
+err:
+ return retval;
+}
+
+/*!
+ * csi_irq_handler
+ *
+ * @param irq interrupt id
+ * @param dev_id pointer to ipu handler
+ *
+ * @return	Returns IRQ_HANDLED
+ */
+static irqreturn_t csi_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_soc *ipu = dev_id;
+ struct completion *comp = &ipu->csi_comp;
+
+ complete(comp);
+ return IRQ_HANDLED;
+}
+
+/*!
+ * _ipu_csi_wait4eof
+ *
+ * @param ipu ipu handler
+ * @param channel IDMAC channel
+ *
+ */
+void _ipu_csi_wait4eof(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ int ret;
+ int irq = 0;
+
+ if (channel == CSI_MEM0)
+ irq = IPU_IRQ_CSI0_OUT_EOF;
+ else if (channel == CSI_MEM1)
+ irq = IPU_IRQ_CSI1_OUT_EOF;
+ else if (channel == CSI_MEM2)
+ irq = IPU_IRQ_CSI2_OUT_EOF;
+ else if (channel == CSI_MEM3)
+ irq = IPU_IRQ_CSI3_OUT_EOF;
+ else if (channel == CSI_PRP_ENC_MEM)
+ irq = IPU_IRQ_PRP_ENC_OUT_EOF;
+ else if (channel == CSI_PRP_VF_MEM)
+ irq = IPU_IRQ_PRP_VF_OUT_EOF;
+	else {
+ dev_err(ipu->dev, "Not a CSI channel\n");
+ return;
+ }
+
+ init_completion(&ipu->csi_comp);
+ ret = ipu_request_irq(ipu, irq, csi_irq_handler, 0, NULL, ipu);
+ if (ret < 0) {
+ dev_err(ipu->dev, "CSI irq %d in use\n", irq);
+ return;
+ }
+ ret = wait_for_completion_timeout(&ipu->csi_comp, msecs_to_jiffies(500));
+ ipu_free_irq(ipu, irq, ipu);
+ dev_dbg(ipu->dev, "CSI stop timeout - %d * 10ms\n", 5 - ret);
+}
diff --git a/drivers/mxc/ipu3/ipu_common.c b/drivers/mxc/ipu3/ipu_common.c
new file mode 100644
index 000000000000..8ba5f9987cb9
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_common.c
@@ -0,0 +1,3458 @@
+/*
+ * Copyright 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file ipu_common.c
+ *
+ * @brief This file contains the IPU driver common API functions.
+ *
+ * @ingroup IPU
+ */
+#include <linux/busfreq-imx.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipu-v3.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+
+#include "ipu_param_mem.h"
+#include "ipu_regs.h"
+
+static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
+
+/* Static functions */
+static irqreturn_t ipu_sync_irq_handler(int irq, void *desc);
+static irqreturn_t ipu_err_irq_handler(int irq, void *desc);
+
+static inline uint32_t channel_2_dma(ipu_channel_t ch, ipu_buffer_t type)
+{
+ return ((uint32_t) ch >> (6 * type)) & 0x3F;
+};
+
+static inline int _ipu_is_ic_chan(uint32_t dma_chan)
+{
+ return (((dma_chan >= 11) && (dma_chan <= 22) && (dma_chan != 17) &&
+ (dma_chan != 18)));
+}
+
+static inline int _ipu_is_vdi_out_chan(uint32_t dma_chan)
+{
+ return (dma_chan == 5);
+}
+
+static inline int _ipu_is_ic_graphic_chan(uint32_t dma_chan)
+{
+ return (dma_chan == 14 || dma_chan == 15);
+}
+
+/* Either DP BG or DP FG can be graphic window */
+static inline int _ipu_is_dp_graphic_chan(uint32_t dma_chan)
+{
+ return (dma_chan == 23 || dma_chan == 27);
+}
+
+static inline int _ipu_is_irt_chan(uint32_t dma_chan)
+{
+ return ((dma_chan >= 45) && (dma_chan <= 50));
+}
+
+static inline int _ipu_is_dmfc_chan(uint32_t dma_chan)
+{
+ return ((dma_chan >= 23) && (dma_chan <= 29));
+}
+
+static inline int _ipu_is_trb_chan(struct ipu_soc *ipu, uint32_t dma_chan)
+{
+ return (((dma_chan == 8) || (dma_chan == 9) ||
+ (dma_chan == 10) || (dma_chan == 13) ||
+ (dma_chan == 21) || (dma_chan == 23) ||
+ (dma_chan == 27) || (dma_chan == 28)) &&
+ (ipu->devtype >= IPUv3EX));
+}
+
+/*
+ * We usually use IDMAC 23 as full plane and IDMAC 27 as partial
+ * plane.
+ * IDMAC 23/24/28/41 can each drive a display on its own - primary.
+ * IDMAC 27 depends on IDMAC 23 - nonprimary.
+ */
+static inline int _ipu_is_primary_disp_chan(uint32_t dma_chan)
+{
+ return ((dma_chan == 23) || (dma_chan == 24) ||
+ (dma_chan == 28) || (dma_chan == 41));
+}
+
+static inline int _ipu_is_sync_irq(uint32_t irq)
+{
+ /* sync interrupt register number */
+ int reg_num = irq / 32 + 1;
+
+ return ((reg_num == 1) || (reg_num == 2) || (reg_num == 3) ||
+ (reg_num == 4) || (reg_num == 7) || (reg_num == 8) ||
+ (reg_num == 11) || (reg_num == 12) || (reg_num == 13) ||
+ (reg_num == 14) || (reg_num == 15));
+}
+
+#define idma_is_valid(ch) (ch != NO_DMA)
+#define idma_mask(ch) (idma_is_valid(ch) ? (1UL << (ch & 0x1F)) : 0)
+#define tri_cur_buf_mask(ch) (idma_mask(ch*2) * 3)
+#define tri_cur_buf_shift(ch) (ffs(idma_mask(ch*2)) - 1)
+
+static inline bool idma_is_set(struct ipu_soc *ipu, uint32_t reg, uint32_t dma)
+{
+ return !!(ipu_idmac_read(ipu, reg) & idma_mask(dma));
+}
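+
+/*
+ * Example of the idma_mask() helper above (illustrative, not part of this
+ * patch): channel 23 gives 1UL << 23 = 0x00800000 in the first 32-channel
+ * register bank, while channel 41 gives 1UL << (41 & 0x1F) = 0x00000200,
+ * which is then used against the second bank (e.g. IDMAC_CHA_EN(32)).
+ */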
+
+static int ipu_clk_setup_enable(struct ipu_soc *ipu)
+{
+ char pixel_clk_0[] = "ipu1_pclk_0";
+ char pixel_clk_1[] = "ipu1_pclk_1";
+ char pixel_clk_0_sel[] = "ipu1_pclk0_sel";
+ char pixel_clk_1_sel[] = "ipu1_pclk1_sel";
+ char pixel_clk_0_div[] = "ipu1_pclk0_div";
+ char pixel_clk_1_div[] = "ipu1_pclk1_div";
+ char *ipu_pixel_clk_sel[] = { "ipu1", "ipu1_di0", "ipu1_di1", };
+ char *pclk_sel;
+ struct clk *clk;
+ int ret;
+ int i;
+
+ pixel_clk_0[3] += ipu->id;
+ pixel_clk_1[3] += ipu->id;
+ pixel_clk_0_sel[3] += ipu->id;
+ pixel_clk_1_sel[3] += ipu->id;
+ pixel_clk_0_div[3] += ipu->id;
+ pixel_clk_1_div[3] += ipu->id;
+ for (i = 0; i < ARRAY_SIZE(ipu_pixel_clk_sel); i++) {
+ pclk_sel = ipu_pixel_clk_sel[i];
+ pclk_sel[3] += ipu->id;
+ }
+ dev_dbg(ipu->dev, "ipu_clk = %lu\n", clk_get_rate(ipu->ipu_clk));
+
+ clk = clk_register_mux_pix_clk(ipu->dev, pixel_clk_0_sel,
+ (const char **)ipu_pixel_clk_sel,
+ ARRAY_SIZE(ipu_pixel_clk_sel),
+ 0, ipu->id, 0, 0);
+ if (IS_ERR(clk)) {
+ dev_err(ipu->dev, "clk_register mux di0 failed");
+ return PTR_ERR(clk);
+ }
+ ipu->pixel_clk_sel[0] = clk;
+ clk = clk_register_mux_pix_clk(ipu->dev, pixel_clk_1_sel,
+ (const char **)ipu_pixel_clk_sel,
+ ARRAY_SIZE(ipu_pixel_clk_sel),
+ 0, ipu->id, 1, 0);
+ if (IS_ERR(clk)) {
+ dev_err(ipu->dev, "clk_register mux di1 failed");
+ return PTR_ERR(clk);
+ }
+ ipu->pixel_clk_sel[1] = clk;
+
+ clk = clk_register_div_pix_clk(ipu->dev, pixel_clk_0_div,
+ pixel_clk_0_sel, 0, ipu->id, 0, 0);
+ if (IS_ERR(clk)) {
+ dev_err(ipu->dev, "clk register di0 div failed");
+ return PTR_ERR(clk);
+ }
+ clk = clk_register_div_pix_clk(ipu->dev, pixel_clk_1_div,
+ pixel_clk_1_sel, CLK_SET_RATE_PARENT, ipu->id, 1, 0);
+ if (IS_ERR(clk)) {
+ dev_err(ipu->dev, "clk register di1 div failed");
+ return PTR_ERR(clk);
+ }
+
+ ipu->pixel_clk[0] = clk_register_gate_pix_clk(ipu->dev, pixel_clk_0,
+ pixel_clk_0_div, CLK_SET_RATE_PARENT,
+ ipu->id, 0, 0);
+ if (IS_ERR(ipu->pixel_clk[0])) {
+ dev_err(ipu->dev, "clk register di0 gate failed");
+ return PTR_ERR(ipu->pixel_clk[0]);
+ }
+ ipu->pixel_clk[1] = clk_register_gate_pix_clk(ipu->dev, pixel_clk_1,
+ pixel_clk_1_div, CLK_SET_RATE_PARENT,
+ ipu->id, 1, 0);
+ if (IS_ERR(ipu->pixel_clk[1])) {
+ dev_err(ipu->dev, "clk register di1 gate failed");
+ return PTR_ERR(ipu->pixel_clk[1]);
+ }
+
+ ret = clk_set_parent(ipu->pixel_clk_sel[0], ipu->ipu_clk);
+ if (ret) {
+ dev_err(ipu->dev, "clk set parent failed");
+ return ret;
+ }
+
+ ret = clk_set_parent(ipu->pixel_clk_sel[1], ipu->ipu_clk);
+ if (ret) {
+ dev_err(ipu->dev, "clk set parent failed");
+ return ret;
+ }
+
+ ipu->di_clk[0] = devm_clk_get(ipu->dev, "di0");
+ if (IS_ERR(ipu->di_clk[0])) {
+ dev_err(ipu->dev, "clk_get di0 failed");
+ return PTR_ERR(ipu->di_clk[0]);
+ }
+ ipu->di_clk[1] = devm_clk_get(ipu->dev, "di1");
+ if (IS_ERR(ipu->di_clk[1])) {
+ dev_err(ipu->dev, "clk_get di1 failed");
+ return PTR_ERR(ipu->di_clk[1]);
+ }
+
+ ipu->di_clk_sel[0] = devm_clk_get(ipu->dev, "di0_sel");
+ if (IS_ERR(ipu->di_clk_sel[0])) {
+ dev_err(ipu->dev, "clk_get di0_sel failed");
+ return PTR_ERR(ipu->di_clk_sel[0]);
+ }
+ ipu->di_clk_sel[1] = devm_clk_get(ipu->dev, "di1_sel");
+ if (IS_ERR(ipu->di_clk_sel[1])) {
+ dev_err(ipu->dev, "clk_get di1_sel failed");
+ return PTR_ERR(ipu->di_clk_sel[1]);
+ }
+
+ return 0;
+}
+
+static int ipu_mem_reset(struct ipu_soc *ipu)
+{
+ int timeout = 1000;
+
+ ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
+
+ while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
+ if (!timeout--)
+ return -ETIME;
+ msleep(1);
+ }
+
+ return 0;
+}
+
+struct ipu_soc *ipu_get_soc(int id)
+{
+ if (id >= MXC_IPU_MAX_NUM)
+ return ERR_PTR(-ENODEV);
+ else if (!ipu_array[id].online)
+ return ERR_PTR(-ENODEV);
+ else
+ return &(ipu_array[id]);
+}
+EXPORT_SYMBOL_GPL(ipu_get_soc);
+
+void _ipu_get(struct ipu_soc *ipu)
+{
+ int ret;
+
+ ret = clk_enable(ipu->ipu_clk);
+ if (ret < 0)
+ BUG();
+}
+
+void _ipu_put(struct ipu_soc *ipu)
+{
+ clk_disable(ipu->ipu_clk);
+}
+
+void ipu_disable_hsp_clk(struct ipu_soc *ipu)
+{
+ _ipu_put(ipu);
+}
+EXPORT_SYMBOL(ipu_disable_hsp_clk);
+
+struct ipu_devtype {
+ const char *name;
+ unsigned long cm_ofs;
+ unsigned long idmac_ofs;
+ unsigned long ic_ofs;
+ unsigned long csi0_ofs;
+ unsigned long csi1_ofs;
+ unsigned long di0_ofs;
+ unsigned long di1_ofs;
+ unsigned long smfc_ofs;
+ unsigned long dc_ofs;
+ unsigned long dmfc_ofs;
+ unsigned long vdi_ofs;
+ unsigned long cpmem_ofs;
+ unsigned long srm_ofs;
+ unsigned long tpm_ofs;
+ unsigned long dc_tmpl_ofs;
+ enum ipuv3_type type;
+ bool idmac_used_bufs_present;
+};
+
+struct ipu_platform_type {
+ struct ipu_devtype devtype;
+ unsigned int ch0123_axi;
+ unsigned int ch23_axi;
+ unsigned int ch27_axi;
+ unsigned int ch28_axi;
+ unsigned int normal_axi;
+ bool smfc_idmac_12bit_3planar_bs_fixup; /* workaround little stripes */
+ bool idmac_used_bufs_en_r;
+ bool idmac_used_bufs_en_w;
+ unsigned int idmac_used_bufs_max_r;
+ unsigned int idmac_used_bufs_max_w;
+};
+
+static struct ipu_platform_type ipu_type_imx51 = {
+ .devtype = {
+ .name = "IPUv3EX",
+ .cm_ofs = 0x1E000000,
+ .idmac_ofs = 0x1E008000,
+ .ic_ofs = 0x1E020000,
+ .csi0_ofs = 0x1E030000,
+ .csi1_ofs = 0x1E038000,
+ .di0_ofs = 0x1E040000,
+ .di1_ofs = 0x1E048000,
+ .smfc_ofs = 0x1E050000,
+ .dc_ofs = 0x1E058000,
+ .dmfc_ofs = 0x1E060000,
+ .vdi_ofs = 0x1E068000,
+ .cpmem_ofs = 0x1F000000,
+ .srm_ofs = 0x1F040000,
+ .tpm_ofs = 0x1F060000,
+ .dc_tmpl_ofs = 0x1F080000,
+ .type = IPUv3EX,
+ .idmac_used_bufs_present = false,
+ },
+ .ch0123_axi = 1,
+	.ch23_axi = 1,
+ .ch27_axi = 1,
+ .ch28_axi = 1,
+ .normal_axi = 0,
+ .smfc_idmac_12bit_3planar_bs_fixup = false,
+};
+
+static struct ipu_platform_type ipu_type_imx53 = {
+ .devtype = {
+ .name = "IPUv3M",
+ .cm_ofs = 0x06000000,
+ .idmac_ofs = 0x06008000,
+ .ic_ofs = 0x06020000,
+ .csi0_ofs = 0x06030000,
+ .csi1_ofs = 0x06038000,
+ .di0_ofs = 0x06040000,
+ .di1_ofs = 0x06048000,
+ .smfc_ofs = 0x06050000,
+ .dc_ofs = 0x06058000,
+ .dmfc_ofs = 0x06060000,
+ .vdi_ofs = 0x06068000,
+ .cpmem_ofs = 0x07000000,
+ .srm_ofs = 0x07040000,
+ .tpm_ofs = 0x07060000,
+ .dc_tmpl_ofs = 0x07080000,
+ .type = IPUv3M,
+ .idmac_used_bufs_present = true,
+ },
+ .ch0123_axi = 1,
+ .ch23_axi = 1,
+ .ch27_axi = 1,
+ .ch28_axi = 1,
+ .normal_axi = 0,
+ .idmac_used_bufs_en_r = false,
+ .idmac_used_bufs_en_w = false,
+ .smfc_idmac_12bit_3planar_bs_fixup = false,
+};
+
+static struct ipu_platform_type ipu_type_imx6q = {
+ .devtype = {
+ .name = "IPUv3H",
+ .cm_ofs = 0x00200000,
+ .idmac_ofs = 0x00208000,
+ .ic_ofs = 0x00220000,
+ .csi0_ofs = 0x00230000,
+ .csi1_ofs = 0x00238000,
+ .di0_ofs = 0x00240000,
+ .di1_ofs = 0x00248000,
+ .smfc_ofs = 0x00250000,
+ .dc_ofs = 0x00258000,
+ .dmfc_ofs = 0x00260000,
+ .vdi_ofs = 0x00268000,
+ .cpmem_ofs = 0x00300000,
+ .srm_ofs = 0x00340000,
+ .tpm_ofs = 0x00360000,
+ .dc_tmpl_ofs = 0x00380000,
+ .type = IPUv3H,
+ .idmac_used_bufs_present = true,
+ },
+ .ch0123_axi = 0,
+ .ch23_axi = 0,
+ .ch27_axi = 0,
+ .ch28_axi = 0,
+ .normal_axi = 1,
+ .idmac_used_bufs_en_r = false,
+ .idmac_used_bufs_en_w = false,
+ .smfc_idmac_12bit_3planar_bs_fixup = false,
+};
+
+static struct ipu_platform_type ipu_type_imx6qp = {
+ .devtype = {
+ .name = "IPUv3H",
+ .cm_ofs = 0x00200000,
+ .idmac_ofs = 0x00208000,
+ .ic_ofs = 0x00220000,
+ .csi0_ofs = 0x00230000,
+ .csi1_ofs = 0x00238000,
+ .di0_ofs = 0x00240000,
+ .di1_ofs = 0x00248000,
+ .smfc_ofs = 0x00250000,
+ .dc_ofs = 0x00258000,
+ .dmfc_ofs = 0x00260000,
+ .vdi_ofs = 0x00268000,
+ .cpmem_ofs = 0x00300000,
+ .srm_ofs = 0x00340000,
+ .tpm_ofs = 0x00360000,
+ .dc_tmpl_ofs = 0x00380000,
+ .type = IPUv3H,
+ .idmac_used_bufs_present = true,
+ },
+ .ch0123_axi = 0,
+ .ch23_axi = 0,
+ .ch27_axi = 2,
+ .ch28_axi = 3,
+ .normal_axi = 1,
+ .idmac_used_bufs_en_r = true,
+ .idmac_used_bufs_en_w = true,
+ .idmac_used_bufs_max_r = 0x3,
+ .idmac_used_bufs_max_w = 0x3,
+ .smfc_idmac_12bit_3planar_bs_fixup = true,
+ };
+
+static const struct of_device_id imx_ipuv3_dt_ids[] = {
+ { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
+ { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
+ { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
+ { .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6qp, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_ipuv3_dt_ids);
+
+/*!
+ * This function is called by the driver framework to initialize the IPU
+ * hardware.
+ *
+ * @param dev The device structure for the IPU passed in by the
+ * driver framework.
+ *
+ * @return Returns 0 on success or negative error code on error
+ */
+static int ipu_probe(struct platform_device *pdev)
+{
+ struct ipu_soc *ipu;
+ struct resource *res;
+ unsigned long ipu_base;
+ const struct of_device_id *of_id =
+ of_match_device(imx_ipuv3_dt_ids, &pdev->dev);
+ const struct ipu_platform_type *iputype = of_id->data;
+ const struct ipu_devtype *devtype = &iputype->devtype;
+ int ret = 0, id;
+ u32 bypass_reset, reg;
+
+ dev_dbg(&pdev->dev, "<%s>\n", __func__);
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "bypass_reset", &bypass_reset);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "can not get bypass_reset\n");
+ return ret;
+ }
+
+ id = of_alias_get_id(pdev->dev.of_node, "ipu");
+ if (id < 0) {
+ dev_dbg(&pdev->dev, "can not get alias id\n");
+ return id;
+ }
+
+ ipu = &ipu_array[id];
+ memset(ipu, 0, sizeof(struct ipu_soc));
+ ipu->bypass_reset = (bool)bypass_reset;
+ ipu->dev = &pdev->dev;
+ ipu->id = id;
+ ipu->devtype = devtype->type;
+ ipu->ch0123_axi = iputype->ch0123_axi;
+ ipu->ch23_axi = iputype->ch23_axi;
+ ipu->ch27_axi = iputype->ch27_axi;
+ ipu->ch28_axi = iputype->ch28_axi;
+ ipu->normal_axi = iputype->normal_axi;
+ ipu->smfc_idmac_12bit_3planar_bs_fixup =
+ iputype->smfc_idmac_12bit_3planar_bs_fixup;
+ spin_lock_init(&ipu->int_reg_spin_lock);
+ spin_lock_init(&ipu->rdy_reg_spin_lock);
+ mutex_init(&ipu->mutex_lock);
+
+ dev_dbg(&pdev->dev, "revision is %s\n", devtype->name);
+
+ ipu->irq_sync = platform_get_irq(pdev, 0);
+ ipu->irq_err = platform_get_irq(pdev, 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res || ipu->irq_sync < 0 || ipu->irq_err < 0) {
+ dev_err(&pdev->dev, "can't get device resources\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name))
+ return -EBUSY;
+
+ ret = devm_request_irq(&pdev->dev, ipu->irq_sync,
+ ipu_sync_irq_handler, 0, pdev->name, ipu);
+ if (ret) {
+ dev_err(ipu->dev, "request SYNC interrupt failed\n");
+ return ret;
+ }
+ ret = devm_request_irq(&pdev->dev, ipu->irq_err,
+ ipu_err_irq_handler, 0, pdev->name, ipu);
+ if (ret) {
+ dev_err(ipu->dev, "request ERR interrupt failed\n");
+ return ret;
+ }
+
+ ipu_base = res->start;
+
+ ipu->cm_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->cm_ofs, PAGE_SIZE);
+ ipu->ic_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->ic_ofs, PAGE_SIZE);
+ ipu->idmac_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->idmac_ofs, PAGE_SIZE);
+	/* DP Registers are accessed through the SRM */
+ ipu->dp_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->srm_ofs, PAGE_SIZE);
+ ipu->dc_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->dc_ofs, PAGE_SIZE);
+ ipu->dmfc_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->dmfc_ofs, PAGE_SIZE);
+ ipu->di_reg[0] = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->di0_ofs, PAGE_SIZE);
+ ipu->di_reg[1] = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->di1_ofs, PAGE_SIZE);
+ ipu->smfc_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->smfc_ofs, PAGE_SIZE);
+ ipu->csi_reg[0] = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->csi0_ofs, PAGE_SIZE);
+ ipu->csi_reg[1] = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->csi1_ofs, PAGE_SIZE);
+ ipu->cpmem_base = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->cpmem_ofs, SZ_128K);
+ ipu->tpmem_base = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->tpm_ofs, SZ_64K);
+ ipu->dc_tmpl_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->dc_tmpl_ofs, SZ_128K);
+ ipu->vdi_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->vdi_ofs, PAGE_SIZE);
+ if (!ipu->cm_reg || !ipu->ic_reg || !ipu->idmac_reg ||
+ !ipu->dp_reg || !ipu->dc_reg || !ipu->dmfc_reg ||
+ !ipu->di_reg[0] || !ipu->di_reg[1] || !ipu->smfc_reg ||
+ !ipu->csi_reg[0] || !ipu->csi_reg[1] || !ipu->cpmem_base ||
+ !ipu->tpmem_base || !ipu->dc_tmpl_reg || !ipu->vdi_reg)
+ return -ENOMEM;
+
+ dev_dbg(ipu->dev, "IPU CM Regs = %p\n", ipu->cm_reg);
+ dev_dbg(ipu->dev, "IPU IC Regs = %p\n", ipu->ic_reg);
+ dev_dbg(ipu->dev, "IPU IDMAC Regs = %p\n", ipu->idmac_reg);
+ dev_dbg(ipu->dev, "IPU DP Regs = %p\n", ipu->dp_reg);
+ dev_dbg(ipu->dev, "IPU DC Regs = %p\n", ipu->dc_reg);
+ dev_dbg(ipu->dev, "IPU DMFC Regs = %p\n", ipu->dmfc_reg);
+ dev_dbg(ipu->dev, "IPU DI0 Regs = %p\n", ipu->di_reg[0]);
+ dev_dbg(ipu->dev, "IPU DI1 Regs = %p\n", ipu->di_reg[1]);
+ dev_dbg(ipu->dev, "IPU SMFC Regs = %p\n", ipu->smfc_reg);
+ dev_dbg(ipu->dev, "IPU CSI0 Regs = %p\n", ipu->csi_reg[0]);
+ dev_dbg(ipu->dev, "IPU CSI1 Regs = %p\n", ipu->csi_reg[1]);
+ dev_dbg(ipu->dev, "IPU CPMem = %p\n", ipu->cpmem_base);
+ dev_dbg(ipu->dev, "IPU TPMem = %p\n", ipu->tpmem_base);
+ dev_dbg(ipu->dev, "IPU DC Template Mem = %p\n", ipu->dc_tmpl_reg);
+ dev_dbg(ipu->dev, "IPU VDI Regs = %p\n", ipu->vdi_reg);
+
+ ipu->ipu_clk = devm_clk_get(ipu->dev, "bus");
+ if (IS_ERR(ipu->ipu_clk)) {
+ dev_err(ipu->dev, "clk_get ipu failed");
+ return PTR_ERR(ipu->ipu_clk);
+ }
+
+ /* ipu_clk is always prepared */
+ ret = clk_prepare_enable(ipu->ipu_clk);
+ if (ret < 0) {
+ dev_err(ipu->dev, "ipu clk enable failed\n");
+ return ret;
+ }
+
+ ipu->prg_clk = devm_clk_get(ipu->dev, "prg");
+ if (IS_ERR(ipu->prg_clk))
+ ipu->prg_clk = NULL;
+
+ ipu->online = true;
+
+ platform_set_drvdata(pdev, ipu);
+
+ if (!bypass_reset) {
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
+ return ret;
+ }
+
+ ipu_mem_reset(ipu);
+
+ ipu_disp_init(ipu);
+
+ /* Set MCU_T to divide MCU access window into 2 */
+ ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
+ IPU_DISP_GEN);
+ }
+
+ /* setup ipu clk tree after ipu reset */
+ ret = ipu_clk_setup_enable(ipu);
+ if (ret < 0) {
+ dev_err(ipu->dev, "ipu clk setup failed\n");
+ ipu->online = false;
+ return ret;
+ }
+
+ if (devtype->idmac_used_bufs_present) {
+ reg = ipu_idmac_read(ipu, IDMAC_CONF);
+ if (iputype->idmac_used_bufs_en_r)
+ reg |= IDMAC_CONF_USED_BUFS_EN_R;
+ else
+ reg &= ~IDMAC_CONF_USED_BUFS_EN_R;
+ if (iputype->idmac_used_bufs_en_w)
+ reg |= IDMAC_CONF_USED_BUFS_EN_W;
+ else
+ reg &= ~IDMAC_CONF_USED_BUFS_EN_W;
+
+ reg &= ~IDMAC_CONF_USED_BUFS_MAX_R_MASK;
+ reg |= (iputype->idmac_used_bufs_max_r <<
+ IDMAC_CONF_USED_BUFS_MAX_R_OFFSET);
+ reg &= ~IDMAC_CONF_USED_BUFS_MAX_W_MASK;
+ reg |= (iputype->idmac_used_bufs_max_w <<
+ IDMAC_CONF_USED_BUFS_MAX_W_OFFSET);
+ ipu_idmac_write(ipu, reg, IDMAC_CONF);
+ }
+
+ /* Set sync refresh channels and CSI->mem channel as high priority */
+ ipu_idmac_write(ipu, 0x18800003L, IDMAC_CHA_PRI(0));
+
+ /* Enable error interrupts by default */
+ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
+ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
+ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
+ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
+
+ if (!bypass_reset)
+ clk_disable(ipu->ipu_clk);
+
+ register_ipu_device(ipu, id);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return ret;
+}
+
+int ipu_remove(struct platform_device *pdev)
+{
+ struct ipu_soc *ipu = platform_get_drvdata(pdev);
+
+ unregister_ipu_device(ipu, ipu->id);
+
+ clk_put(ipu->ipu_clk);
+
+ return 0;
+}
+
+void ipu_dump_registers(struct ipu_soc *ipu)
+{
+ dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n", ipu_cm_read(ipu, IPU_CONF));
+ dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n", ipu_idmac_read(ipu, IDMAC_CONF));
+ dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
+ dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_BAND_EN(ipu->devtype, 0)));
+ dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_BAND_EN(ipu->devtype, 32)));
+ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
+ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
+ if (ipu->devtype >= IPUv3EX) {
+ dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL0 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(ipu->devtype, 0)));
+ dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL1 = \t0x%08X\n",
+ ipu_cm_read(ipu,
+ IPU_CHA_TRB_MODE_SEL(ipu->devtype, 32)));
+ }
+ dev_dbg(ipu->dev, "DMFC_WR_CHAN = \t0x%08X\n",
+ ipu_dmfc_read(ipu, DMFC_WR_CHAN));
+ dev_dbg(ipu->dev, "DMFC_WR_CHAN_DEF = \t0x%08X\n",
+ ipu_dmfc_read(ipu, DMFC_WR_CHAN_DEF));
+ dev_dbg(ipu->dev, "DMFC_DP_CHAN = \t0x%08X\n",
+ ipu_dmfc_read(ipu, DMFC_DP_CHAN));
+ dev_dbg(ipu->dev, "DMFC_DP_CHAN_DEF = \t0x%08X\n",
+ ipu_dmfc_read(ipu, DMFC_DP_CHAN_DEF));
+ dev_dbg(ipu->dev, "DMFC_IC_CTRL = \t0x%08X\n",
+ ipu_dmfc_read(ipu, DMFC_IC_CTRL));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
+ dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
+ dev_dbg(ipu->dev, "IPU_VDIC_VDI_FSIZE = \t0x%08X\n",
+ ipu_vdi_read(ipu, VDI_FSIZE));
+ dev_dbg(ipu->dev, "IPU_VDIC_VDI_C = \t0x%08X\n",
+ ipu_vdi_read(ipu, VDI_C));
+ dev_dbg(ipu->dev, "IPU_IC_CONF = \t0x%08X\n",
+ ipu_ic_read(ipu, IC_CONF));
+}
+
+/*!
+ * This function is called to initialize a logical IPU channel.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID to init.
+ *
+ * @param params Input parameter containing union of channel
+ * initialization parameters.
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
+{
+ int ret = 0;
+ bool bad_pixfmt;
+ uint32_t ipu_conf, reg, in_g_pixel_fmt, sec_dma;
+
+ dev_dbg(ipu->dev, "init channel = %d\n", IPU_CHAN_ID(channel));
+
+ ret = pm_runtime_get_sync(ipu->dev);
+ if (ret < 0) {
+ dev_err(ipu->dev, "ch = %d, pm_runtime_get failed:%d!\n",
+ IPU_CHAN_ID(channel), ret);
+ dump_stack();
+ return ret;
+ }
+ /*
+ * Here, ret could be 1 if the device's runtime PM status was
+ * already 'active', so clear it to be 0.
+ */
+ ret = 0;
+
+ _ipu_get(ipu);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ /* Re-enable error interrupts every time a channel is initialized */
+ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
+ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
+ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
+ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
+
+ if (ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) {
+ dev_warn(ipu->dev, "Warning: channel already initialized %d\n",
+ IPU_CHAN_ID(channel));
+ }
+
+ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
+
+ switch (channel) {
+ case CSI_MEM0:
+ case CSI_MEM1:
+ case CSI_MEM2:
+ case CSI_MEM3:
+ if (params->csi_mem.csi > 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (params->csi_mem.interlaced)
+ ipu->chan_is_interlaced[channel_2_dma(channel,
+ IPU_OUTPUT_BUFFER)] = true;
+ else
+ ipu->chan_is_interlaced[channel_2_dma(channel,
+ IPU_OUTPUT_BUFFER)] = false;
+
+ ipu->smfc_use_count++;
+ ipu->csi_channel[params->csi_mem.csi] = channel;
+
+ /*SMFC setting*/
+ if (params->csi_mem.mipi_en) {
+ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
+ params->csi_mem.csi));
+ _ipu_smfc_init(ipu, channel, params->csi_mem.mipi_vc,
+ params->csi_mem.csi);
+ _ipu_csi_set_mipi_di(ipu, params->csi_mem.mipi_vc,
+ params->csi_mem.mipi_id, params->csi_mem.csi);
+ } else {
+ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
+ params->csi_mem.csi));
+ _ipu_smfc_init(ipu, channel, 0, params->csi_mem.csi);
+ }
+
+ /*CSI data (include compander) dest*/
+ _ipu_csi_init(ipu, channel, params->csi_mem.csi);
+ break;
+ case CSI_PRP_ENC_MEM:
+ if (params->csi_prp_enc_mem.csi > 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
+ (ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ipu->using_ic_dirct_ch = CSI_PRP_ENC_MEM;
+
+ ipu->ic_use_count++;
+ ipu->csi_channel[params->csi_prp_enc_mem.csi] = channel;
+
+ if (params->csi_prp_enc_mem.mipi_en) {
+ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
+ params->csi_prp_enc_mem.csi));
+ _ipu_csi_set_mipi_di(ipu,
+ params->csi_prp_enc_mem.mipi_vc,
+ params->csi_prp_enc_mem.mipi_id,
+ params->csi_prp_enc_mem.csi);
+ } else
+ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
+ params->csi_prp_enc_mem.csi));
+
+ /*CSI0/1 feed into IC*/
+ ipu_conf &= ~IPU_CONF_IC_INPUT;
+ if (params->csi_prp_enc_mem.csi)
+ ipu_conf |= IPU_CONF_CSI_SEL;
+ else
+ ipu_conf &= ~IPU_CONF_CSI_SEL;
+
+ /*PRP skip buffer in memory, only valid when RWS_EN is true*/
+ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
+
+ /*CSI data (include compander) dest*/
+ _ipu_csi_init(ipu, channel, params->csi_prp_enc_mem.csi);
+ _ipu_ic_init_prpenc(ipu, params, true);
+ break;
+ case CSI_PRP_VF_MEM:
+ if (params->csi_prp_vf_mem.csi > 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
+ (ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ipu->using_ic_dirct_ch = CSI_PRP_VF_MEM;
+
+ ipu->ic_use_count++;
+ ipu->csi_channel[params->csi_prp_vf_mem.csi] = channel;
+
+ if (params->csi_prp_vf_mem.mipi_en) {
+ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
+ params->csi_prp_vf_mem.csi));
+ _ipu_csi_set_mipi_di(ipu,
+ params->csi_prp_vf_mem.mipi_vc,
+ params->csi_prp_vf_mem.mipi_id,
+ params->csi_prp_vf_mem.csi);
+ } else
+ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
+ params->csi_prp_vf_mem.csi));
+
+ /*CSI0/1 feed into IC*/
+ ipu_conf &= ~IPU_CONF_IC_INPUT;
+ if (params->csi_prp_vf_mem.csi)
+ ipu_conf |= IPU_CONF_CSI_SEL;
+ else
+ ipu_conf &= ~IPU_CONF_CSI_SEL;
+
+ /*PRP skip buffer in memory, only valid when RWS_EN is true*/
+ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
+
+ /*CSI data (include compander) dest*/
+ _ipu_csi_init(ipu, channel, params->csi_prp_vf_mem.csi);
+ _ipu_ic_init_prpvf(ipu, params, true);
+ break;
+ case MEM_PRP_VF_MEM:
+ if (params->mem_prp_vf_mem.graphics_combine_en) {
+ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
+ in_g_pixel_fmt = params->mem_prp_vf_mem.in_g_pixel_fmt;
+ bad_pixfmt =
+ _ipu_ch_param_bad_alpha_pos(in_g_pixel_fmt);
+
+ if (params->mem_prp_vf_mem.alpha_chan_en) {
+ if (bad_pixfmt) {
+ dev_err(ipu->dev, "bad pixel format "
+ "for graphics plane from "
+ "ch%d\n", sec_dma);
+ ret = -EINVAL;
+ goto err;
+ }
+ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
+ }
+ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
+ }
+
+ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, reg | FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
+
+ _ipu_ic_init_prpvf(ipu, params, false);
+ ipu->ic_use_count++;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
+ (ipu->using_ic_dirct_ch == MEM_VDI_MEM) ||
+ (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ipu->using_ic_dirct_ch = MEM_VDI_PRP_VF_MEM;
+ ipu->ic_use_count++;
+ ipu->vdi_use_count++;
+ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ reg &= ~FS_VDI_SRC_SEL_MASK;
+ ipu_cm_write(ipu, reg , IPU_FS_PROC_FLOW1);
+
+ if (params->mem_prp_vf_mem.graphics_combine_en)
+ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
+ _ipu_ic_init_prpvf(ipu, params, false);
+ _ipu_vdi_init(ipu, channel, params);
+ break;
+ case MEM_VDI_PRP_VF_MEM_P:
+ case MEM_VDI_PRP_VF_MEM_N:
+ case MEM_VDI_MEM_P:
+ case MEM_VDI_MEM_N:
+ _ipu_vdi_init(ipu, channel, params);
+ break;
+ case MEM_VDI_MEM:
+ if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
+ (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
+ (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ipu->using_ic_dirct_ch = MEM_VDI_MEM;
+ ipu->ic_use_count++;
+ ipu->vdi_use_count++;
+ _ipu_vdi_init(ipu, channel, params);
+ break;
+ case MEM_ROT_VF_MEM:
+ ipu->ic_use_count++;
+ ipu->rot_use_count++;
+ _ipu_ic_init_rotate_vf(ipu, params);
+ break;
+ case MEM_PRP_ENC_MEM:
+ ipu->ic_use_count++;
+ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, reg | FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
+ _ipu_ic_init_prpenc(ipu, params, false);
+ break;
+ case MEM_ROT_ENC_MEM:
+ ipu->ic_use_count++;
+ ipu->rot_use_count++;
+ _ipu_ic_init_rotate_enc(ipu, params);
+ break;
+ case MEM_PP_MEM:
+ if (params->mem_pp_mem.graphics_combine_en) {
+ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
+ in_g_pixel_fmt = params->mem_pp_mem.in_g_pixel_fmt;
+ bad_pixfmt =
+ _ipu_ch_param_bad_alpha_pos(in_g_pixel_fmt);
+
+ if (params->mem_pp_mem.alpha_chan_en) {
+ if (bad_pixfmt) {
+ dev_err(ipu->dev, "bad pixel format "
+ "for graphics plane from "
+ "ch%d\n", sec_dma);
+ ret = -EINVAL;
+ goto err;
+ }
+ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
+ }
+
+ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
+ }
+
+ _ipu_ic_init_pp(ipu, params);
+ ipu->ic_use_count++;
+ break;
+ case MEM_ROT_PP_MEM:
+ _ipu_ic_init_rotate_pp(ipu, params);
+ ipu->ic_use_count++;
+ ipu->rot_use_count++;
+ break;
+ case MEM_DC_SYNC:
+ if (params->mem_dc_sync.di > 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ipu->dc_di_assignment[1] = params->mem_dc_sync.di;
+ _ipu_dc_init(ipu, 1, params->mem_dc_sync.di,
+ params->mem_dc_sync.interlaced,
+ params->mem_dc_sync.out_pixel_fmt);
+ ipu->di_use_count[params->mem_dc_sync.di]++;
+ ipu->dc_use_count++;
+ ipu->dmfc_use_count++;
+ break;
+ case MEM_BG_SYNC:
+ if (params->mem_dp_bg_sync.di > 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (params->mem_dp_bg_sync.alpha_chan_en)
+ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
+
+ ipu->dc_di_assignment[5] = params->mem_dp_bg_sync.di;
+ _ipu_dp_init(ipu, channel, params->mem_dp_bg_sync.in_pixel_fmt,
+ params->mem_dp_bg_sync.out_pixel_fmt);
+ _ipu_dc_init(ipu, 5, params->mem_dp_bg_sync.di,
+ params->mem_dp_bg_sync.interlaced,
+ params->mem_dp_bg_sync.out_pixel_fmt);
+ ipu->di_use_count[params->mem_dp_bg_sync.di]++;
+ ipu->dc_use_count++;
+ ipu->dp_use_count++;
+ ipu->dmfc_use_count++;
+ break;
+ case MEM_FG_SYNC:
+ _ipu_dp_init(ipu, channel, params->mem_dp_fg_sync.in_pixel_fmt,
+ params->mem_dp_fg_sync.out_pixel_fmt);
+
+ if (params->mem_dp_fg_sync.alpha_chan_en)
+ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
+
+ ipu->dc_use_count++;
+ ipu->dp_use_count++;
+ ipu->dmfc_use_count++;
+ break;
+ case DIRECT_ASYNC0:
+ if (params->direct_async.di > 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ipu->dc_di_assignment[8] = params->direct_async.di;
+ _ipu_dc_init(ipu, 8, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
+ ipu->di_use_count[params->direct_async.di]++;
+ ipu->dc_use_count++;
+ break;
+ case DIRECT_ASYNC1:
+ if (params->direct_async.di > 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ipu->dc_di_assignment[9] = params->direct_async.di;
+ _ipu_dc_init(ipu, 9, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
+ ipu->di_use_count[params->direct_async.di]++;
+ ipu->dc_use_count++;
+ break;
+ default:
+ dev_err(ipu->dev, "Missing channel initialization\n");
+ break;
+ }
+
+ ipu->channel_init_mask |= 1L << IPU_CHAN_ID(channel);
+
+ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
+
+err:
+ mutex_unlock(&ipu->mutex_lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipu_init_channel);
+
+/*!
+ * This function is called to uninitialize a logical IPU channel.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID to uninit.
+ */
+void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ uint32_t reg;
+ uint32_t in_dma, out_dma = 0;
+ uint32_t ipu_conf;
+ uint32_t dc_chan = 0;
+ int ret;
+
+ mutex_lock(&ipu->mutex_lock);
+
+ if ((ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
+ dev_dbg(ipu->dev, "Channel already uninitialized %d\n",
+ IPU_CHAN_ID(channel));
+ mutex_unlock(&ipu->mutex_lock);
+ return;
+ }
+
+ /* Make sure channel is disabled */
+ /* Get input and output dma channels */
+ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
+ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
+
+ if (idma_is_set(ipu, IDMAC_CHA_EN(in_dma), in_dma) ||
+ idma_is_set(ipu, IDMAC_CHA_EN(out_dma), out_dma)) {
+ dev_err(ipu->dev,
+ "Channel %d is not disabled, disable first\n",
+ IPU_CHAN_ID(channel));
+ mutex_unlock(&ipu->mutex_lock);
+ return;
+ }
+
+ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
+
+ /* Reset the double buffer */
+ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(in_dma));
+ ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_DB_MODE_SEL(in_dma));
+ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(out_dma));
+ ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_DB_MODE_SEL(out_dma));
+
+ /* Reset the triple buffer */
+ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(ipu->devtype, in_dma));
+ ipu_cm_write(ipu, reg & ~idma_mask(in_dma),
+ IPU_CHA_TRB_MODE_SEL(ipu->devtype, in_dma));
+ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(ipu->devtype, out_dma));
+ ipu_cm_write(ipu, reg & ~idma_mask(out_dma),
+ IPU_CHA_TRB_MODE_SEL(ipu->devtype, out_dma));
+
+ if (_ipu_is_ic_chan(in_dma) || _ipu_is_dp_graphic_chan(in_dma)) {
+ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = false;
+ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = false;
+ }
+
+ switch (channel) {
+ case CSI_MEM0:
+ case CSI_MEM1:
+ case CSI_MEM2:
+ case CSI_MEM3:
+ ipu->smfc_use_count--;
+ if (ipu->csi_channel[0] == channel) {
+ ipu->csi_channel[0] = CHAN_NONE;
+ } else if (ipu->csi_channel[1] == channel) {
+ ipu->csi_channel[1] = CHAN_NONE;
+ }
+ break;
+ case CSI_PRP_ENC_MEM:
+ ipu->ic_use_count--;
+ if (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)
+ ipu->using_ic_dirct_ch = 0;
+ _ipu_ic_uninit_prpenc(ipu);
+ if (ipu->csi_channel[0] == channel) {
+ ipu->csi_channel[0] = CHAN_NONE;
+ } else if (ipu->csi_channel[1] == channel) {
+ ipu->csi_channel[1] = CHAN_NONE;
+ }
+ break;
+ case CSI_PRP_VF_MEM:
+ ipu->ic_use_count--;
+ if (ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM)
+ ipu->using_ic_dirct_ch = 0;
+ _ipu_ic_uninit_prpvf(ipu);
+ if (ipu->csi_channel[0] == channel) {
+ ipu->csi_channel[0] = CHAN_NONE;
+ } else if (ipu->csi_channel[1] == channel) {
+ ipu->csi_channel[1] = CHAN_NONE;
+ }
+ break;
+ case MEM_PRP_VF_MEM:
+ ipu->ic_use_count--;
+ _ipu_ic_uninit_prpvf(ipu);
+ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ ipu->ic_use_count--;
+ ipu->vdi_use_count--;
+ if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM)
+ ipu->using_ic_dirct_ch = 0;
+ _ipu_ic_uninit_prpvf(ipu);
+ _ipu_vdi_uninit(ipu);
+ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
+ break;
+ case MEM_VDI_MEM:
+ ipu->ic_use_count--;
+ ipu->vdi_use_count--;
+ if (ipu->using_ic_dirct_ch == MEM_VDI_MEM)
+ ipu->using_ic_dirct_ch = 0;
+ _ipu_vdi_uninit(ipu);
+ break;
+ case MEM_VDI_PRP_VF_MEM_P:
+ case MEM_VDI_PRP_VF_MEM_N:
+ case MEM_VDI_MEM_P:
+ case MEM_VDI_MEM_N:
+ break;
+ case MEM_ROT_VF_MEM:
+ ipu->rot_use_count--;
+ ipu->ic_use_count--;
+ _ipu_ic_uninit_rotate_vf(ipu);
+ break;
+ case MEM_PRP_ENC_MEM:
+ ipu->ic_use_count--;
+ _ipu_ic_uninit_prpenc(ipu);
+ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
+ break;
+ case MEM_ROT_ENC_MEM:
+ ipu->rot_use_count--;
+ ipu->ic_use_count--;
+ _ipu_ic_uninit_rotate_enc(ipu);
+ break;
+ case MEM_PP_MEM:
+ ipu->ic_use_count--;
+ _ipu_ic_uninit_pp(ipu);
+ break;
+ case MEM_ROT_PP_MEM:
+ ipu->rot_use_count--;
+ ipu->ic_use_count--;
+ _ipu_ic_uninit_rotate_pp(ipu);
+ break;
+ case MEM_DC_SYNC:
+ dc_chan = 1;
+ _ipu_dc_uninit(ipu, 1);
+ ipu->di_use_count[ipu->dc_di_assignment[1]]--;
+ ipu->dc_use_count--;
+ ipu->dmfc_use_count--;
+ break;
+ case MEM_BG_SYNC:
+ dc_chan = 5;
+ _ipu_dp_uninit(ipu, channel);
+ _ipu_dc_uninit(ipu, 5);
+ ipu->di_use_count[ipu->dc_di_assignment[5]]--;
+ ipu->dc_use_count--;
+ ipu->dp_use_count--;
+ ipu->dmfc_use_count--;
+ break;
+ case MEM_FG_SYNC:
+ _ipu_dp_uninit(ipu, channel);
+ ipu->dc_use_count--;
+ ipu->dp_use_count--;
+ ipu->dmfc_use_count--;
+ break;
+ case DIRECT_ASYNC0:
+ dc_chan = 8;
+ _ipu_dc_uninit(ipu, 8);
+ ipu->di_use_count[ipu->dc_di_assignment[8]]--;
+ ipu->dc_use_count--;
+ break;
+ case DIRECT_ASYNC1:
+ dc_chan = 9;
+ _ipu_dc_uninit(ipu, 9);
+ ipu->di_use_count[ipu->dc_di_assignment[9]]--;
+ ipu->dc_use_count--;
+ break;
+ default:
+ break;
+ }
+
+ if (ipu->ic_use_count == 0)
+ ipu_conf &= ~IPU_CONF_IC_EN;
+ if (ipu->vdi_use_count == 0) {
+ ipu_conf &= ~IPU_CONF_ISP_EN;
+ ipu_conf &= ~IPU_CONF_VDI_EN;
+ ipu_conf &= ~IPU_CONF_IC_INPUT;
+ }
+ if (ipu->rot_use_count == 0)
+ ipu_conf &= ~IPU_CONF_ROT_EN;
+ if (ipu->dc_use_count == 0)
+ ipu_conf &= ~IPU_CONF_DC_EN;
+ if (ipu->dp_use_count == 0)
+ ipu_conf &= ~IPU_CONF_DP_EN;
+ if (ipu->dmfc_use_count == 0)
+ ipu_conf &= ~IPU_CONF_DMFC_EN;
+ if (ipu->di_use_count[0] == 0) {
+ ipu_conf &= ~IPU_CONF_DI0_EN;
+ }
+ if (ipu->di_use_count[1] == 0) {
+ ipu_conf &= ~IPU_CONF_DI1_EN;
+ }
+ if (ipu->smfc_use_count == 0)
+ ipu_conf &= ~IPU_CONF_SMFC_EN;
+
+ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
+
+ ipu->channel_init_mask &= ~(1L << IPU_CHAN_ID(channel));
+
+ /*
+ * Disable pixel clk and its parent clock(if the parent clock
+ * usecount is 1) after clearing DC/DP/DI bits in IPU_CONF
+ * register to prevent LVDS display channel starvation.
+ */
+ if (_ipu_is_primary_disp_chan(in_dma) &&
+ ipu->pixel_clk_en[ipu->dc_di_assignment[dc_chan]]) {
+ clk_disable_unprepare(ipu->pixel_clk[ipu->dc_di_assignment[dc_chan]]);
+ ipu->pixel_clk_en[ipu->dc_di_assignment[dc_chan]] = false;
+ }
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ _ipu_put(ipu);
+
+ ret = pm_runtime_put_sync_suspend(ipu->dev);
+ if (ret < 0) {
+ dev_err(ipu->dev, "ch = %d, pm_runtime_put failed:%d!\n",
+ IPU_CHAN_ID(channel), ret);
+ dump_stack();
+ }
+
+ WARN_ON(ipu->ic_use_count < 0);
+ WARN_ON(ipu->vdi_use_count < 0);
+ WARN_ON(ipu->rot_use_count < 0);
+ WARN_ON(ipu->dc_use_count < 0);
+ WARN_ON(ipu->dp_use_count < 0);
+ WARN_ON(ipu->dmfc_use_count < 0);
+ WARN_ON(ipu->smfc_use_count < 0);
+}
+EXPORT_SYMBOL(ipu_uninit_channel);
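+
+/*
+ * Teardown sketch (illustrative only): a channel must be disabled before
+ * it is uninitialized, otherwise the IDMAC check above bails out.  "ipu"
+ * is a placeholder for the caller's struct ipu_soc pointer.
+ *
+ *	ipu_disable_channel(ipu, MEM_BG_SYNC, true);
+ *	ipu_uninit_channel(ipu, MEM_BG_SYNC);
+ */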
+
+/*!
+ * This function is called to initialize buffer(s) for logical IPU channel.
+ *
+ * @param ipu ipu handler
+ *
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param type Input parameter which buffer to initialize.
+ *
+ * @param pixel_fmt Input parameter for pixel format of buffer.
+ * Pixel format is a FOURCC ASCII code.
+ *
+ * @param width Input parameter for width of buffer in pixels.
+ *
+ * @param height Input parameter for height of buffer in pixels.
+ *
+ * @param stride Input parameter for stride length of buffer
+ *                      in bytes.
+ *
+ * @param rot_mode Input parameter for rotation setting of buffer.
+ * A rotation setting other than
+ * IPU_ROTATE_VERT_FLIP
+ * should only be used for input buffers of
+ * rotation channels.
+ *
+ * @param phyaddr_0 Input parameter buffer 0 physical address.
+ *
+ * @param phyaddr_1 Input parameter buffer 1 physical address.
+ * Setting this to a value other than NULL enables
+ * double buffering mode.
+ *
+ * @param phyaddr_2 Input parameter buffer 2 physical address.
+ * Setting this to a value other than NULL enables
+ *                      triple buffering mode; phyaddr_1 must not be
+ *                      NULL in that case.
+ *
+ * @param u private u offset for additional cropping,
+ * zero if not used.
+ *
+ * @param v private v offset for additional cropping,
+ * zero if not used.
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
+ ipu_buffer_t type,
+ uint32_t pixel_fmt,
+ uint16_t width, uint16_t height,
+ uint32_t stride,
+ ipu_rotate_mode_t rot_mode,
+ dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
+ dma_addr_t phyaddr_2,
+ uint32_t u, uint32_t v)
+{
+ uint32_t reg;
+ uint32_t dma_chan;
+ uint32_t burst_size;
+
+ dma_chan = channel_2_dma(channel, type);
+ if (!idma_is_valid(dma_chan))
+ return -EINVAL;
+
+ if (stride < width * bytes_per_pixel(pixel_fmt))
+ stride = width * bytes_per_pixel(pixel_fmt);
+
+ if (stride % 4) {
+ dev_err(ipu->dev,
+ "Stride not 32-bit aligned, stride = %d\n", stride);
+ return -EINVAL;
+ }
+ /* IC & IRT channels' width must be multiple of 8 pixels */
+ if ((_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan))
+ && (width % 8)) {
+ dev_err(ipu->dev, "Width must be 8 pixel multiple\n");
+ return -EINVAL;
+ }
+
+ if (_ipu_is_vdi_out_chan(dma_chan) &&
+ ((width < 16) || (height < 16) || (width % 2) || (height % 4))) {
+ dev_err(ipu->dev, "vdi width/height limited err\n");
+ return -EINVAL;
+ }
+
+ /* IPUv3EX and IPUv3M support triple buffer */
+ if ((!_ipu_is_trb_chan(ipu, dma_chan)) && phyaddr_2) {
+ dev_err(ipu->dev, "Chan%d doesn't support triple buffer "
+ "mode\n", dma_chan);
+ return -EINVAL;
+ }
+ if (!phyaddr_1 && phyaddr_2) {
+ dev_err(ipu->dev, "Chan%d's buf1 physical addr is NULL for "
+ "triple buffer mode\n", dma_chan);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipu->mutex_lock);
+
+ /* Build parameter memory data for DMA channel */
+ _ipu_ch_param_init(ipu, dma_chan, pixel_fmt, width, height, stride, u, v, 0,
+ phyaddr_0, phyaddr_1, phyaddr_2);
+
+ /* Set correlative channel parameter of local alpha channel */
+ if ((_ipu_is_ic_graphic_chan(dma_chan) ||
+ _ipu_is_dp_graphic_chan(dma_chan)) &&
+ (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] == true)) {
+ _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, true);
+ _ipu_ch_param_set_alpha_buffer_memory(ipu, dma_chan);
+ _ipu_ch_param_set_alpha_condition_read(ipu, dma_chan);
+		/* fix alpha width to 8 and burst size to 16 */
+ _ipu_ch_params_set_alpha_width(ipu, dma_chan, 8);
+ _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
+ } else if (_ipu_is_ic_graphic_chan(dma_chan) &&
+ ipu_pixel_format_has_alpha(pixel_fmt))
+ _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, false);
+
+ if (rot_mode)
+ _ipu_ch_param_set_rotation(ipu, dma_chan, rot_mode);
+
+ /* IC and ROT channels have restriction of 8 or 16 pix burst length */
+ if (_ipu_is_ic_chan(dma_chan) || _ipu_is_vdi_out_chan(dma_chan)) {
+ if ((width % 16) == 0)
+ _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
+ else
+ _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
+ } else if (_ipu_is_irt_chan(dma_chan)) {
+ _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
+ _ipu_ch_param_set_block_mode(ipu, dma_chan);
+ } else if (_ipu_is_dmfc_chan(dma_chan)) {
+ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
+ _ipu_dmfc_set_wait4eot(ipu, dma_chan, width);
+ _ipu_dmfc_set_burst_size(ipu, dma_chan, burst_size);
+ }
+
+ if (_ipu_disp_chan_is_interlaced(ipu, channel) ||
+ ipu->chan_is_interlaced[dma_chan])
+ _ipu_ch_param_set_interlaced_scan(ipu, dma_chan);
+
+ if (_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan) ||
+ _ipu_is_vdi_out_chan(dma_chan)) {
+ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
+ _ipu_ic_idma_init(ipu, dma_chan, width, height, burst_size,
+ rot_mode);
+ } else if (_ipu_is_smfc_chan(dma_chan)) {
+ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
+ /*
+ * This is different from IPUv3 spec, but it is confirmed
+ * in IPUforum that SMFC burst size should be NPB[6:3]
+ * when IDMAC works in 16-bit generic data mode.
+ */
+ if (pixel_fmt == IPU_PIX_FMT_GENERIC)
+ /* 8 bits per pixel */
+ burst_size = burst_size >> 4;
+ else if (pixel_fmt == IPU_PIX_FMT_GENERIC_16)
+ /* 16 bits per pixel */
+ burst_size = burst_size >> 3;
+ else
+ burst_size = burst_size >> 2;
+ _ipu_smfc_set_burst_size(ipu, channel, burst_size-1);
+ }
+
+ switch (dma_chan) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ _ipu_ch_param_set_axi_id(ipu, dma_chan, ipu->ch0123_axi);
+ break;
+ case 23:
+ _ipu_ch_param_set_axi_id(ipu, dma_chan, ipu->ch23_axi);
+ break;
+ case 27:
+ _ipu_ch_param_set_axi_id(ipu, dma_chan, ipu->ch27_axi);
+ break;
+ case 28:
+ _ipu_ch_param_set_axi_id(ipu, dma_chan, ipu->ch28_axi);
+ break;
+ default:
+ _ipu_ch_param_set_axi_id(ipu, dma_chan, ipu->normal_axi);
+ break;
+ }
+
+ if (idma_is_set(ipu, IDMAC_CHA_PRI(dma_chan), dma_chan) &&
+ ipu->devtype == IPUv3H) {
+ uint32_t reg = IDMAC_CH_LOCK_EN_1(ipu->devtype);
+ uint32_t value = 0;
+
+ switch (dma_chan) {
+ case 5:
+ value = 0x3;
+ break;
+ case 11:
+ value = 0x3 << 2;
+ break;
+ case 12:
+ value = 0x3 << 4;
+ break;
+ case 14:
+ value = 0x3 << 6;
+ break;
+ case 15:
+ value = 0x3 << 8;
+ break;
+ case 20:
+ value = 0x3 << 10;
+ break;
+ case 21:
+ value = 0x3 << 12;
+ break;
+ case 22:
+ value = 0x3 << 14;
+ break;
+ case 23:
+ value = 0x3 << 16;
+ break;
+ case 27:
+ value = 0x3 << 18;
+ break;
+ case 28:
+ value = 0x3 << 20;
+ break;
+ case 45:
+ reg = IDMAC_CH_LOCK_EN_2(ipu->devtype);
+ value = 0x3 << 0;
+ break;
+ case 46:
+ reg = IDMAC_CH_LOCK_EN_2(ipu->devtype);
+ value = 0x3 << 2;
+ break;
+ case 47:
+ reg = IDMAC_CH_LOCK_EN_2(ipu->devtype);
+ value = 0x3 << 4;
+ break;
+ case 48:
+ reg = IDMAC_CH_LOCK_EN_2(ipu->devtype);
+ value = 0x3 << 6;
+ break;
+ case 49:
+ reg = IDMAC_CH_LOCK_EN_2(ipu->devtype);
+ value = 0x3 << 8;
+ break;
+ case 50:
+ reg = IDMAC_CH_LOCK_EN_2(ipu->devtype);
+ value = 0x3 << 10;
+ break;
+ default:
+ break;
+ }
+ value |= ipu_idmac_read(ipu, reg);
+ ipu_idmac_write(ipu, value, reg);
+ }
+
+ _ipu_ch_param_dump(ipu, dma_chan);
+
+ if (phyaddr_2 && ipu->devtype >= IPUv3EX) {
+ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
+ reg &= ~idma_mask(dma_chan);
+ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
+
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_TRB_MODE_SEL(ipu->devtype, dma_chan));
+ reg |= idma_mask(dma_chan);
+ ipu_cm_write(ipu, reg,
+ IPU_CHA_TRB_MODE_SEL(ipu->devtype, dma_chan));
+
+ /* Set IDMAC third buffer's cpmem number */
+ /* See __ipu_ch_get_third_buf_cpmem_num() for mapping */
+ ipu_idmac_write(ipu, 0x00444047L,
+ IDMAC_SUB_ADDR_4(ipu->devtype));
+ ipu_idmac_write(ipu, 0x46004241L,
+ IDMAC_SUB_ADDR_3(ipu->devtype));
+ ipu_idmac_write(ipu, 0x00000045L,
+ IDMAC_SUB_ADDR_1(ipu->devtype));
+
+ /* Reset to buffer 0 */
+ ipu_cm_write(ipu, tri_cur_buf_mask(dma_chan),
+ IPU_CHA_TRIPLE_CUR_BUF(ipu->devtype, dma_chan));
+ } else {
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_TRB_MODE_SEL(ipu->devtype, dma_chan));
+ reg &= ~idma_mask(dma_chan);
+ ipu_cm_write(ipu, reg,
+ IPU_CHA_TRB_MODE_SEL(ipu->devtype, dma_chan));
+
+ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
+ if (phyaddr_1)
+ reg |= idma_mask(dma_chan);
+ else
+ reg &= ~idma_mask(dma_chan);
+ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
+
+ /* Reset to buffer 0 */
+ ipu_cm_write(ipu, idma_mask(dma_chan),
+ IPU_CHA_CUR_BUF(ipu->devtype, dma_chan));
+
+ }
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_init_channel_buffer);
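+
+/*
+ * Usage sketch (illustrative only): double-buffered RGB565 frames on the
+ * background display channel.  "phys0"/"phys1" stand for DMA addresses
+ * the caller has already allocated, and IPU_ROTATE_NONE is assumed to be
+ * the no-rotation value from ipu.h.
+ *
+ *	ret = ipu_init_channel_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
+ *				      IPU_PIX_FMT_RGB565, 1024, 768,
+ *				      1024 * bytes_per_pixel(IPU_PIX_FMT_RGB565),
+ *				      IPU_ROTATE_NONE,
+ *				      phys0, phys1, 0, 0, 0);
+ */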
+
+/*!
+ * This function is called to update the physical address of a buffer for
+ * a logical IPU channel.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param type Input parameter which buffer to initialize.
+ *
+ * @param bufNum Input parameter for buffer number to update.
+ *                      0, 1 or 2 are valid values; 2 may only be used
+ *                      for channels in triple buffering mode.
+ *
+ * @param phyaddr Input parameter buffer physical address.
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail. This function will fail if the buffer is set to ready.
+ */
+int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
+ ipu_buffer_t type, uint32_t bufNum, dma_addr_t phyaddr)
+{
+ uint32_t reg;
+ int ret = 0;
+ uint32_t dma_chan = channel_2_dma(channel, type);
+ unsigned long lock_flags;
+
+ if (dma_chan == IDMA_CHAN_INVALID)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
+ if (bufNum == 0)
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_BUF0_RDY(ipu->devtype, dma_chan));
+ else if (bufNum == 1)
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_BUF1_RDY(ipu->devtype, dma_chan));
+ else
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_BUF2_RDY(ipu->devtype, dma_chan));
+
+ if ((reg & idma_mask(dma_chan)) == 0)
+ _ipu_ch_param_set_buffer(ipu, dma_chan, bufNum, phyaddr);
+ else
+ ret = -EACCES;
+ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipu_update_channel_buffer);
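+
+/*
+ * Page-flip sketch (illustrative only): retarget the idle buffer at a
+ * freshly rendered frame, then hand it back to the hardware with
+ * ipu_select_buffer() below.  "new_phys" is a placeholder DMA address.
+ *
+ *	if (!ipu_update_channel_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
+ *				       0, new_phys))
+ *		ipu_select_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, 0);
+ */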
+
+/*!
+ * This function is called to update the band mode setting for
+ * a logical IPU channel.
+ *
+ * @param ipu ipu handler
+ *
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param type Input parameter which buffer to initialize.
+ *
+ * @param band_height Input parameter for band lines:
+ *                      should be log2(4/8/16/32/64/128/256).
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
+ ipu_buffer_t type, uint32_t band_height)
+{
+ uint32_t reg;
+ int ret = 0;
+ uint32_t dma_chan = channel_2_dma(channel, type);
+
+ if ((2 > band_height) || (8 < band_height))
+ return -EINVAL;
+
+ mutex_lock(&ipu->mutex_lock);
+
+ reg = ipu_idmac_read(ipu, IDMAC_BAND_EN(ipu->devtype, dma_chan));
+ reg |= 1 << (dma_chan % 32);
+ ipu_idmac_write(ipu, reg, IDMAC_BAND_EN(ipu->devtype, dma_chan));
+
+ _ipu_ch_param_set_bandmode(ipu, dma_chan, band_height);
+ dev_dbg(ipu->dev, "dma_chan:%d, band_height:%d.\n\n",
+ dma_chan, 1 << band_height);
+ mutex_unlock(&ipu->mutex_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipu_set_channel_bandmode);
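+
+/*
+ * Usage sketch (illustrative only): band_height is the log2 of the band
+ * size in lines, so a value of 4 requests 16-line bands on the
+ * post-processor output buffer.
+ *
+ *	ret = ipu_set_channel_bandmode(ipu, MEM_PP_MEM, IPU_OUTPUT_BUFFER, 4);
+ */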
+
+/*!
+ * This function is called to update the buffer offsets for a logical
+ * IPU channel.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param type Input parameter which buffer to initialize.
+ *
+ * @param pixel_fmt Input parameter for pixel format of buffer.
+ * Pixel format is a FOURCC ASCII code.
+ *
+ * @param width Input parameter for width of buffer in pixels.
+ *
+ * @param height Input parameter for height of buffer in pixels.
+ *
+ * @param stride Input parameter for stride length of buffer
+ *                      in bytes.
+ *
+ * @param u predefined private u offset for additional cropping,
+ * zero if not used.
+ *
+ * @param v predefined private v offset for additional cropping,
+ * zero if not used.
+ *
+ * @param vertical_offset vertical offset for Y coordinate
+ *                      in the existing frame
+ *
+ *
+ * @param horizontal_offset horizontal offset for X coordinate
+ *                      in the existing frame
+ *
+ *
+ * @return Returns 0 on success or negative error code on fail
+ * This function will fail if any buffer is set to ready.
+ */
+
+int32_t ipu_update_channel_offset(struct ipu_soc *ipu,
+ ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t pixel_fmt,
+ uint16_t width, uint16_t height,
+ uint32_t stride,
+ uint32_t u, uint32_t v,
+ uint32_t vertical_offset, uint32_t horizontal_offset)
+{
+ int ret = 0;
+ uint32_t dma_chan = channel_2_dma(channel, type);
+ unsigned long lock_flags;
+
+ if (dma_chan == IDMA_CHAN_INVALID)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
+ if ((ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(ipu->devtype, dma_chan)) &
+ idma_mask(dma_chan)) ||
+ (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(ipu->devtype, dma_chan)) &
+ idma_mask(dma_chan)) ||
+ ((ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(ipu->devtype, dma_chan)) &
+ idma_mask(dma_chan)) &&
+ (ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(ipu->devtype, dma_chan)) &
+ idma_mask(dma_chan)) && _ipu_is_trb_chan(ipu, dma_chan)))
+ ret = -EACCES;
+ else
+ _ipu_ch_offset_update(ipu, dma_chan, pixel_fmt, width, height, stride,
+ u, v, 0, vertical_offset, horizontal_offset);
+ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipu_update_channel_offset);
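+
+/*
+ * Cropping sketch (illustrative only): shift the fetch origin 32 lines
+ * down and 16 pixels right inside a 1024x768 YUV420P frame.  Width,
+ * height and stride must match what the channel buffer was initialized
+ * with; all names are placeholders.
+ *
+ *	ret = ipu_update_channel_offset(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER,
+ *					IPU_PIX_FMT_YUV420P, 1024, 768,
+ *					1024, 0, 0, 32, 16);
+ */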
+
+int32_t ipu_get_channel_offset(uint32_t pixel_fmt,
+ uint16_t width, uint16_t height,
+ uint32_t stride,
+ uint32_t u, uint32_t v,
+ uint32_t vertical_offset, uint32_t horizontal_offset,
+ uint32_t *u_offset, uint32_t *v_offset)
+{
+ return __ipu_ch_offset_calc(pixel_fmt, width, height, stride,
+ u, v, 0,
+ vertical_offset, horizontal_offset,
+ u_offset, v_offset);
+}
+EXPORT_SYMBOL(ipu_get_channel_offset);
+
+/*!
+ * This function is called to set a channel's buffer as ready.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param type Input parameter which buffer to initialize.
+ *
+ * @param bufNum Input parameter for which buffer number set to
+ * ready state.
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
+ ipu_buffer_t type, uint32_t bufNum)
+{
+ uint32_t dma_chan = channel_2_dma(channel, type);
+ unsigned long lock_flags;
+
+ if (dma_chan == IDMA_CHAN_INVALID)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
+ /* Mark buffer to be ready. */
+ if (bufNum == 0)
+ ipu_cm_write(ipu, idma_mask(dma_chan),
+ IPU_CHA_BUF0_RDY(ipu->devtype, dma_chan));
+ else if (bufNum == 1)
+ ipu_cm_write(ipu, idma_mask(dma_chan),
+ IPU_CHA_BUF1_RDY(ipu->devtype, dma_chan));
+ else
+ ipu_cm_write(ipu, idma_mask(dma_chan),
+ IPU_CHA_BUF2_RDY(ipu->devtype, dma_chan));
+ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_select_buffer);
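+
+/*
+ * Usage sketch (illustrative only): refill buffer 1 only once the IDMAC
+ * has consumed it, then mark it ready again.  ipu_check_buffer_ready()
+ * is defined further below; fill_frame() and buf1_virt are placeholders
+ * for the caller's rendering code.
+ *
+ *	if (!ipu_check_buffer_ready(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, 1)) {
+ *		fill_frame(buf1_virt);
+ *		ipu_select_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, 1);
+ *	}
+ */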
+
+/*!
+ * This function is called to set the buffers of the multi-view VDI input
+ * channels (previous/current/next field) as ready.
+ *
+ * @param ipu ipu handler
+ * @param bufNum Input parameter for which buffer number set to
+ * ready state.
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum)
+{
+
+ uint32_t dma_chan = channel_2_dma(MEM_VDI_PRP_VF_MEM, IPU_INPUT_BUFFER);
+ uint32_t mask_bit =
+ idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_P, IPU_INPUT_BUFFER))|
+ idma_mask(dma_chan)|
+ idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_N, IPU_INPUT_BUFFER));
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
+ /* Mark buffers to be ready. */
+ if (bufNum == 0)
+ ipu_cm_write(ipu, mask_bit,
+ IPU_CHA_BUF0_RDY(ipu->devtype, dma_chan));
+ else
+ ipu_cm_write(ipu, mask_bit,
+ IPU_CHA_BUF1_RDY(ipu->devtype, dma_chan));
+ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_select_multi_vdi_buffer);
+
+#define NA -1
+static int proc_dest_sel[] = {
+ 0, 1, 1, 3, 5, 5, 4, 7, 8, 9, 10, 11, 12, 14, 15, 16,
+ 0, 1, 1, 5, 5, 5, 5, 5, 7, 8, 9, 10, 11, 12, 14, 31 };
+static int proc_src_sel[] = { 0, 6, 7, 6, 7, 8, 5, NA, NA, NA,
+ NA, NA, NA, NA, NA, 1, 2, 3, 4, 7, 8, NA, 8, NA };
+static int disp_src_sel[] = { 0, 6, 7, 8, 3, 4, 5, NA, NA, NA,
+ NA, NA, NA, NA, NA, 1, NA, 2, NA, 3, 4, 4, 4, 4 };
+
+
+/*!
+ * This function links 2 channels together for automatic frame
+ * synchronization. The output of the source channel is linked to the input of
+ * the destination channel.
+ *
+ * @param ipu ipu handler
+ * @param src_ch Input parameter for the logical channel ID of
+ * the source channel.
+ *
+ * @param dest_ch Input parameter for the logical channel ID of
+ * the destination channel.
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
+{
+ int retval = 0;
+ uint32_t fs_proc_flow1;
+ uint32_t fs_proc_flow2;
+ uint32_t fs_proc_flow3;
+ uint32_t fs_disp_flow1;
+
+ mutex_lock(&ipu->mutex_lock);
+
+ fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
+ fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
+ fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
+
+ switch (src_ch) {
+ case CSI_MEM0:
+ fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
+ fs_proc_flow3 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_SMFC0_DEST_SEL_OFFSET;
+ break;
+ case CSI_MEM1:
+ fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
+ fs_proc_flow3 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_SMFC1_DEST_SEL_OFFSET;
+ break;
+ case CSI_MEM2:
+ fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
+ fs_proc_flow3 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_SMFC2_DEST_SEL_OFFSET;
+ break;
+ case CSI_MEM3:
+ fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
+ fs_proc_flow3 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_SMFC3_DEST_SEL_OFFSET;
+ break;
+ case CSI_PRP_ENC_MEM:
+ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PRPENC_DEST_SEL_OFFSET;
+ break;
+ case CSI_PRP_VF_MEM:
+ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PRPVF_DEST_SEL_OFFSET;
+ break;
+ case MEM_PP_MEM:
+ fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PP_DEST_SEL_OFFSET;
+ break;
+ case MEM_ROT_PP_MEM:
+ fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PP_ROT_DEST_SEL_OFFSET;
+ break;
+ case MEM_PRP_ENC_MEM:
+ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PRPENC_DEST_SEL_OFFSET;
+ break;
+ case MEM_ROT_ENC_MEM:
+ fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PRPENC_ROT_DEST_SEL_OFFSET;
+ break;
+ case MEM_PRP_VF_MEM:
+ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PRPVF_DEST_SEL_OFFSET;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PRPVF_DEST_SEL_OFFSET;
+ break;
+ case MEM_ROT_VF_MEM:
+ fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
+ fs_proc_flow2 |=
+ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
+ FS_PRPVF_ROT_DEST_SEL_OFFSET;
+ break;
+ case MEM_VDOA_MEM:
+ fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
+ if (MEM_VDI_MEM == dest_ch)
+ fs_proc_flow3 |= FS_VDOA_DEST_SEL_VDI;
+ else if (MEM_PP_MEM == dest_ch)
+ fs_proc_flow3 |= FS_VDOA_DEST_SEL_IC;
+ else {
+ retval = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ retval = -EINVAL;
+ goto err;
+ }
+
+ switch (dest_ch) {
+ case MEM_PP_MEM:
+ fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
+ if (MEM_VDOA_MEM == src_ch)
+ fs_proc_flow1 |= FS_PP_SRC_SEL_VDOA;
+ else
+ fs_proc_flow1 |= proc_src_sel[IPU_CHAN_ID(src_ch)] <<
+ FS_PP_SRC_SEL_OFFSET;
+ break;
+ case MEM_ROT_PP_MEM:
+ fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
+ fs_proc_flow1 |=
+ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
+ FS_PP_ROT_SRC_SEL_OFFSET;
+ break;
+ case MEM_PRP_ENC_MEM:
+ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
+ fs_proc_flow1 |=
+ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
+ break;
+ case MEM_ROT_ENC_MEM:
+ fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
+ fs_proc_flow1 |=
+ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
+ FS_PRPENC_ROT_SRC_SEL_OFFSET;
+ break;
+ case MEM_PRP_VF_MEM:
+ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
+ fs_proc_flow1 |=
+ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
+ fs_proc_flow1 |=
+ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
+ break;
+ case MEM_ROT_VF_MEM:
+ fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
+ fs_proc_flow1 |=
+ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
+ FS_PRPVF_ROT_SRC_SEL_OFFSET;
+ break;
+ case MEM_DC_SYNC:
+ fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
+ fs_disp_flow1 |=
+ disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC1_SRC_SEL_OFFSET;
+ break;
+ case MEM_BG_SYNC:
+ fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
+ fs_disp_flow1 |=
+ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
+ FS_DP_SYNC0_SRC_SEL_OFFSET;
+ break;
+ case MEM_FG_SYNC:
+ fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
+ fs_disp_flow1 |=
+ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
+ FS_DP_SYNC1_SRC_SEL_OFFSET;
+ break;
+ case MEM_DC_ASYNC:
+ fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
+ fs_disp_flow1 |=
+ disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC2_SRC_SEL_OFFSET;
+ break;
+ case MEM_BG_ASYNC0:
+ fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
+ fs_disp_flow1 |=
+ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
+ FS_DP_ASYNC0_SRC_SEL_OFFSET;
+ break;
+ case MEM_FG_ASYNC0:
+ fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
+ fs_disp_flow1 |=
+ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
+ FS_DP_ASYNC1_SRC_SEL_OFFSET;
+ break;
+ case MEM_VDI_MEM:
+ fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
+ if (MEM_VDOA_MEM == src_ch)
+ fs_proc_flow1 |= FS_VDI_SRC_SEL_VDOA;
+ else {
+ retval = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ retval = -EINVAL;
+ goto err;
+ }
+
+ ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
+ ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
+ ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
+
+err:
+ mutex_unlock(&ipu->mutex_lock);
+ return retval;
+}
+EXPORT_SYMBOL(ipu_link_channels);
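+
+/*
+ * Usage sketch (illustrative only): link the post-processor output to the
+ * background display plane so the display flow is triggered automatically
+ * when a processed frame completes, and unlink it again on teardown.
+ *
+ *	ret = ipu_link_channels(ipu, MEM_PP_MEM, MEM_BG_SYNC);
+ *	... enable and run both channels ...
+ *	ipu_unlink_channels(ipu, MEM_PP_MEM, MEM_BG_SYNC);
+ */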
+
+/*!
+ * This function unlinks 2 channels and disables automatic frame
+ * synchronization.
+ *
+ * @param ipu ipu handler
+ * @param src_ch Input parameter for the logical channel ID of
+ * the source channel.
+ *
+ * @param dest_ch Input parameter for the logical channel ID of
+ * the destination channel.
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
+{
+ int retval = 0;
+ uint32_t fs_proc_flow1;
+ uint32_t fs_proc_flow2;
+ uint32_t fs_proc_flow3;
+ uint32_t fs_disp_flow1;
+
+ mutex_lock(&ipu->mutex_lock);
+
+ fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
+ fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
+ fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
+ fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
+
+ switch (src_ch) {
+ case CSI_MEM0:
+ fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
+ break;
+ case CSI_MEM1:
+ fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
+ break;
+ case CSI_MEM2:
+ fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
+ break;
+ case CSI_MEM3:
+ fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
+ break;
+ case CSI_PRP_ENC_MEM:
+ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
+ break;
+ case CSI_PRP_VF_MEM:
+ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
+ break;
+ case MEM_PP_MEM:
+ fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
+ break;
+ case MEM_ROT_PP_MEM:
+ fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
+ break;
+ case MEM_PRP_ENC_MEM:
+ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
+ break;
+ case MEM_ROT_ENC_MEM:
+ fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
+ break;
+ case MEM_PRP_VF_MEM:
+ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
+ break;
+ case MEM_ROT_VF_MEM:
+ fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
+ break;
+ case MEM_VDOA_MEM:
+ fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
+ break;
+ default:
+ retval = -EINVAL;
+ goto err;
+ }
+
+ switch (dest_ch) {
+ case MEM_PP_MEM:
+ fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
+ break;
+ case MEM_ROT_PP_MEM:
+ fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
+ break;
+ case MEM_PRP_ENC_MEM:
+ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
+ break;
+ case MEM_ROT_ENC_MEM:
+ fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
+ break;
+ case MEM_PRP_VF_MEM:
+ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
+ break;
+ case MEM_ROT_VF_MEM:
+ fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
+ break;
+ case MEM_DC_SYNC:
+ fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
+ break;
+ case MEM_BG_SYNC:
+ fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
+ break;
+ case MEM_FG_SYNC:
+ fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
+ break;
+ case MEM_DC_ASYNC:
+ fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
+ break;
+ case MEM_BG_ASYNC0:
+ fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
+ break;
+ case MEM_FG_ASYNC0:
+ fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
+ break;
+ case MEM_VDI_MEM:
+ fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
+ break;
+ default:
+ retval = -EINVAL;
+ goto err;
+ }
+
+ ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
+ ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
+ ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
+ ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
+
+err:
+ mutex_unlock(&ipu->mutex_lock);
+ return retval;
+}
+EXPORT_SYMBOL(ipu_unlink_channels);
+
+/*!
+ * This function checks whether a logical channel is enabled.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @return      This function returns 1 if the requested channel is enabled
+ *                      or 0 if it is not.
+ */
+int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ uint32_t reg;
+ uint32_t in_dma;
+ uint32_t out_dma;
+
+ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
+ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
+
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
+ if (reg & idma_mask(in_dma))
+ return 1;
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
+ if (reg & idma_mask(out_dma))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(ipu_is_channel_busy);
+
+/*!
+ * This function enables a logical channel.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ uint32_t reg;
+ uint32_t ipu_conf;
+ uint32_t in_dma;
+ uint32_t out_dma;
+ uint32_t sec_dma;
+ uint32_t thrd_dma;
+
+ mutex_lock(&ipu->mutex_lock);
+
+ if (ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) {
+ dev_err(ipu->dev, "Warning: channel already enabled %d\n",
+ IPU_CHAN_ID(channel));
+ mutex_unlock(&ipu->mutex_lock);
+ return -EACCES;
+ }
+
+ /* Get input and output dma channels */
+ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
+ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
+
+ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
+ if (ipu->di_use_count[0] > 0) {
+ ipu_conf |= IPU_CONF_DI0_EN;
+ }
+ if (ipu->di_use_count[1] > 0) {
+ ipu_conf |= IPU_CONF_DI1_EN;
+ }
+ if (ipu->dp_use_count > 0)
+ ipu_conf |= IPU_CONF_DP_EN;
+ if (ipu->dc_use_count > 0)
+ ipu_conf |= IPU_CONF_DC_EN;
+ if (ipu->dmfc_use_count > 0)
+ ipu_conf |= IPU_CONF_DMFC_EN;
+ if (ipu->ic_use_count > 0)
+ ipu_conf |= IPU_CONF_IC_EN;
+ if (ipu->vdi_use_count > 0) {
+ ipu_conf |= IPU_CONF_ISP_EN;
+ ipu_conf |= IPU_CONF_VDI_EN;
+ ipu_conf |= IPU_CONF_IC_INPUT;
+ }
+ if (ipu->rot_use_count > 0)
+ ipu_conf |= IPU_CONF_ROT_EN;
+ if (ipu->smfc_use_count > 0)
+ ipu_conf |= IPU_CONF_SMFC_EN;
+ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
+
+ if (idma_is_valid(in_dma)) {
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
+ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
+ }
+ if (idma_is_valid(out_dma)) {
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
+ ipu_idmac_write(ipu, reg | idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
+ }
+
+ if ((ipu->sec_chan_en[IPU_CHAN_ID(channel)]) &&
+ ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM) ||
+ (channel == MEM_VDI_PRP_VF_MEM))) {
+ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
+ ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
+ }
+ if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
+ ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM))) {
+ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
+ ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
+
+ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
+ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
+ ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_SEP_ALPHA);
+ } else if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
+ ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))) {
+ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
+ ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
+ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
+ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_SEP_ALPHA);
+ }
+
+ if ((channel == MEM_DC_SYNC) || (channel == MEM_BG_SYNC) ||
+ (channel == MEM_FG_SYNC)) {
+ reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
+ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_WM_EN(in_dma));
+
+ _ipu_dp_dc_enable(ipu, channel);
+ }
+
+ if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
+ _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
+ _ipu_is_vdi_out_chan(out_dma))
+ _ipu_ic_enable_task(ipu, channel);
+
+ ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(channel);
+
+ if (ipu->prg_clk)
+ clk_prepare_enable(ipu->prg_clk);
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_enable_channel);
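+
+/*
+ * Bring-up sketch (illustrative only): the usual ordering is to init the
+ * channel, init its buffer(s), enable the channel and finally mark a
+ * buffer ready so the IDMAC starts fetching.  All arguments are
+ * placeholders (see the sketches next to the respective functions).
+ *
+ *	ipu_init_channel(ipu, MEM_BG_SYNC, &params);
+ *	ipu_init_channel_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
+ *				IPU_PIX_FMT_RGB565, 1024, 768, 2048,
+ *				IPU_ROTATE_NONE, phys0, phys1, 0, 0, 0);
+ *	ipu_enable_channel(ipu, MEM_BG_SYNC);
+ *	ipu_select_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, 0);
+ */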
+
+/*!
+ * This function checks whether a buffer of a logical channel is ready.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param       type            Input parameter which buffer to check.
+ *
+ * @param       bufNum          Input parameter for which buffer number to
+ *                      check the ready state of.
+ *
+ * @return      Returns 1 if the buffer is ready, 0 if it is not, or a
+ *                      negative error code on fail.
+ */
+int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t bufNum)
+{
+ uint32_t dma_chan = channel_2_dma(channel, type);
+ uint32_t reg;
+ unsigned long lock_flags;
+
+ if (dma_chan == IDMA_CHAN_INVALID)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
+ if (bufNum == 0)
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_BUF0_RDY(ipu->devtype, dma_chan));
+ else if (bufNum == 1)
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_BUF1_RDY(ipu->devtype, dma_chan));
+ else
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_BUF2_RDY(ipu->devtype, dma_chan));
+ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
+
+ if (reg & idma_mask(dma_chan))
+ return 1;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(ipu_check_buffer_ready);
+
+/*!
+ * This function clears the ready state of a buffer for a logical channel.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param type Input parameter which buffer to clear.
+ *
+ * @param       bufNum          Input parameter for which buffer number to
+ *                      clear the ready state of.
+ *
+ */
+void _ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t bufNum)
+{
+ uint32_t dma_ch = channel_2_dma(channel, type);
+
+ if (!idma_is_valid(dma_ch))
+ return;
+
+ ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
+ if (bufNum == 0)
+ ipu_cm_write(ipu, idma_mask(dma_ch),
+ IPU_CHA_BUF0_RDY(ipu->devtype, dma_ch));
+ else if (bufNum == 1)
+ ipu_cm_write(ipu, idma_mask(dma_ch),
+ IPU_CHA_BUF1_RDY(ipu->devtype, dma_ch));
+ else
+ ipu_cm_write(ipu, idma_mask(dma_ch),
+ IPU_CHA_BUF2_RDY(ipu->devtype, dma_ch));
+ ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
+}
+
+void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t bufNum)
+{
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
+ _ipu_clear_buffer_ready(ipu, channel, type, bufNum);
+ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
+}
+EXPORT_SYMBOL(ipu_clear_buffer_ready);
+
+/*!
+ * This function disables a logical channel.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param wait_for_stop Flag to set whether to wait for channel end
+ * of frame or return immediately.
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop)
+{
+ uint32_t reg;
+ uint32_t in_dma;
+ uint32_t out_dma;
+ uint32_t sec_dma = NO_DMA;
+ uint32_t thrd_dma = NO_DMA;
+ uint16_t fg_pos_x, fg_pos_y;
+ unsigned long lock_flags;
+
+ mutex_lock(&ipu->mutex_lock);
+
+ if ((ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
+ dev_dbg(ipu->dev, "Channel already disabled %d\n",
+ IPU_CHAN_ID(channel));
+ mutex_unlock(&ipu->mutex_lock);
+ return -EACCES;
+ }
+
+ /* Get input and output dma channels */
+ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
+ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
+
+ if ((idma_is_valid(in_dma) &&
+ !idma_is_set(ipu, IDMAC_CHA_EN(in_dma), in_dma))
+ && (idma_is_valid(out_dma) &&
+ !idma_is_set(ipu, IDMAC_CHA_EN(out_dma), out_dma))) {
+ mutex_unlock(&ipu->mutex_lock);
+ return -EINVAL;
+ }
+
+ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)])
+ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
+ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) {
+ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
+ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
+ }
+
+ if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
+ (channel == MEM_DC_SYNC)) {
+ if (channel == MEM_FG_SYNC) {
+ _ipu_disp_get_window_pos(ipu, channel, &fg_pos_x, &fg_pos_y);
+ _ipu_disp_set_window_pos(ipu, channel, 0, 0);
+ }
+
+ _ipu_dp_dc_disable(ipu, channel, false);
+
+ /*
+		 * wait for the BG channel EOF, then disable the FG IDMAC,
+		 * to avoid an FG NFB4EOF error.
+ */
+ if ((channel == MEM_FG_SYNC) && (ipu_is_channel_busy(ipu, MEM_BG_SYNC))) {
+ int timeout = 50;
+
+ ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF),
+ IPUIRQ_2_STATREG(ipu->devtype,
+ IPU_IRQ_BG_SYNC_EOF));
+ while ((ipu_cm_read(ipu,
+ IPUIRQ_2_STATREG(ipu->devtype,
+ IPU_IRQ_BG_SYNC_EOF)) &
+ IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF)) == 0) {
+ msleep(10);
+ timeout -= 10;
+ if (timeout <= 0) {
+ dev_err(ipu->dev, "warning: wait for bg sync eof timeout\n");
+ break;
+ }
+ }
+ }
+ } else if (wait_for_stop && !_ipu_is_smfc_chan(out_dma) &&
+ channel != CSI_PRP_VF_MEM && channel != CSI_PRP_ENC_MEM) {
+ while (idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype, in_dma),
+ in_dma) ||
+ idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype, out_dma),
+ out_dma) ||
+ (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
+ idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype, sec_dma),
+ sec_dma)) ||
+ (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
+ idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype, thrd_dma),
+ thrd_dma))) {
+ uint32_t irq = 0xffffffff;
+ int timeout = 50000;
+
+ if (idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype,
+ out_dma), out_dma))
+ irq = out_dma;
+ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
+ idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype,
+ sec_dma), sec_dma))
+ irq = sec_dma;
+ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
+ idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype,
+ thrd_dma), thrd_dma))
+ irq = thrd_dma;
+ if (idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype,
+ in_dma), in_dma))
+ irq = in_dma;
+
+ if (irq == 0xffffffff) {
+ dev_dbg(ipu->dev, "warning: no channel busy, break\n");
+ break;
+ }
+
+ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq),
+ IPUIRQ_2_STATREG(ipu->devtype, irq));
+
+ dev_dbg(ipu->dev, "warning: channel %d busy, need wait\n", irq);
+
+ while (((ipu_cm_read(ipu,
+ IPUIRQ_2_STATREG(ipu->devtype, irq))
+ & IPUIRQ_2_MASK(irq)) == 0) &&
+ (idma_is_set(ipu, IDMAC_CHA_BUSY(ipu->devtype,
+ irq), irq))) {
+ udelay(10);
+ timeout -= 10;
+ if (timeout <= 0) {
+ ipu_dump_registers(ipu);
+ dev_err(ipu->dev, "warning: disable ipu dma channel %d during its busy state\n", irq);
+ break;
+ }
+ }
+ dev_dbg(ipu->dev, "wait_time:%d\n", 50000 - timeout);
+
+ }
+ }
+
+ if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
+ (channel == MEM_DC_SYNC)) {
+ reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
+ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_WM_EN(in_dma));
+ }
+
+ /* Disable IC task */
+ if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
+ _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
+ _ipu_is_vdi_out_chan(out_dma))
+ _ipu_ic_disable_task(ipu, channel);
+
+ /* Disable DMA channel(s) */
+ if (idma_is_valid(in_dma)) {
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
+ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
+ ipu_cm_write(ipu, idma_mask(in_dma),
+ IPU_CHA_CUR_BUF(ipu->devtype, in_dma));
+ ipu_cm_write(ipu, tri_cur_buf_mask(in_dma),
+ IPU_CHA_TRIPLE_CUR_BUF(ipu->devtype, in_dma));
+ }
+ if (idma_is_valid(out_dma)) {
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
+ ipu_idmac_write(ipu, reg & ~idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
+ ipu_cm_write(ipu, idma_mask(out_dma),
+ IPU_CHA_CUR_BUF(ipu->devtype, out_dma));
+ ipu_cm_write(ipu, tri_cur_buf_mask(out_dma),
+ IPU_CHA_TRIPLE_CUR_BUF(ipu->devtype, out_dma));
+ }
+ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
+ ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
+ ipu_cm_write(ipu, idma_mask(sec_dma),
+ IPU_CHA_CUR_BUF(ipu->devtype, sec_dma));
+ }
+ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
+ ipu_idmac_write(ipu, reg & ~idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
+ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC) {
+ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
+ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_SEP_ALPHA);
+ } else {
+ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
+ ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_SEP_ALPHA);
+ }
+ ipu_cm_write(ipu, idma_mask(thrd_dma),
+ IPU_CHA_CUR_BUF(ipu->devtype, thrd_dma));
+ }
+
+ if (channel == MEM_FG_SYNC)
+ _ipu_disp_set_window_pos(ipu, channel, fg_pos_x, fg_pos_y);
+
+ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
+ /* Set channel buffers NOT to be ready */
+ if (idma_is_valid(in_dma)) {
+ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 0);
+ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 1);
+ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 2);
+ }
+ if (idma_is_valid(out_dma)) {
+ _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 0);
+ _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 1);
+ }
+ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
+ _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 0);
+ _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 1);
+ }
+ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
+ _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 0);
+ _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 1);
+ }
+ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
+
+ ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(channel));
+
+ if (ipu->prg_clk)
+ clk_disable_unprepare(ipu->prg_clk);
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_disable_channel);
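+
+/*
+ * Usage sketch (illustrative only): for memory-to-memory processing
+ * channels, passing wait_for_stop = true lets the call poll the IDMAC
+ * busy bits so the current frame finishes before the channel is torn
+ * down.
+ *
+ *	ipu_disable_channel(ipu, MEM_PP_MEM, true);
+ *	ipu_uninit_channel(ipu, MEM_PP_MEM);
+ */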
+
+/*!
+ * This function enables CSI.
+ *
+ * @param ipu ipu handler
+ * @param csi csi num 0 or 1
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi)
+{
+ uint32_t reg;
+
+ if (csi > 1) {
+ dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
+ return -EINVAL;
+ }
+
+ _ipu_get(ipu);
+ mutex_lock(&ipu->mutex_lock);
+ ipu->csi_use_count[csi]++;
+
+ if (ipu->csi_use_count[csi] == 1) {
+ reg = ipu_cm_read(ipu, IPU_CONF);
+ if (csi == 0)
+ ipu_cm_write(ipu, reg | IPU_CONF_CSI0_EN, IPU_CONF);
+ else
+ ipu_cm_write(ipu, reg | IPU_CONF_CSI1_EN, IPU_CONF);
+ }
+ mutex_unlock(&ipu->mutex_lock);
+ _ipu_put(ipu);
+ return 0;
+}
+EXPORT_SYMBOL(ipu_enable_csi);
+
+/*!
+ * This function disables CSI.
+ *
+ * @param ipu ipu handler
+ * @param csi csi num 0 or 1
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi)
+{
+ uint32_t reg;
+
+ if (csi > 1) {
+ dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
+ return -EINVAL;
+ }
+ _ipu_get(ipu);
+ mutex_lock(&ipu->mutex_lock);
+ ipu->csi_use_count[csi]--;
+ if (ipu->csi_use_count[csi] == 0) {
+ _ipu_csi_wait4eof(ipu, ipu->csi_channel[csi]);
+ reg = ipu_cm_read(ipu, IPU_CONF);
+ if (csi == 0)
+ ipu_cm_write(ipu, reg & ~IPU_CONF_CSI0_EN, IPU_CONF);
+ else
+ ipu_cm_write(ipu, reg & ~IPU_CONF_CSI1_EN, IPU_CONF);
+ }
+ mutex_unlock(&ipu->mutex_lock);
+ _ipu_put(ipu);
+ return 0;
+}
+EXPORT_SYMBOL(ipu_disable_csi);
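+
+/*
+ * Usage sketch (illustrative only): the CSI enable/disable calls are
+ * reference counted per CSI port, so a capture path brackets streaming
+ * with a matched pair on the port it uses.
+ *
+ *	ipu_enable_csi(ipu, 0);
+ *	... stream frames, e.g. through CSI_MEM0 ...
+ *	ipu_disable_csi(ipu, 0);
+ */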
+
+static irqreturn_t ipu_sync_irq_handler(int irq, void *desc)
+{
+ struct ipu_soc *ipu = desc;
+ int i;
+ uint32_t line, bit, int_stat, int_ctrl;
+ irqreturn_t result = IRQ_NONE;
+ const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
+
+ spin_lock(&ipu->int_reg_spin_lock);
+
+ for (i = 0; int_reg[i] != 0; i++) {
+ int_stat = ipu_cm_read(ipu,
+ IPU_INT_STAT(ipu->devtype, int_reg[i]));
+ int_ctrl = ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
+ int_stat &= int_ctrl;
+ ipu_cm_write(ipu, int_stat,
+ IPU_INT_STAT(ipu->devtype, int_reg[i]));
+ while ((line = ffs(int_stat)) != 0) {
+ bit = --line;
+ int_stat &= ~(1UL << line);
+ line += (int_reg[i] - 1) * 32;
+ result |=
+ ipu->irq_list[line].handler(line,
+ ipu->irq_list[line].
+ dev_id);
+ if (ipu->irq_list[line].flags & IPU_IRQF_ONESHOT) {
+ int_ctrl &= ~(1UL << bit);
+ ipu_cm_write(ipu, int_ctrl,
+ IPU_INT_CTRL(int_reg[i]));
+ }
+ }
+ }
+
+ spin_unlock(&ipu->int_reg_spin_lock);
+
+ return result;
+}
+
+static irqreturn_t ipu_err_irq_handler(int irq, void *desc)
+{
+ struct ipu_soc *ipu = desc;
+ int i;
+ uint32_t int_stat;
+ const int err_reg[] = { 5, 6, 9, 10, 0 };
+
+ spin_lock(&ipu->int_reg_spin_lock);
+
+ for (i = 0; err_reg[i] != 0; i++) {
+ int_stat = ipu_cm_read(ipu,
+ IPU_INT_STAT(ipu->devtype, err_reg[i]));
+ int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i]));
+ if (int_stat) {
+ ipu_cm_write(ipu, int_stat,
+ IPU_INT_STAT(ipu->devtype, err_reg[i]));
+ dev_warn(ipu->dev,
+ "IPU Warning - IPU_INT_STAT_%d = 0x%08X\n",
+ err_reg[i], int_stat);
+ /* Disable interrupts so we only get error once */
+ int_stat = ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i])) &
+ ~int_stat;
+ ipu_cm_write(ipu, int_stat, IPU_INT_CTRL(err_reg[i]));
+ }
+ }
+
+ spin_unlock(&ipu->int_reg_spin_lock);
+
+ return IRQ_HANDLED;
+}
+
+/*!
+ * This function enables the interrupt for the specified interrupt line.
+ * The interrupt lines are defined in \b ipu_irq_line enum.
+ *
+ * @param ipu ipu handler
+ * @param irq Interrupt line to enable interrupt for.
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq)
+{
+ uint32_t reg;
+ unsigned long lock_flags;
+ int ret = 0;
+
+ _ipu_get(ipu);
+
+ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
+
+ /*
+	 * Check the sync interrupt handler only, since we do nothing for
+	 * error interrupts other than printing out register values in the
+	 * error interrupt source handler.
+ */
+ if (_ipu_is_sync_irq(irq) && (ipu->irq_list[irq].handler == NULL)) {
+ dev_err(ipu->dev, "handler hasn't been registered on sync "
+ "irq %d\n", irq);
+ ret = -EACCES;
+ goto out;
+ }
+
+ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
+ reg |= IPUIRQ_2_MASK(irq);
+ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
+out:
+ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
+
+ _ipu_put(ipu);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipu_enable_irq);
+
+/*!
+ * This function disables the interrupt for the specified interrupt line.
+ * The interrupt lines are defined in \b ipu_irq_line enum.
+ *
+ * @param ipu ipu handler
+ * @param irq Interrupt line to disable interrupt for.
+ *
+ */
+void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq)
+{
+ uint32_t reg;
+ unsigned long lock_flags;
+
+ _ipu_get(ipu);
+
+ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
+
+ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
+ reg &= ~IPUIRQ_2_MASK(irq);
+ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
+
+ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
+
+ _ipu_put(ipu);
+}
+EXPORT_SYMBOL(ipu_disable_irq);
+
+/*!
+ * This function clears the interrupt for the specified interrupt line.
+ * The interrupt lines are defined in \b ipu_irq_line enum.
+ *
+ * @param ipu ipu handler
+ * @param irq Interrupt line to clear interrupt for.
+ *
+ */
+void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq)
+{
+ unsigned long lock_flags;
+
+ _ipu_get(ipu);
+
+ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
+
+ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq),
+ IPUIRQ_2_STATREG(ipu->devtype, irq));
+
+ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
+
+ _ipu_put(ipu);
+}
+EXPORT_SYMBOL(ipu_clear_irq);
+
+/*!
+ * This function returns the current interrupt status for the specified
+ * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
+ *
+ * @param ipu ipu handler
+ * @param irq Interrupt line to get status for.
+ *
+ * @return Returns true if the interrupt is pending/asserted or false if
+ * the interrupt is not pending.
+ */
+bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq)
+{
+ uint32_t reg;
+ unsigned long lock_flags;
+
+ _ipu_get(ipu);
+
+ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
+ reg = ipu_cm_read(ipu, IPUIRQ_2_STATREG(ipu->devtype, irq));
+ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
+
+ _ipu_put(ipu);
+
+ if (reg & IPUIRQ_2_MASK(irq))
+ return true;
+ else
+ return false;
+}
+EXPORT_SYMBOL(ipu_get_irq_status);
+
+/*!
+ * This function registers an interrupt handler function for the specified
+ * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
+ *
+ * @param ipu ipu handler
+ * @param       irq             Interrupt line to register the handler for.
+ *
+ * @param handler Input parameter for address of the handler
+ * function.
+ *
+ * @param       irq_flags       Flags for interrupt mode; IPU_IRQF_ONESHOT
+ *                              disables the interrupt again after it has
+ *                              fired once.
+ *
+ * @param devname Input parameter for string name of driver
+ * registering the handler.
+ *
+ * @param dev_id Input parameter for pointer of data to be
+ * passed to the handler.
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
+ irqreturn_t(*handler) (int, void *),
+ uint32_t irq_flags, const char *devname, void *dev_id)
+{
+ uint32_t reg;
+ unsigned long lock_flags;
+ int ret = 0;
+
+ BUG_ON(irq >= IPU_IRQ_COUNT);
+
+ _ipu_get(ipu);
+
+ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
+
+ if (ipu->irq_list[irq].handler != NULL) {
+ dev_err(ipu->dev,
+ "handler already installed on irq %d\n", irq);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+	 * Check the sync interrupt handler only, since we do nothing for
+	 * error interrupts other than printing out register values in the
+	 * error interrupt source handler.
+ */
+ if (_ipu_is_sync_irq(irq) && (handler == NULL)) {
+ dev_err(ipu->dev, "handler is NULL for sync irq %d\n", irq);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ipu->irq_list[irq].handler = handler;
+ ipu->irq_list[irq].flags = irq_flags;
+ ipu->irq_list[irq].dev_id = dev_id;
+ ipu->irq_list[irq].name = devname;
+
+ /* clear irq stat for previous use */
+ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq),
+ IPUIRQ_2_STATREG(ipu->devtype, irq));
+ /* enable the interrupt */
+ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
+ reg |= IPUIRQ_2_MASK(irq);
+ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
+out:
+ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
+
+ _ipu_put(ipu);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipu_request_irq);
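+
+/*
+ * Usage sketch (illustrative only): register a handler for the background
+ * sync end-of-frame interrupt.  "my_eof_handler" is a placeholder, and
+ * "done" is assumed to be a caller-owned struct completion.
+ *
+ *	static irqreturn_t my_eof_handler(int irq, void *dev_id)
+ *	{
+ *		complete((struct completion *)dev_id);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	ret = ipu_request_irq(ipu, IPU_IRQ_BG_SYNC_EOF, my_eof_handler,
+ *			      0, "mydrv", &done);
+ *	... wait_for_completion(&done); ...
+ *	ipu_free_irq(ipu, IPU_IRQ_BG_SYNC_EOF, &done);
+ */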
+
+/*!
+ * This function unregisters an interrupt handler for the specified interrupt
+ * line. The interrupt lines are defined in \b ipu_irq_line enum.
+ *
+ * @param ipu ipu handler
+ * @param       irq             Interrupt line to free the handler for.
+ *
+ * @param dev_id Input parameter for pointer of data to be passed
+ * to the handler. This must match value passed to
+ * ipu_request_irq().
+ *
+ */
+void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id)
+{
+ uint32_t reg;
+ unsigned long lock_flags;
+
+ _ipu_get(ipu);
+
+ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
+
+ /* disable the interrupt */
+ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
+ reg &= ~IPUIRQ_2_MASK(irq);
+ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
+ if (ipu->irq_list[irq].dev_id == dev_id)
+ memset(&ipu->irq_list[irq], 0, sizeof(ipu->irq_list[irq]));
+
+ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
+
+ _ipu_put(ipu);
+}
+EXPORT_SYMBOL(ipu_free_irq);
+
+uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
+{
+ uint32_t reg, dma_chan;
+
+ dma_chan = channel_2_dma(channel, type);
+ if (!idma_is_valid(dma_chan))
+ return -EINVAL;
+
+ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(ipu->devtype, dma_chan));
+ if ((reg & idma_mask(dma_chan)) && _ipu_is_trb_chan(ipu, dma_chan)) {
+ reg = ipu_cm_read(ipu,
+ IPU_CHA_TRIPLE_CUR_BUF(ipu->devtype, dma_chan));
+ return (reg & tri_cur_buf_mask(dma_chan)) >>
+ tri_cur_buf_shift(dma_chan);
+ } else {
+ reg = ipu_cm_read(ipu, IPU_CHA_CUR_BUF(ipu->devtype, dma_chan));
+ if (reg & idma_mask(dma_chan))
+ return 1;
+ else
+ return 0;
+ }
+}
+EXPORT_SYMBOL(ipu_get_cur_buffer_idx);
+
+uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ uint32_t stat = 0;
+ uint32_t task_stat_reg = ipu_cm_read(ipu,
+ IPU_PROC_TASK_STAT(ipu->devtype));
+
+ switch (channel) {
+ case MEM_PRP_VF_MEM:
+ stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
+ break;
+ case MEM_ROT_VF_MEM:
+ stat =
+ (task_stat_reg & TSTAT_VF_ROT_MASK) >> TSTAT_VF_ROT_OFFSET;
+ break;
+ case MEM_PRP_ENC_MEM:
+ stat = (task_stat_reg & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;
+ break;
+ case MEM_ROT_ENC_MEM:
+ stat =
+ (task_stat_reg & TSTAT_ENC_ROT_MASK) >>
+ TSTAT_ENC_ROT_OFFSET;
+ break;
+ case MEM_PP_MEM:
+ stat = (task_stat_reg & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
+ break;
+ case MEM_ROT_PP_MEM:
+ stat =
+ (task_stat_reg & TSTAT_PP_ROT_MASK) >> TSTAT_PP_ROT_OFFSET;
+ break;
+
+ default:
+ stat = TASK_STAT_IDLE;
+ break;
+ }
+ return stat;
+}
+
+/*!
+ * This function checks the status of a logical channel.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @return This function returns 0 on idle and 1 on busy.
+ *
+ */
+uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ uint32_t dma_status;
+
+ _ipu_get(ipu);
+ mutex_lock(&ipu->mutex_lock);
+ dma_status = ipu_is_channel_busy(ipu, channel);
+ mutex_unlock(&ipu->mutex_lock);
+ _ipu_put(ipu);
+
+ dev_dbg(ipu->dev, "%s, dma_status:%d.\n", __func__, dma_status);
+
+ return dma_status;
+}
+EXPORT_SYMBOL(ipu_channel_status);
+
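+/*
+ * Hand the display flow over from one input DMA channel to another: the
+ * target channel is enabled first, the DP/DC path of the source channel is
+ * disabled, then the source DMA channel is disabled and its current-buffer
+ * and ready-buffer flags are cleared.
+ */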
+int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch)
+{
+ uint32_t reg;
+ unsigned long lock_flags;
+ int from_dma = channel_2_dma(from_ch, IPU_INPUT_BUFFER);
+ int to_dma = channel_2_dma(to_ch, IPU_INPUT_BUFFER);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ /* enable target channel */
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(to_dma));
+ ipu_idmac_write(ipu, reg | idma_mask(to_dma), IDMAC_CHA_EN(to_dma));
+
+ ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(to_ch);
+
+ /* switch dp dc */
+ _ipu_dp_dc_disable(ipu, from_ch, true);
+
+ /* disable source channel */
+ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(from_dma));
+ ipu_idmac_write(ipu, reg & ~idma_mask(from_dma), IDMAC_CHA_EN(from_dma));
+ ipu_cm_write(ipu, idma_mask(from_dma),
+ IPU_CHA_CUR_BUF(ipu->devtype, from_dma));
+ ipu_cm_write(ipu, tri_cur_buf_mask(from_dma),
+ IPU_CHA_TRIPLE_CUR_BUF(ipu->devtype, from_dma));
+
+ ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(from_ch));
+
+ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
+ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 0);
+ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 1);
+ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 2);
+ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_swap_channel);
+
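+/*
+ * Bytes per pixel of the first (luma) plane only, as used for stride
+ * calculations: planar and semi-planar YUV formats count as 1 byte, packed
+ * formats as 2, 3 or 4 bytes.
+ */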
+uint32_t bytes_per_pixel(uint32_t fmt)
+{
+ switch (fmt) {
+ case IPU_PIX_FMT_GENERIC: /*generic data */
+ case IPU_PIX_FMT_RGB332:
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YUV444P:
+ case IPU_PIX_FMT_NV12:
+ case PRE_PIX_FMT_NV21:
+ case IPU_PIX_FMT_NV16:
+ case PRE_PIX_FMT_NV61:
+ return 1;
+ break;
+ case IPU_PIX_FMT_GENERIC_16: /* generic data */
+ case IPU_PIX_FMT_RGB565:
+ case IPU_PIX_FMT_BGRA4444:
+ case IPU_PIX_FMT_BGRA5551:
+ case IPU_PIX_FMT_YUYV:
+ case IPU_PIX_FMT_UYVY:
+ case IPU_PIX_FMT_GPU16_SB_ST:
+ case IPU_PIX_FMT_GPU16_SB_SRT:
+ case IPU_PIX_FMT_GPU16_ST:
+ case IPU_PIX_FMT_GPU16_SRT:
+ return 2;
+ break;
+ case IPU_PIX_FMT_BGR24:
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_YUV444:
+ return 3;
+ break;
+ case IPU_PIX_FMT_GENERIC_32: /*generic data */
+ case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_RGB32:
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_ABGR32:
+ case IPU_PIX_FMT_GPU32_SB_ST:
+ case IPU_PIX_FMT_GPU32_SB_SRT:
+ case IPU_PIX_FMT_GPU32_ST:
+ case IPU_PIX_FMT_GPU32_SRT:
+ case IPU_PIX_FMT_AYUV:
+ return 4;
+ break;
+ default:
+ return 1;
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(bytes_per_pixel);
+
+ipu_color_space_t format_to_colorspace(uint32_t fmt)
+{
+ switch (fmt) {
+ case IPU_PIX_FMT_RGB666:
+ case IPU_PIX_FMT_RGB565:
+ case IPU_PIX_FMT_BGRA4444:
+ case IPU_PIX_FMT_BGRA5551:
+ case IPU_PIX_FMT_BGR24:
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_GBR24:
+ case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_RGB32:
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_ABGR32:
+ case IPU_PIX_FMT_LVDS666:
+ case IPU_PIX_FMT_LVDS888:
+ case IPU_PIX_FMT_GPU32_SB_ST:
+ case IPU_PIX_FMT_GPU32_SB_SRT:
+ case IPU_PIX_FMT_GPU32_ST:
+ case IPU_PIX_FMT_GPU32_SRT:
+ case IPU_PIX_FMT_GPU16_SB_ST:
+ case IPU_PIX_FMT_GPU16_SB_SRT:
+ case IPU_PIX_FMT_GPU16_ST:
+ case IPU_PIX_FMT_GPU16_SRT:
+ return RGB;
+ break;
+
+ default:
+ return YCbCr;
+ break;
+ }
+ return RGB;
+}
+EXPORT_SYMBOL(format_to_colorspace);
+
+bool ipu_pixel_format_has_alpha(uint32_t fmt)
+{
+ switch (fmt) {
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_ABGR32:
+		return true;
+	default:
+		return false;
+	}
+}
+
+bool ipu_ch_param_bad_alpha_pos(uint32_t pixel_fmt)
+{
+ return _ipu_ch_param_bad_alpha_pos(pixel_fmt);
+}
+EXPORT_SYMBOL(ipu_ch_param_bad_alpha_pos);
+
+bool ipu_pixel_format_is_gpu_tile(uint32_t fmt)
+{
+ switch (fmt) {
+ case IPU_PIX_FMT_GPU32_SB_ST:
+ case IPU_PIX_FMT_GPU32_SB_SRT:
+ case IPU_PIX_FMT_GPU32_ST:
+ case IPU_PIX_FMT_GPU32_SRT:
+ case IPU_PIX_FMT_GPU16_SB_ST:
+ case IPU_PIX_FMT_GPU16_SB_SRT:
+ case IPU_PIX_FMT_GPU16_ST:
+ case IPU_PIX_FMT_GPU16_SRT:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(ipu_pixel_format_is_gpu_tile);
+
+bool ipu_pixel_format_is_split_gpu_tile(uint32_t fmt)
+{
+ switch (fmt) {
+ case IPU_PIX_FMT_GPU32_SB_ST:
+ case IPU_PIX_FMT_GPU32_SB_SRT:
+ case IPU_PIX_FMT_GPU16_SB_ST:
+ case IPU_PIX_FMT_GPU16_SB_SRT:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(ipu_pixel_format_is_split_gpu_tile);
+
+bool ipu_pixel_format_is_pre_yuv(uint32_t fmt)
+{
+ switch (fmt) {
+ case PRE_PIX_FMT_NV21:
+ case PRE_PIX_FMT_NV61:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(ipu_pixel_format_is_pre_yuv);
+
+bool ipu_pixel_format_is_multiplanar_yuv(uint32_t fmt)
+{
+ switch (fmt) {
+ case IPU_PIX_FMT_YVU410P:
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YVU422P:
+ case IPU_PIX_FMT_YUV444P:
+ case IPU_PIX_FMT_NV12:
+ case PRE_PIX_FMT_NV21:
+ case IPU_PIX_FMT_NV16:
+ case PRE_PIX_FMT_NV61:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(ipu_pixel_format_is_multiplanar_yuv);
+
+int ipu_ch_param_get_axi_id(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
+{
+ uint32_t dma_chan = channel_2_dma(channel, type);
+ int axi_id;
+
+ if (!idma_is_valid(dma_chan))
+ return -EINVAL;
+
+ _ipu_get(ipu);
+ mutex_lock(&ipu->mutex_lock);
+ axi_id = _ipu_ch_param_get_axi_id(ipu, dma_chan);
+ mutex_unlock(&ipu->mutex_lock);
+ _ipu_put(ipu);
+
+ return axi_id;
+}
+EXPORT_SYMBOL(ipu_ch_param_get_axi_id);
+
+#ifdef CONFIG_PM
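+/*
+ * Runtime PM hooks: the high bus frequency request is held only while the
+ * IPU is active and is released again when it goes idle.
+ */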
+int ipu_runtime_suspend(struct device *dev)
+{
+ release_bus_freq(BUS_FREQ_HIGH);
+ dev_dbg(dev, "ipu busfreq high release.\n");
+
+ return 0;
+}
+
+int ipu_runtime_resume(struct device *dev)
+{
+ request_bus_freq(BUS_FREQ_HIGH);
+	dev_dbg(dev, "ipu busfreq high request.\n");
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipu_pm_ops = {
+ SET_RUNTIME_PM_OPS(ipu_runtime_suspend, ipu_runtime_resume, NULL)
+};
+#endif
+
+/*!
+ * Platform driver structure for the IPU, including the power management
+ * operations.
+ */
+static struct platform_driver mxcipu_driver = {
+ .driver = {
+ .name = "imx-ipuv3",
+ .of_match_table = imx_ipuv3_dt_ids,
+ #ifdef CONFIG_PM
+ .pm = &ipu_pm_ops,
+ #endif
+ },
+ .probe = ipu_probe,
+ .remove = ipu_remove,
+};
+
+int32_t __init ipu_gen_init(void)
+{
+ int32_t ret;
+
+	ret = platform_driver_register(&mxcipu_driver);
+
+	return ret;
+}
+
+subsys_initcall(ipu_gen_init);
+
+static void __exit ipu_gen_uninit(void)
+{
+ platform_driver_unregister(&mxcipu_driver);
+}
+
+module_exit(ipu_gen_uninit);
diff --git a/drivers/mxc/ipu3/ipu_device.c b/drivers/mxc/ipu3/ipu_device.c
new file mode 100644
index 000000000000..f672b0e4fe46
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_device.c
@@ -0,0 +1,3739 @@
+/*
+ * Copyright 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file ipu_device.c
+ *
+ * @brief This file contains the IPUv3 driver device interface and fops functions.
+ *
+ * @ingroup IPU
+ */
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ipu-v3.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+
+#include "ipu_param_mem.h"
+#include "ipu_regs.h"
+#include "vdoa.h"
+
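+/*
+ * Error check helpers: both macros expect a local struct ipu_task_entry *t
+ * in scope. CHECK_RETCODE records the error state and jumps to the given
+ * label, while CHECK_RETCODE_CONT only records the state and continues.
+ */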
+#define CHECK_RETCODE(cont, str, err, label, ret) \
+do { \
+ if (cont) { \
+ dev_err(t->dev, "ERR:[0x%p]-no:0x%x "#str" ret:%d," \
+ "line:%d\n", t, t->task_no, ret, __LINE__);\
+ if (ret != -EACCES) { \
+ t->state = err; \
+ goto label; \
+ } \
+ } \
+} while (0)
+
+#define CHECK_RETCODE_CONT(cont, str, err, ret) \
+do { \
+ if (cont) { \
+		dev_err(t->dev, "ERR:[0x%p]-no:0x%x "#str" ret:%d," \
+ "line:%d\n", t, t->task_no, ret, __LINE__);\
+ if (ret != -EACCES) { \
+ if (t->state == STATE_OK) \
+ t->state = err; \
+ } \
+ } \
+} while (0)
+
+#undef DBG_IPU_PERF
+#ifdef DBG_IPU_PERF
+#define CHECK_PERF(ts) \
+do { \
+ getnstimeofday(ts); \
+} while (0)
+
+#define DECLARE_PERF_VAR \
+ struct timespec ts_queue; \
+ struct timespec ts_dotask; \
+ struct timespec ts_waitirq; \
+ struct timespec ts_sche; \
+ struct timespec ts_rel; \
+ struct timespec ts_frame
+
+#define PRINT_TASK_STATISTICS \
+do { \
+ ts_queue = timespec_sub(tsk->ts_dotask, tsk->ts_queue); \
+ ts_dotask = timespec_sub(tsk->ts_waitirq, tsk->ts_dotask); \
+ ts_waitirq = timespec_sub(tsk->ts_inirq, tsk->ts_waitirq); \
+ ts_sche = timespec_sub(tsk->ts_wakeup, tsk->ts_inirq); \
+ ts_rel = timespec_sub(tsk->ts_rel, tsk->ts_wakeup); \
+ ts_frame = timespec_sub(tsk->ts_rel, tsk->ts_queue); \
+ dev_dbg(tsk->dev, "[0x%p] no-0x%x, ts_q:%ldus, ts_do:%ldus," \
+ "ts_waitirq:%ldus,ts_sche:%ldus, ts_rel:%ldus," \
+ "ts_frame: %ldus\n", tsk, tsk->task_no, \
+ ts_queue.tv_nsec / NSEC_PER_USEC + ts_queue.tv_sec * USEC_PER_SEC,\
+ ts_dotask.tv_nsec / NSEC_PER_USEC + ts_dotask.tv_sec * USEC_PER_SEC,\
+ ts_waitirq.tv_nsec / NSEC_PER_USEC + ts_waitirq.tv_sec * USEC_PER_SEC,\
+ ts_sche.tv_nsec / NSEC_PER_USEC + ts_sche.tv_sec * USEC_PER_SEC,\
+ ts_rel.tv_nsec / NSEC_PER_USEC + ts_rel.tv_sec * USEC_PER_SEC,\
+ ts_frame.tv_nsec / NSEC_PER_USEC + ts_frame.tv_sec * USEC_PER_SEC); \
+ if ((ts_frame.tv_nsec/NSEC_PER_USEC + ts_frame.tv_sec*USEC_PER_SEC) > \
+ 80000) \
+ dev_dbg(tsk->dev, "ts_frame larger than 80ms [0x%p] no-0x%x.\n"\
+ , tsk, tsk->task_no); \
+} while (0)
+#else
+#define CHECK_PERF(ts)
+#define DECLARE_PERF_VAR
+#define PRINT_TASK_STATISTICS
+#endif
+
+#define IPU_PP_CH_VF (IPU_TASK_ID_VF - 1)
+#define IPU_PP_CH_PP (IPU_TASK_ID_PP - 1)
+#define MAX_PP_CH (IPU_TASK_ID_MAX - 1)
+#define VDOA_DEF_TIMEOUT_MS (HZ/2)
+
+/* Structures and variables for exporting the MXC IPU as a device */
+typedef enum {
+ STATE_OK = 0,
+ STATE_QUEUE,
+ STATE_IN_PROGRESS,
+ STATE_ERR,
+ STATE_TIMEOUT,
+ STATE_RES_TIMEOUT,
+ STATE_NO_IPU,
+ STATE_NO_IRQ,
+ STATE_IPU_BUSY,
+ STATE_IRQ_FAIL,
+ STATE_IRQ_TIMEOUT,
+ STATE_ENABLE_CHAN_FAIL,
+ STATE_DISABLE_CHAN_FAIL,
+ STATE_SEL_BUF_FAIL,
+ STATE_INIT_CHAN_FAIL,
+ STATE_LINK_CHAN_FAIL,
+ STATE_UNLINK_CHAN_FAIL,
+ STATE_INIT_CHAN_BUF_FAIL,
+ STATE_INIT_CHAN_BAND_FAIL,
+ STATE_SYS_NO_MEM,
+ STATE_VDOA_IRQ_TIMEOUT,
+ STATE_VDOA_IRQ_FAIL,
+ STATE_VDOA_TASK_FAIL,
+} ipu_state_t;
+
+enum {
+ INPUT_CHAN_VDI_P = 1,
+ INPUT_CHAN,
+ INPUT_CHAN_VDI_N,
+};
+
+struct ipu_state_msg {
+ int state;
+ char *msg;
+} state_msg[] = {
+ {STATE_OK, "ok"},
+ {STATE_QUEUE, "split queue"},
+ {STATE_IN_PROGRESS, "split in progress"},
+ {STATE_ERR, "error"},
+ {STATE_TIMEOUT, "split task timeout"},
+ {STATE_RES_TIMEOUT, "wait resource timeout"},
+ {STATE_NO_IPU, "no ipu found"},
+ {STATE_NO_IRQ, "no irq found for task"},
+ {STATE_IPU_BUSY, "ipu busy"},
+ {STATE_IRQ_FAIL, "request irq failed"},
+ {STATE_IRQ_TIMEOUT, "wait for irq timeout"},
+ {STATE_ENABLE_CHAN_FAIL, "ipu enable channel fail"},
+ {STATE_DISABLE_CHAN_FAIL, "ipu disable channel fail"},
+ {STATE_SEL_BUF_FAIL, "ipu select buf fail"},
+ {STATE_INIT_CHAN_FAIL, "ipu init channel fail"},
+ {STATE_LINK_CHAN_FAIL, "ipu link channel fail"},
+ {STATE_UNLINK_CHAN_FAIL, "ipu unlink channel fail"},
+ {STATE_INIT_CHAN_BUF_FAIL, "ipu init channel buffer fail"},
+ {STATE_INIT_CHAN_BAND_FAIL, "ipu init channel band mode fail"},
+ {STATE_SYS_NO_MEM, "sys no mem: -ENOMEM"},
+ {STATE_VDOA_IRQ_TIMEOUT, "wait for vdoa irq timeout"},
+ {STATE_VDOA_IRQ_FAIL, "vdoa irq fail"},
+ {STATE_VDOA_TASK_FAIL, "vdoa task fail"},
+};
+
+struct stripe_setting {
+ u32 iw;
+ u32 ih;
+ u32 ow;
+ u32 oh;
+ u32 outh_resize_ratio;
+ u32 outv_resize_ratio;
+ u32 i_left_pos;
+ u32 i_right_pos;
+ u32 i_top_pos;
+ u32 i_bottom_pos;
+ u32 o_left_pos;
+ u32 o_right_pos;
+ u32 o_top_pos;
+ u32 o_bottom_pos;
+ u32 rl_split_line;
+ u32 ud_split_line;
+};
+
+struct task_set {
+#define NULL_MODE 0x0
+#define IC_MODE 0x1
+#define ROT_MODE 0x2
+#define VDI_MODE 0x4
+#define IPU_PREPROCESS_MODE_MASK (IC_MODE | ROT_MODE | VDI_MODE)
+/* VDOA_MODE means this task uses the VDOA, which has two modes:
+ * BAND mode and non-BAND mode. Non-band mode transfers the converted data
+ * to memory. BAND mode needs hardware sync with the IPU and is used by
+ * default when connected to the VDIC.
+ */
+#define VDOA_MODE 0x8
+#define VDOA_BAND_MODE 0x10
+ u8 mode;
+#define IC_VF 0x1
+#define IC_PP 0x2
+#define ROT_VF 0x4
+#define ROT_PP 0x8
+#define VDI_VF 0x10
+#define VDOA_ONLY 0x20
+ u8 task;
+#define NO_SPLIT 0x0
+#define RL_SPLIT 0x1
+#define UD_SPLIT 0x2
+#define LEFT_STRIPE 0x1
+#define RIGHT_STRIPE 0x2
+#define UP_STRIPE 0x4
+#define DOWN_STRIPE 0x8
+#define SPLIT_MASK 0xF
+ u8 split_mode;
+ u8 band_lines;
+ ipu_channel_t ic_chan;
+ ipu_channel_t rot_chan;
+ ipu_channel_t vdi_ic_p_chan;
+ ipu_channel_t vdi_ic_n_chan;
+
+ u32 i_off;
+ u32 i_uoff;
+ u32 i_voff;
+ u32 istride;
+
+ u32 ov_off;
+ u32 ov_uoff;
+ u32 ov_voff;
+ u32 ovstride;
+
+ u32 ov_alpha_off;
+ u32 ov_alpha_stride;
+
+ u32 o_off;
+ u32 o_uoff;
+ u32 o_voff;
+ u32 ostride;
+
+ u32 r_fmt;
+ u32 r_width;
+ u32 r_height;
+ u32 r_stride;
+ dma_addr_t r_paddr;
+
+ struct stripe_setting sp_setting;
+};
+
+struct ipu_split_task {
+ struct ipu_task task;
+ struct ipu_task_entry *parent_task;
+ struct ipu_task_entry *child_task;
+ u32 task_no;
+};
+
+struct ipu_task_entry {
+ struct ipu_input input;
+ struct ipu_output output;
+
+ bool overlay_en;
+ struct ipu_overlay overlay;
+#define DEF_TIMEOUT_MS 1000
+#define DEF_DELAY_MS 20
+ int timeout;
+ int irq;
+
+ u8 task_id;
+ u8 ipu_id;
+ u8 task_in_list;
+ u8 split_done;
+ struct mutex split_lock;
+ struct mutex vdic_lock;
+ wait_queue_head_t split_waitq;
+
+ struct list_head node;
+ struct list_head split_list;
+ struct ipu_soc *ipu;
+ struct device *dev;
+ struct task_set set;
+ wait_queue_head_t task_waitq;
+ struct completion irq_comp;
+ struct kref refcount;
+ ipu_state_t state;
+ u32 task_no;
+ atomic_t done;
+ atomic_t res_free;
+ atomic_t res_get;
+
+ struct ipu_task_entry *parent;
+ char *vditmpbuf[2];
+ u32 old_save_lines;
+ u32 old_size;
+ bool buf1filled;
+ bool buf0filled;
+
+ vdoa_handle_t vdoa_handle;
+ struct vdoa_output_mem {
+ void *vaddr;
+ dma_addr_t paddr;
+ int size;
+ } vdoa_dma;
+
+#ifdef DBG_IPU_PERF
+ struct timespec ts_queue;
+ struct timespec ts_dotask;
+ struct timespec ts_waitirq;
+ struct timespec ts_inirq;
+ struct timespec ts_wakeup;
+ struct timespec ts_rel;
+#endif
+};
+
+struct ipu_channel_tabel {
+ struct mutex lock;
+ u8 used[MXC_IPU_MAX_NUM][MAX_PP_CH];
+ u8 vdoa_used;
+};
+
+struct ipu_thread_data {
+ struct ipu_soc *ipu;
+ u32 id;
+ u32 is_vdoa;
+};
+
+struct ipu_alloc_list {
+ struct list_head list;
+ dma_addr_t phy_addr;
+ void *cpu_addr;
+ u32 size;
+ void *file_index;
+};
+
+static LIST_HEAD(ipu_alloc_list);
+static DEFINE_MUTEX(ipu_alloc_lock);
+static struct ipu_channel_tabel ipu_ch_tbl;
+static LIST_HEAD(ipu_task_list);
+static DEFINE_SPINLOCK(ipu_task_list_lock);
+static DECLARE_WAIT_QUEUE_HEAD(thread_waitq);
+static DECLARE_WAIT_QUEUE_HEAD(res_waitq);
+static atomic_t req_cnt;
+static atomic_t file_index = ATOMIC_INIT(1);
+static int major;
+static int max_ipu_no;
+static int thread_id;
+static atomic_t frame_no;
+static struct class *ipu_class;
+static struct device *ipu_dev;
+static int debug;
+module_param(debug, int, 0600);
+#ifdef DBG_IPU_PERF
+static struct timespec ts_frame_max;
+static u32 ts_frame_avg;
+static atomic_t frame_cnt;
+#endif
+
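+/*
+ * True when VDI deinterlacing runs in three-field (low/medium motion) mode,
+ * which needs the previous and next field buffers in addition to the
+ * current one.
+ */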
+static bool deinterlace_3_field(struct ipu_task_entry *t)
+{
+ return ((t->set.mode & VDI_MODE) &&
+ (t->input.deinterlace.motion != HIGH_MOTION));
+}
+
+static u32 tiled_filed_size(struct ipu_task_entry *t)
+{
+ u32 field_size;
+
+	/* note: page_align is required by the VPU hw output buffer */
+ field_size = TILED_NV12_FRAME_SIZE(t->input.width, t->input.height/2);
+ return field_size;
+}
+
+static bool only_ic(u8 mode)
+{
+ mode = mode & IPU_PREPROCESS_MODE_MASK;
+ return ((mode == IC_MODE) || (mode == VDI_MODE));
+}
+
+static bool only_rot(u8 mode)
+{
+ mode = mode & IPU_PREPROCESS_MODE_MASK;
+ return (mode == ROT_MODE);
+}
+
+static bool ic_and_rot(u8 mode)
+{
+ mode = mode & IPU_PREPROCESS_MODE_MASK;
+ return ((mode == (IC_MODE | ROT_MODE)) ||
+ (mode == (VDI_MODE | ROT_MODE)));
+}
+
+static bool need_split(struct ipu_task_entry *t)
+{
+ return ((t->set.split_mode != NO_SPLIT) || (t->task_no & SPLIT_MASK));
+}
+
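+/*
+ * Total bits per pixel across all planes of a format, e.g. 12 for NV12 and
+ * the planar 4:2:0 formats, 16 for RGB565 and the 4:2:2 formats.
+ */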
+unsigned int fmt_to_bpp(unsigned int pixelformat)
+{
+ u32 bpp;
+
+ switch (pixelformat) {
+ case IPU_PIX_FMT_RGB565:
+ /*interleaved 422*/
+ case IPU_PIX_FMT_YUYV:
+ case IPU_PIX_FMT_UYVY:
+ /*non-interleaved 422*/
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YVU422P:
+ bpp = 16;
+ break;
+ case IPU_PIX_FMT_BGR24:
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_YUV444:
+ case IPU_PIX_FMT_YUV444P:
+ bpp = 24;
+ break;
+ case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_RGB32:
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_ABGR32:
+ bpp = 32;
+ break;
+ /*non-interleaved 420*/
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_NV12:
+ bpp = 12;
+ break;
+ default:
+ bpp = 8;
+ break;
+ }
+ return bpp;
+}
+EXPORT_SYMBOL_GPL(fmt_to_bpp);
+
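+/*
+ * Map a pixel format to its color space; NULL_CS marks formats this
+ * interface does not handle and makes need_csc() return -1.
+ */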
+cs_t colorspaceofpixel(int fmt)
+{
+ switch (fmt) {
+ case IPU_PIX_FMT_RGB565:
+ case IPU_PIX_FMT_RGB666:
+ case IPU_PIX_FMT_BGR24:
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_RGB32:
+ case IPU_PIX_FMT_ABGR32:
+ return RGB_CS;
+ break;
+ case IPU_PIX_FMT_UYVY:
+ case IPU_PIX_FMT_YUYV:
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_YVU422P:
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YUV444:
+ case IPU_PIX_FMT_YUV444P:
+ case IPU_PIX_FMT_NV12:
+ case IPU_PIX_FMT_TILED_NV12:
+ case IPU_PIX_FMT_TILED_NV12F:
+ return YUV_CS;
+ break;
+ default:
+ return NULL_CS;
+ }
+}
+EXPORT_SYMBOL_GPL(colorspaceofpixel);
+
+int need_csc(int ifmt, int ofmt)
+{
+ cs_t ics, ocs;
+
+ ics = colorspaceofpixel(ifmt);
+ ocs = colorspaceofpixel(ofmt);
+
+ if ((ics == NULL_CS) || (ocs == NULL_CS))
+ return -1;
+ else if (ics != ocs)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(need_csc);
+
+static int soc_max_in_width(u32 is_vdoa)
+{
+ return is_vdoa ? 8192 : 4096;
+}
+
+static int soc_max_vdi_in_width(struct ipu_soc *ipu)
+{
+ int i;
+
+ if (!ipu) {
+ for (i = 0; i < max_ipu_no; i++) {
+ ipu = ipu_get_soc(i);
+ if (!IS_ERR_OR_NULL(ipu))
+ break;
+ }
+
+ if (i == max_ipu_no)
+ return 720;
+ }
+ return IPU_MAX_VDI_IN_WIDTH(ipu->devtype);
+}
+static int soc_max_in_height(void)
+{
+ return 4096;
+}
+
+static int soc_max_out_width(void)
+{
+ /* mx51/mx53/mx6q is 1024*/
+ return 1024;
+}
+
+static int soc_max_out_height(void)
+{
+ /* mx51/mx53/mx6q is 1024*/
+ return 1024;
+}
+
+static void dump_task_info(struct ipu_task_entry *t)
+{
+ if (!debug)
+ return;
+ dev_dbg(t->dev, "[0x%p]input:\n", (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->input.format);
+ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->input.width);
+ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->input.height);
+ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n", (void *)t, t->input.crop.w);
+ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n", (void *)t, t->input.crop.h);
+ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
+ (void *)t, t->input.crop.pos.x);
+ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
+ (void *)t, t->input.crop.pos.y);
+ dev_dbg(t->dev, "[0x%p]input buffer:\n", (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->input.paddr);
+ dev_dbg(t->dev, "[0x%p]\ti_off = 0x%x\n", (void *)t, t->set.i_off);
+ dev_dbg(t->dev, "[0x%p]\ti_uoff = 0x%x\n", (void *)t, t->set.i_uoff);
+ dev_dbg(t->dev, "[0x%p]\ti_voff = 0x%x\n", (void *)t, t->set.i_voff);
+ dev_dbg(t->dev, "[0x%p]\tistride = %d\n", (void *)t, t->set.istride);
+ if (t->input.deinterlace.enable) {
+ dev_dbg(t->dev, "[0x%p]deinterlace enabled with:\n", (void *)t);
+ if (t->input.deinterlace.motion != HIGH_MOTION) {
+ dev_dbg(t->dev, "[0x%p]\tlow/medium motion\n", (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tpaddr_n = 0x%x\n",
+ (void *)t, t->input.paddr_n);
+ } else
+ dev_dbg(t->dev, "[0x%p]\thigh motion\n", (void *)t);
+ }
+
+ dev_dbg(t->dev, "[0x%p]output:\n", (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->output.format);
+ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->output.width);
+ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->output.height);
+ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n", (void *)t, t->output.crop.w);
+ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n", (void *)t, t->output.crop.h);
+ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
+ (void *)t, t->output.crop.pos.x);
+ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
+ (void *)t, t->output.crop.pos.y);
+ dev_dbg(t->dev, "[0x%p]\trotate = %d\n", (void *)t, t->output.rotate);
+ dev_dbg(t->dev, "[0x%p]output buffer:\n", (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->output.paddr);
+ dev_dbg(t->dev, "[0x%p]\to_off = 0x%x\n", (void *)t, t->set.o_off);
+ dev_dbg(t->dev, "[0x%p]\to_uoff = 0x%x\n", (void *)t, t->set.o_uoff);
+ dev_dbg(t->dev, "[0x%p]\to_voff = 0x%x\n", (void *)t, t->set.o_voff);
+ dev_dbg(t->dev, "[0x%p]\tostride = %d\n", (void *)t, t->set.ostride);
+
+ if (t->overlay_en) {
+ dev_dbg(t->dev, "[0x%p]overlay:\n", (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n",
+ (void *)t, t->overlay.format);
+ dev_dbg(t->dev, "[0x%p]\twidth = %d\n",
+ (void *)t, t->overlay.width);
+ dev_dbg(t->dev, "[0x%p]\theight = %d\n",
+ (void *)t, t->overlay.height);
+ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n",
+ (void *)t, t->overlay.crop.w);
+ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n",
+ (void *)t, t->overlay.crop.h);
+ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
+ (void *)t, t->overlay.crop.pos.x);
+ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
+ (void *)t, t->overlay.crop.pos.y);
+ dev_dbg(t->dev, "[0x%p]overlay buffer:\n", (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n",
+ (void *)t, t->overlay.paddr);
+ dev_dbg(t->dev, "[0x%p]\tov_off = 0x%x\n",
+ (void *)t, t->set.ov_off);
+ dev_dbg(t->dev, "[0x%p]\tov_uoff = 0x%x\n",
+ (void *)t, t->set.ov_uoff);
+ dev_dbg(t->dev, "[0x%p]\tov_voff = 0x%x\n",
+ (void *)t, t->set.ov_voff);
+ dev_dbg(t->dev, "[0x%p]\tovstride = %d\n",
+ (void *)t, t->set.ovstride);
+ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
+ dev_dbg(t->dev, "[0x%p]local alpha enabled with:\n",
+ (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n",
+ (void *)t, t->overlay.alpha.loc_alp_paddr);
+ dev_dbg(t->dev, "[0x%p]\tov_alpha_off = 0x%x\n",
+ (void *)t, t->set.ov_alpha_off);
+ dev_dbg(t->dev, "[0x%p]\tov_alpha_stride = %d\n",
+ (void *)t, t->set.ov_alpha_stride);
+ } else
+			dev_dbg(t->dev, "[0x%p]global alpha enabled with value 0x%x\n",
+ (void *)t, t->overlay.alpha.gvalue);
+ if (t->overlay.colorkey.enable)
+ dev_dbg(t->dev, "[0x%p]colorkey enabled with value 0x%x\n",
+ (void *)t, t->overlay.colorkey.value);
+ }
+
+ dev_dbg(t->dev, "[0x%p]want task_id = %d\n", (void *)t, t->task_id);
+ dev_dbg(t->dev, "[0x%p]want task mode is 0x%x\n",
+ (void *)t, t->set.mode);
+ dev_dbg(t->dev, "[0x%p]\tIC_MODE = 0x%x\n", (void *)t, IC_MODE);
+ dev_dbg(t->dev, "[0x%p]\tROT_MODE = 0x%x\n", (void *)t, ROT_MODE);
+ dev_dbg(t->dev, "[0x%p]\tVDI_MODE = 0x%x\n", (void *)t, VDI_MODE);
+ dev_dbg(t->dev, "[0x%p]\tTask_no = 0x%x\n\n\n", (void *)t, t->task_no);
+}
+
+static void dump_check_err(struct device *dev, int err)
+{
+ switch (err) {
+ case IPU_CHECK_ERR_INPUT_CROP:
+ dev_err(dev, "input crop setting error\n");
+ break;
+ case IPU_CHECK_ERR_OUTPUT_CROP:
+ dev_err(dev, "output crop setting error\n");
+ break;
+ case IPU_CHECK_ERR_OVERLAY_CROP:
+ dev_err(dev, "overlay crop setting error\n");
+ break;
+ case IPU_CHECK_ERR_INPUT_OVER_LIMIT:
+ dev_err(dev, "input over limitation\n");
+ break;
+ case IPU_CHECK_ERR_OVERLAY_WITH_VDI:
+ dev_err(dev, "do not support overlay with deinterlace\n");
+ break;
+ case IPU_CHECK_ERR_OV_OUT_NO_FIT:
+ dev_err(dev,
+ "width/height of overlay and ic output should be same\n");
+ break;
+ case IPU_CHECK_ERR_PROC_NO_NEED:
+		dev_err(dev, "no ipu processing needed\n");
+ break;
+ case IPU_CHECK_ERR_SPLIT_INPUTW_OVER:
+ dev_err(dev, "split mode input width overflow\n");
+ break;
+ case IPU_CHECK_ERR_SPLIT_INPUTH_OVER:
+ dev_err(dev, "split mode input height overflow\n");
+ break;
+ case IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER:
+ dev_err(dev, "split mode output width overflow\n");
+ break;
+ case IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER:
+ dev_err(dev, "split mode output height overflow\n");
+ break;
+ case IPU_CHECK_ERR_SPLIT_WITH_ROT:
+ dev_err(dev, "not support split mode with rotation\n");
+ break;
+ case IPU_CHECK_ERR_W_DOWNSIZE_OVER:
+ dev_err(dev, "horizontal downsizing ratio overflow\n");
+ break;
+ case IPU_CHECK_ERR_H_DOWNSIZE_OVER:
+ dev_err(dev, "vertical downsizing ratio overflow\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void dump_check_warn(struct device *dev, int warn)
+{
+ if (warn & IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN)
+ dev_warn(dev, "input u/v offset not 8 align\n");
+ if (warn & IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN)
+ dev_warn(dev, "output u/v offset not 8 align\n");
+ if (warn & IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN)
+ dev_warn(dev, "overlay u/v offset not 8 align\n");
+}
+
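+/*
+ * Validate and, if unset, initialize a crop window. Tiled NV12 formats must
+ * stay macroblock aligned (IPU_PIX_FMT_TILED_NV12_MBALIGN); for all other
+ * formats the crop width and height are rounded down to a multiple of 8.
+ */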
+static int set_crop(struct ipu_crop *crop, int width, int height, int fmt)
+{
+ if ((width == 0) || (height == 0)) {
+ pr_err("Invalid param: width=%d, height=%d\n", width, height);
+ return -EINVAL;
+ }
+
+ if ((IPU_PIX_FMT_TILED_NV12 == fmt) ||
+ (IPU_PIX_FMT_TILED_NV12F == fmt)) {
+ if (crop->w || crop->h) {
+ if (((crop->w + crop->pos.x) > width)
+ || ((crop->h + crop->pos.y) > height)
+ || (0 != (crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN))
+ || (0 != (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))
+ || (0 != (crop->pos.x % IPU_PIX_FMT_TILED_NV12_MBALIGN))
+ || (0 != (crop->pos.y % IPU_PIX_FMT_TILED_NV12_MBALIGN))
+ ) {
+ pr_err("set_crop error MB align.\n");
+ return -EINVAL;
+ }
+ } else {
+ crop->pos.x = 0;
+ crop->pos.y = 0;
+ crop->w = width;
+ crop->h = height;
+ if ((0 != (crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN))
+ || (0 != (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))) {
+ pr_err("set_crop error w/h MB align.\n");
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (crop->w || crop->h) {
+ if (((crop->w + crop->pos.x) > (width + 16))
+ || ((crop->h + crop->pos.y) > height + 16)) {
+ pr_err("set_crop error exceeds width/height.\n");
+ return -EINVAL;
+ }
+ } else {
+ crop->pos.x = 0;
+ crop->pos.y = 0;
+ crop->w = width;
+ crop->h = height;
+ }
+ crop->w -= crop->w%8;
+ crop->h -= crop->h%8;
+ }
+
+ if ((crop->w == 0) || (crop->h == 0)) {
+ pr_err("Invalid crop param: crop.w=%d, crop.h=%d\n",
+ crop->w, crop->h);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void update_offset(unsigned int fmt,
+ unsigned int width, unsigned int height,
+ unsigned int pos_x, unsigned int pos_y,
+ int *off, int *uoff, int *voff, int *stride)
+{
+	/* NOTE: the u/v offsets are relative to the start point given by *off */
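+	/*
+	 * Example for IPU_PIX_FMT_YUV420P with width=640, height=480 and a
+	 * crop at (16, 8): off = 8*640 + 16 = 5136. The U plane starts at
+	 * width*height = 307200 and the crop start inside it is
+	 * 4*320 + 8 = 1288, so uoff = 307200 + 1288 - 5136 = 303352, which
+	 * is what the formula below yields.
+	 */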
+ switch (fmt) {
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YUV420P:
+ *off = pos_y * width + pos_x;
+ *uoff = (width * (height - pos_y) - pos_x)
+ + (width/2) * (pos_y/2) + pos_x/2;
+ /* In case height is odd, round up to even */
+ *voff = *uoff + (width/2) * ((height+1)/2);
+ break;
+ case IPU_PIX_FMT_YVU420P:
+ *off = pos_y * width + pos_x;
+ *voff = (width * (height - pos_y) - pos_x)
+ + (width/2) * (pos_y/2) + pos_x/2;
+ /* In case height is odd, round up to even */
+ *uoff = *voff + (width/2) * ((height+1)/2);
+ break;
+ case IPU_PIX_FMT_YVU422P:
+ *off = pos_y * width + pos_x;
+ *voff = (width * (height - pos_y) - pos_x)
+ + (width/2) * pos_y + pos_x/2;
+ *uoff = *voff + (width/2) * height;
+ break;
+ case IPU_PIX_FMT_YUV422P:
+ *off = pos_y * width + pos_x;
+ *uoff = (width * (height - pos_y) - pos_x)
+ + (width/2) * pos_y + pos_x/2;
+ *voff = *uoff + (width/2) * height;
+ break;
+ case IPU_PIX_FMT_YUV444P:
+ *off = pos_y * width + pos_x;
+ *uoff = width * height;
+ *voff = width * height * 2;
+ break;
+ case IPU_PIX_FMT_NV12:
+ *off = pos_y * width + pos_x;
+ *uoff = (width * (height - pos_y) - pos_x)
+ + width * (pos_y/2) + pos_x;
+ break;
+ case IPU_PIX_FMT_TILED_NV12:
+ /*
+ * tiled format, progressive:
+ * assuming that line is aligned with MB height (aligned to 16)
+ * offset = line * stride + (pixel / MB_width) * pixels_in_MB
+ * = line * stride + (pixel / 16) * 256
+ * = line * stride + pixel * 16
+ */
+ *off = pos_y * width + (pos_x << 4);
+ *uoff = ALIGN(width * height, SZ_4K) + (*off >> 1) - *off;
+ break;
+ case IPU_PIX_FMT_TILED_NV12F:
+ /*
+ * tiled format, interlaced:
+ * same as above, only number of pixels in MB is 128,
+ * instead of 256
+ */
+ *off = (pos_y >> 1) * width + (pos_x << 3);
+ *uoff = ALIGN(width * height/2, SZ_4K) + (*off >> 1) - *off;
+ break;
+ default:
+ *off = (pos_y * width + pos_x) * fmt_to_bpp(fmt)/8;
+ break;
+ }
+ *stride = width * bytes_per_pixel(fmt);
+}
+
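+/*
+ * Compute the stripe geometry for split processing: for RL_SPLIT and/or
+ * UD_SPLIT the frame is divided into equal left/right and/or up/down stripes
+ * via ipu_calc_stripes_sizes(), the resulting sizes and positions are stored
+ * in t->set.sp_setting, and oversized or over-downscaled stripes are
+ * rejected with an IPU_CHECK_ERR_* code.
+ */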
+static int update_split_setting(struct ipu_task_entry *t, bool vdi_split)
+{
+ struct stripe_param left_stripe;
+ struct stripe_param right_stripe;
+ struct stripe_param up_stripe;
+ struct stripe_param down_stripe;
+ u32 iw, ih, ow, oh;
+ u32 max_width;
+ int ret;
+
+ if (t->output.rotate >= IPU_ROTATE_90_RIGHT)
+ return IPU_CHECK_ERR_SPLIT_WITH_ROT;
+
+ iw = t->input.crop.w;
+ ih = t->input.crop.h;
+
+ ow = t->output.crop.w;
+ oh = t->output.crop.h;
+
+ memset(&left_stripe, 0, sizeof(left_stripe));
+ memset(&right_stripe, 0, sizeof(right_stripe));
+ memset(&up_stripe, 0, sizeof(up_stripe));
+ memset(&down_stripe, 0, sizeof(down_stripe));
+
+ if (t->set.split_mode & RL_SPLIT) {
+ /*
+		 * We want equal stripes: initialize the stripes in case
+		 * ipu_calc_stripes_sizes() returns before actually doing the calculation.
+ */
+ left_stripe.input_width = iw / 2;
+ left_stripe.output_width = ow / 2;
+		/* dummy index: a VDOA-only task needs no IPU resource */
+ right_stripe.output_column = ow / 2;
+
+ if (vdi_split)
+ max_width = soc_max_vdi_in_width(t->ipu);
+ else
+ max_width = soc_max_out_width();
+ ret = ipu_calc_stripes_sizes(iw,
+ ow,
+ max_width,
+ (((unsigned long long)1) << 32), /* 32bit for fractional*/
+ 1, /* equal stripes */
+ t->input.format,
+ t->output.format,
+ &left_stripe,
+ &right_stripe);
+ if (ret < 0)
+ return IPU_CHECK_ERR_W_DOWNSIZE_OVER;
+ else if (ret)
+ dev_dbg(t->dev, "Warn: no:0x%x,calc_stripes ret:%d\n",
+ t->task_no, ret);
+ t->set.sp_setting.iw = left_stripe.input_width;
+ t->set.sp_setting.ow = left_stripe.output_width;
+		/* note: for a pure tiled -> raster conversion with no other
+		 * post-processing we do not need to allocate a buffer; the
+		 * output buffer is used directly.
+		 */
+ t->set.sp_setting.o_right_pos = right_stripe.output_column;
+ } else {
+ t->set.sp_setting.iw = iw;
+ t->set.sp_setting.ow = ow;
+ t->set.sp_setting.outh_resize_ratio = 0;
+ t->set.sp_setting.i_left_pos = 0;
+ t->set.sp_setting.o_left_pos = 0;
+ t->set.sp_setting.i_right_pos = 0;
+ t->set.sp_setting.o_right_pos = 0;
+ }
+ if ((t->set.sp_setting.iw + t->set.sp_setting.i_right_pos) > (iw+16))
+ return IPU_CHECK_ERR_SPLIT_INPUTW_OVER;
+ if (((t->set.sp_setting.ow + t->set.sp_setting.o_right_pos) > ow)
+ || (t->set.sp_setting.ow > soc_max_out_width()))
+ return IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER;
+ if (rounddown(t->set.sp_setting.ow, 8) * 8 <=
+ rounddown(t->set.sp_setting.iw, 8))
+ return IPU_CHECK_ERR_W_DOWNSIZE_OVER;
+
+ if (t->set.split_mode & UD_SPLIT) {
+ /*
+		 * We want equal stripes: initialize the stripes in case
+		 * ipu_calc_stripes_sizes() returns before actually doing the calculation.
+ */
+ up_stripe.input_width = ih / 2;
+ up_stripe.output_width = oh / 2;
+ down_stripe.input_column = ih / 2;
+ down_stripe.output_column = oh / 2;
+ ret = ipu_calc_stripes_sizes(ih,
+ oh,
+ soc_max_out_height(),
+ (((unsigned long long)1) << 32), /* 32bit for fractional*/
+ 0x1 | 0x2, /* equal stripes and vertical */
+ t->input.format,
+ t->output.format,
+ &up_stripe,
+ &down_stripe);
+ if (ret < 0)
+ return IPU_CHECK_ERR_H_DOWNSIZE_OVER;
+ else if (ret)
+ dev_err(t->dev, "Warn: no:0x%x,calc_stripes ret:%d\n",
+ t->task_no, ret);
+ t->set.sp_setting.ih = up_stripe.input_width;
+ t->set.sp_setting.oh = up_stripe.output_width;
+ t->set.sp_setting.outv_resize_ratio = up_stripe.irr;
+ t->set.sp_setting.i_top_pos = up_stripe.input_column;
+ t->set.sp_setting.o_top_pos = up_stripe.output_column;
+ t->set.sp_setting.i_bottom_pos = down_stripe.input_column;
+ t->set.sp_setting.o_bottom_pos = down_stripe.output_column;
+ } else {
+ t->set.sp_setting.ih = ih;
+ t->set.sp_setting.oh = oh;
+ t->set.sp_setting.outv_resize_ratio = 0;
+ t->set.sp_setting.i_top_pos = 0;
+ t->set.sp_setting.o_top_pos = 0;
+ t->set.sp_setting.i_bottom_pos = 0;
+ t->set.sp_setting.o_bottom_pos = 0;
+ }
+
+ /* downscale case: enforce limits */
+ if (((t->set.sp_setting.ih + t->set.sp_setting.i_bottom_pos) > (ih))
+ && (t->set.sp_setting.ih >= t->set.sp_setting.oh))
+ return IPU_CHECK_ERR_SPLIT_INPUTH_OVER;
+ /* upscale case: relax limits because ipu_calc_stripes_sizes() may
+	   create an input stripe that falls just outside the input window */
+ else if ((t->set.sp_setting.ih + t->set.sp_setting.i_bottom_pos)
+ > (ih+16))
+ return IPU_CHECK_ERR_SPLIT_INPUTH_OVER;
+ if (((t->set.sp_setting.oh + t->set.sp_setting.o_bottom_pos) > oh)
+ || (t->set.sp_setting.oh > soc_max_out_height()))
+ return IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER;
+ if (rounddown(t->set.sp_setting.oh, 8) * 8 <=
+ rounddown(t->set.sp_setting.ih, 8))
+ return IPU_CHECK_ERR_H_DOWNSIZE_OVER;
+
+ return IPU_CHECK_OK;
+}
+
+static int check_task(struct ipu_task_entry *t)
+{
+ int tmp;
+ int ret = IPU_CHECK_OK;
+ int timeout;
+ bool vdi_split = false;
+ int ocw, och;
+
+ if ((IPU_PIX_FMT_TILED_NV12 == t->overlay.format) ||
+ (IPU_PIX_FMT_TILED_NV12F == t->overlay.format) ||
+ (IPU_PIX_FMT_TILED_NV12 == t->output.format) ||
+ (IPU_PIX_FMT_TILED_NV12F == t->output.format) ||
+ ((IPU_PIX_FMT_TILED_NV12F == t->input.format) &&
+ !t->input.deinterlace.enable)) {
+ ret = IPU_CHECK_ERR_NOT_SUPPORT;
+ goto done;
+ }
+
+ /* check input */
+ ret = set_crop(&t->input.crop, t->input.width, t->input.height,
+ t->input.format);
+ if (ret < 0) {
+ ret = IPU_CHECK_ERR_INPUT_CROP;
+ goto done;
+ } else
+ update_offset(t->input.format, t->input.width, t->input.height,
+ t->input.crop.pos.x, t->input.crop.pos.y,
+ &t->set.i_off, &t->set.i_uoff,
+ &t->set.i_voff, &t->set.istride);
+
+ /* check output */
+ ret = set_crop(&t->output.crop, t->output.width, t->output.height,
+ t->output.format);
+ if (ret < 0) {
+ ret = IPU_CHECK_ERR_OUTPUT_CROP;
+ goto done;
+ } else
+ update_offset(t->output.format,
+ t->output.width, t->output.height,
+ t->output.crop.pos.x, t->output.crop.pos.y,
+ &t->set.o_off, &t->set.o_uoff,
+ &t->set.o_voff, &t->set.ostride);
+
+ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
+ /*
+ * Cache output width and height and
+ * swap them so that we may check
+ * downsize overflow correctly.
+ */
+ ocw = t->output.crop.h;
+ och = t->output.crop.w;
+ } else {
+ ocw = t->output.crop.w;
+ och = t->output.crop.h;
+ }
+
+ if (ocw * 8 <= t->input.crop.w) {
+ ret = IPU_CHECK_ERR_W_DOWNSIZE_OVER;
+ goto done;
+ }
+
+ if (och * 8 <= t->input.crop.h) {
+ ret = IPU_CHECK_ERR_H_DOWNSIZE_OVER;
+ goto done;
+ }
+
+ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
+ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
+ if ((t->input.crop.w > soc_max_in_width(1)) ||
+ (t->input.crop.h > soc_max_in_height())) {
+ ret = IPU_CHECK_ERR_INPUT_OVER_LIMIT;
+ goto done;
+ }
+		/* output fmt must be NV12 or YUYV; resize is not supported */
+ if (((IPU_PIX_FMT_NV12 != t->output.format) &&
+ (IPU_PIX_FMT_YUYV != t->output.format)) ||
+ (t->input.crop.w != t->output.crop.w) ||
+ (t->input.crop.h != t->output.crop.h)) {
+ ret = IPU_CHECK_ERR_NOT_SUPPORT;
+ goto done;
+ }
+ }
+
+ /* check overlay if there is */
+ if (t->overlay_en) {
+ if (t->input.deinterlace.enable) {
+ ret = IPU_CHECK_ERR_OVERLAY_WITH_VDI;
+ goto done;
+ }
+
+ ret = set_crop(&t->overlay.crop, t->overlay.width,
+ t->overlay.height, t->overlay.format);
+ if (ret < 0) {
+ ret = IPU_CHECK_ERR_OVERLAY_CROP;
+ goto done;
+ } else {
+ ocw = t->output.crop.w;
+ och = t->output.crop.h;
+
+ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
+ ocw = t->output.crop.h;
+ och = t->output.crop.w;
+ }
+ if ((t->overlay.crop.w != ocw) ||
+ (t->overlay.crop.h != och)) {
+ ret = IPU_CHECK_ERR_OV_OUT_NO_FIT;
+ goto done;
+ }
+
+ update_offset(t->overlay.format,
+ t->overlay.width, t->overlay.height,
+ t->overlay.crop.pos.x, t->overlay.crop.pos.y,
+ &t->set.ov_off, &t->set.ov_uoff,
+ &t->set.ov_voff, &t->set.ovstride);
+ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
+ t->set.ov_alpha_stride = t->overlay.width;
+ t->set.ov_alpha_off = t->overlay.crop.pos.y *
+ t->overlay.width + t->overlay.crop.pos.x;
+ }
+ }
+ }
+
+ /* input overflow? */
+ if (!((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
+ (IPU_PIX_FMT_TILED_NV12F == t->input.format))) {
+ if ((t->input.crop.w > soc_max_in_width(0)) ||
+ (t->input.crop.h > soc_max_in_height())) {
+ ret = IPU_CHECK_ERR_INPUT_OVER_LIMIT;
+ goto done;
+ }
+ }
+
+ /* check task mode */
+ t->set.mode = NULL_MODE;
+ t->set.split_mode = NO_SPLIT;
+
+ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
+ /*output swap*/
+ tmp = t->output.crop.w;
+ t->output.crop.w = t->output.crop.h;
+ t->output.crop.h = tmp;
+ }
+
+ if (t->output.rotate >= IPU_ROTATE_90_RIGHT)
+ t->set.mode |= ROT_MODE;
+
+ /*need resize or CSC?*/
+ if ((t->input.crop.w != t->output.crop.w) ||
+ (t->input.crop.h != t->output.crop.h) ||
+ need_csc(t->input.format, t->output.format))
+ t->set.mode |= IC_MODE;
+
+ /*need cropping?*/
+ if ((t->input.crop.w != t->input.width) ||
+ (t->input.crop.h != t->input.height) ||
+ (t->output.crop.w != t->output.width) ||
+ (t->output.crop.h != t->output.height))
+ t->set.mode |= IC_MODE;
+
+ /*need flip?*/
+ if ((t->set.mode == NULL_MODE) && (t->output.rotate > IPU_ROTATE_NONE))
+ t->set.mode |= IC_MODE;
+
+	/* need IDMAC format conversion (same color space)? */
+ if ((t->set.mode == NULL_MODE) && (t->input.format != t->output.format))
+ t->set.mode |= IC_MODE;
+
+ /*overlay support*/
+ if (t->overlay_en)
+ t->set.mode |= IC_MODE;
+
+ /*deinterlace*/
+ if (t->input.deinterlace.enable) {
+ t->set.mode &= ~IC_MODE;
+ t->set.mode |= VDI_MODE;
+ }
+ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
+ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
+ if (t->set.mode & ROT_MODE) {
+ ret = IPU_CHECK_ERR_NOT_SUPPORT;
+ goto done;
+ }
+ t->set.mode |= VDOA_MODE;
+ if (IPU_PIX_FMT_TILED_NV12F == t->input.format)
+ t->set.mode |= VDOA_BAND_MODE;
+ t->set.mode &= ~IC_MODE;
+ }
+
+ if ((t->set.mode & (IC_MODE | VDI_MODE)) &&
+ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
+ if (t->output.crop.w > soc_max_out_width())
+ t->set.split_mode |= RL_SPLIT;
+ if (t->output.crop.h > soc_max_out_height())
+ t->set.split_mode |= UD_SPLIT;
+ if (!t->set.split_mode && (t->set.mode & VDI_MODE) &&
+ (t->input.crop.w >
+ soc_max_vdi_in_width(t->ipu))) {
+ t->set.split_mode |= RL_SPLIT;
+ vdi_split = true;
+ }
+ if (t->set.split_mode) {
+ if ((t->set.split_mode == RL_SPLIT) ||
+ (t->set.split_mode == UD_SPLIT))
+ timeout = DEF_TIMEOUT_MS * 2 + DEF_DELAY_MS;
+ else
+ timeout = DEF_TIMEOUT_MS * 4 + DEF_DELAY_MS;
+ if (t->timeout < timeout)
+ t->timeout = timeout;
+
+ ret = update_split_setting(t, vdi_split);
+ if (ret > IPU_CHECK_ERR_MIN)
+ goto done;
+ }
+ }
+
+ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
+ /*output swap*/
+ tmp = t->output.crop.w;
+ t->output.crop.w = t->output.crop.h;
+ t->output.crop.h = tmp;
+ }
+
+ if (t->set.mode == NULL_MODE) {
+ ret = IPU_CHECK_ERR_PROC_NO_NEED;
+ goto done;
+ }
+
+ if ((t->set.i_uoff % 8) || (t->set.i_voff % 8))
+ ret |= IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN;
+ if ((t->set.o_uoff % 8) || (t->set.o_voff % 8))
+ ret |= IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN;
+ if (t->overlay_en && ((t->set.ov_uoff % 8) || (t->set.ov_voff % 8)))
+ ret |= IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN;
+
+done:
+ /* dump msg */
+ if (debug) {
+ if (ret > IPU_CHECK_ERR_MIN)
+ dump_check_err(t->dev, ret);
+ else if (ret != IPU_CHECK_OK)
+ dump_check_warn(t->dev, ret);
+ }
+
+ return ret;
+}
+
+static int prepare_task(struct ipu_task_entry *t)
+{
+ int ret = 0;
+
+ ret = check_task(t);
+ if (ret > IPU_CHECK_ERR_MIN)
+ return -EINVAL;
+
+ if (t->set.mode & VDI_MODE) {
+ t->task_id = IPU_TASK_ID_VF;
+ t->set.task = VDI_VF;
+ if (t->set.mode & ROT_MODE)
+ t->set.task |= ROT_VF;
+ }
+
+ if (VDOA_MODE == t->set.mode) {
+ if (t->set.task != 0) {
+ dev_err(t->dev, "ERR: vdoa only task:0x%x, [0x%p].\n",
+ t->set.task, t);
+ return -EINVAL;
+ }
+ t->set.task |= VDOA_ONLY;
+ }
+
+ if (VDOA_BAND_MODE & t->set.mode) {
+ /* to save band size: 1<<3 = 8 lines */
+ t->set.band_lines = 3;
+ }
+
+ dump_task_info(t);
+
+ return ret;
+}
+
+static uint32_t ic_vf_pp_is_busy(struct ipu_soc *ipu, bool is_vf)
+{
+ uint32_t status;
+ uint32_t status_vf;
+ uint32_t status_rot;
+
+ if (is_vf) {
+ status = ipu_channel_status(ipu, MEM_VDI_PRP_VF_MEM);
+ status_vf = ipu_channel_status(ipu, MEM_PRP_VF_MEM);
+ status_rot = ipu_channel_status(ipu, MEM_ROT_VF_MEM);
+ return status || status_vf || status_rot;
+ } else {
+ status = ipu_channel_status(ipu, MEM_PP_MEM);
+ status_rot = ipu_channel_status(ipu, MEM_ROT_PP_MEM);
+ return status || status_rot;
+ }
+}
+
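+/*
+ * Try to reserve the hardware this task needs without blocking. Resources
+ * are taken in VDOA -> IPU order; an IPU task first looks for a free VF
+ * slot on any IPU and then falls back to a PP slot, recording the choice in
+ * the shared channel table.
+ */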
+static int _get_vdoa_ipu_res(struct ipu_task_entry *t)
+{
+ int i;
+ struct ipu_soc *ipu;
+ u8 *used;
+ uint32_t found_ipu = 0;
+ uint32_t found_vdoa = 0;
+ struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
+
+ mutex_lock(&tbl->lock);
+ if (t->set.mode & VDOA_MODE) {
+ if (NULL != t->vdoa_handle)
+ found_vdoa = 1;
+ else {
+ found_vdoa = tbl->vdoa_used ? 0 : 1;
+ if (found_vdoa) {
+ tbl->vdoa_used = 1;
+ vdoa_get_handle(&t->vdoa_handle);
+ } else
+ /* first get vdoa->ipu resource sequence */
+ goto out;
+ if (t->set.task & VDOA_ONLY)
+ goto out;
+ }
+ }
+
+ for (i = 0; i < max_ipu_no; i++) {
+ ipu = ipu_get_soc(i);
+ if (IS_ERR(ipu))
+ dev_err(t->dev, "no:0x%x,found_vdoa:%d, ipu:%d\n",
+ t->task_no, found_vdoa, i);
+
+ used = &tbl->used[i][IPU_PP_CH_VF];
+ if (t->set.mode & VDI_MODE) {
+ if (0 == *used) {
+ *used = 1;
+ found_ipu = 1;
+ break;
+ }
+ } else if ((t->set.mode & IC_MODE) || only_rot(t->set.mode)) {
+ if (0 == *used) {
+ t->task_id = IPU_TASK_ID_VF;
+ if (t->set.mode & IC_MODE)
+ t->set.task |= IC_VF;
+ if (t->set.mode & ROT_MODE)
+ t->set.task |= ROT_VF;
+ *used = 1;
+ found_ipu = 1;
+ break;
+ }
+ } else
+ dev_err(t->dev, "no:0x%x,found_vdoa:%d, mode:0x%x\n",
+ t->task_no, found_vdoa, t->set.mode);
+ }
+ if (found_ipu)
+ goto next;
+
+ for (i = 0; i < max_ipu_no; i++) {
+ ipu = ipu_get_soc(i);
+ if (IS_ERR(ipu))
+ dev_err(t->dev, "no:0x%x,found_vdoa:%d, ipu:%d\n",
+ t->task_no, found_vdoa, i);
+
+ if ((t->set.mode & IC_MODE) || only_rot(t->set.mode)) {
+ used = &tbl->used[i][IPU_PP_CH_PP];
+ if (0 == *used) {
+ t->task_id = IPU_TASK_ID_PP;
+ if (t->set.mode & IC_MODE)
+ t->set.task |= IC_PP;
+ if (t->set.mode & ROT_MODE)
+ t->set.task |= ROT_PP;
+ *used = 1;
+ found_ipu = 1;
+ break;
+ }
+ }
+ }
+
+next:
+ if (found_ipu) {
+ t->ipu = ipu;
+ t->ipu_id = i;
+ t->dev = ipu->dev;
+ if (atomic_inc_return(&t->res_get) == 2)
+ dev_err(t->dev,
+ "ERR no:0x%x,found_vdoa:%d,get ipu twice\n",
+ t->task_no, found_vdoa);
+ }
+out:
+ dev_dbg(t->dev,
+ "%s:no:0x%x,found_vdoa:%d, found_ipu:%d\n",
+ __func__, t->task_no, found_vdoa, found_ipu);
+ mutex_unlock(&tbl->lock);
+ if (t->set.task & VDOA_ONLY)
+ return found_vdoa;
+ else if (t->set.mode & VDOA_MODE)
+ return found_vdoa && found_ipu;
+ else
+ return found_ipu;
+}
+
+static void put_vdoa_ipu_res(struct ipu_task_entry *tsk, int vdoa_only)
+{
+ int ret;
+ int rel_vdoa = 0, rel_ipu = 0;
+ struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
+
+ mutex_lock(&tbl->lock);
+ if (tsk->set.mode & VDOA_MODE) {
+ if (!tbl->vdoa_used && tsk->vdoa_handle)
+ dev_err(tsk->dev,
+ "ERR no:0x%x,vdoa not used,mode:0x%x\n",
+ tsk->task_no, tsk->set.mode);
+ if (tbl->vdoa_used && tsk->vdoa_handle) {
+ tbl->vdoa_used = 0;
+ vdoa_put_handle(&tsk->vdoa_handle);
+ if (tsk->ipu)
+ tsk->ipu->vdoa_en = 0;
+ rel_vdoa = 1;
+ if (vdoa_only || (tsk->set.task & VDOA_ONLY))
+ goto out;
+ }
+ }
+
+ tbl->used[tsk->ipu_id][tsk->task_id - 1] = 0;
+ rel_ipu = 1;
+ ret = atomic_inc_return(&tsk->res_free);
+ if (ret == 2)
+ dev_err(tsk->dev,
+ "ERR no:0x%x,rel_vdoa:%d,put ipu twice\n",
+ tsk->task_no, rel_vdoa);
+out:
+ dev_dbg(tsk->dev,
+ "%s:no:0x%x,rel_vdoa:%d, rel_ipu:%d\n",
+ __func__, tsk->task_no, rel_vdoa, rel_ipu);
+ mutex_unlock(&tbl->lock);
+}
+
+static int get_vdoa_ipu_res(struct ipu_task_entry *t)
+{
+ int ret;
+ uint32_t found = 0;
+
+ found = _get_vdoa_ipu_res(t);
+ if (!found) {
+ t->ipu_id = -1;
+ t->ipu = NULL;
+ /* blocking to get resource */
+ ret = atomic_inc_return(&req_cnt);
+ dev_dbg(t->dev,
+ "wait_res:no:0x%x,req_cnt:%d\n", t->task_no, ret);
+ ret = wait_event_timeout(res_waitq, _get_vdoa_ipu_res(t),
+ msecs_to_jiffies(t->timeout - DEF_DELAY_MS));
+ if (ret == 0) {
+ dev_err(t->dev, "ERR[0x%p,no-0x%x] wait_res timeout:%dms!\n",
+ t, t->task_no, t->timeout - DEF_DELAY_MS);
+ ret = -ETIMEDOUT;
+ t->state = STATE_RES_TIMEOUT;
+ goto out;
+ } else {
+ if (!(t->set.task & VDOA_ONLY) && (!t->ipu))
+ dev_err(t->dev,
+ "ERR[no-0x%x] can not get ipu!\n",
+ t->task_no);
+ ret = atomic_read(&req_cnt);
+ if (ret > 0)
+ ret = atomic_dec_return(&req_cnt);
+ else
+ dev_err(t->dev,
+ "ERR[no-0x%x] req_cnt:%d mismatch!\n",
+ t->task_no, ret);
+ dev_dbg(t->dev, "no-0x%x,[0x%p],req_cnt:%d, got_res!\n",
+ t->task_no, t, ret);
+ found = 1;
+ }
+ }
+
+out:
+ return found;
+}
+
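+/*
+ * Allocate a refcounted task entry and copy in the user supplied
+ * parameters; the timeout is raised to at least DEF_TIMEOUT_MS.
+ */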
+static struct ipu_task_entry *create_task_entry(struct ipu_task *task)
+{
+ struct ipu_task_entry *tsk;
+
+ tsk = kzalloc(sizeof(struct ipu_task_entry), GFP_KERNEL);
+ if (!tsk)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&tsk->refcount);
+ tsk->state = -EINVAL;
+ tsk->ipu_id = -1;
+ tsk->dev = ipu_dev;
+ tsk->input = task->input;
+ tsk->output = task->output;
+ tsk->overlay_en = task->overlay_en;
+ if (tsk->overlay_en)
+ tsk->overlay = task->overlay;
+ if (task->timeout > DEF_TIMEOUT_MS)
+ tsk->timeout = task->timeout;
+ else
+ tsk->timeout = DEF_TIMEOUT_MS;
+
+ return tsk;
+}
+
+static void task_mem_free(struct kref *ref)
+{
+ struct ipu_task_entry *tsk =
+ container_of(ref, struct ipu_task_entry, refcount);
+ kfree(tsk);
+}
+
+int create_split_child_task(struct ipu_split_task *sp_task)
+{
+ int ret = 0;
+ struct ipu_task_entry *tsk;
+
+ tsk = create_task_entry(&sp_task->task);
+ if (IS_ERR(tsk))
+ return PTR_ERR(tsk);
+
+ sp_task->child_task = tsk;
+ tsk->task_no = sp_task->task_no;
+
+ ret = prepare_task(tsk);
+ if (ret < 0)
+ goto err;
+
+ tsk->parent = sp_task->parent_task;
+ tsk->set.sp_setting = sp_task->parent_task->set.sp_setting;
+
+ list_add(&tsk->node, &tsk->parent->split_list);
+ dev_dbg(tsk->dev, "[0x%p] sp_tsk Q list,no-0x%x\n", tsk, tsk->task_no);
+ tsk->state = STATE_QUEUE;
+ CHECK_PERF(&tsk->ts_queue);
+err:
+ return ret;
+}
+
+static inline int sp_task_check_done(struct ipu_split_task *sp_task,
+ struct ipu_task_entry *parent, int num, int *idx)
+{
+ int i;
+ int ret = 0;
+ struct ipu_task_entry *tsk;
+ struct mutex *lock = &parent->split_lock;
+
+ *idx = -EINVAL;
+ mutex_lock(lock);
+ for (i = 0; i < num; i++) {
+ tsk = sp_task[i].child_task;
+ if (tsk && tsk->split_done) {
+ *idx = i;
+ ret = 1;
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(lock);
+ return ret;
+}
+
+static int create_split_task(
+ int stripe,
+ struct ipu_split_task *sp_task)
+{
+ struct ipu_task *task = &(sp_task->task);
+ struct ipu_task_entry *t = sp_task->parent_task;
+ int ret;
+
+ sp_task->task_no |= stripe;
+
+ task->input = t->input;
+ task->output = t->output;
+ task->overlay_en = t->overlay_en;
+ if (task->overlay_en)
+ task->overlay = t->overlay;
+ task->task_id = t->task_id;
+ if ((t->set.split_mode == RL_SPLIT) ||
+ (t->set.split_mode == UD_SPLIT))
+ task->timeout = t->timeout / 2;
+ else
+ task->timeout = t->timeout / 4;
+
+ task->input.crop.w = t->set.sp_setting.iw;
+ task->input.crop.h = t->set.sp_setting.ih;
+ if (task->overlay_en) {
+ task->overlay.crop.w = t->set.sp_setting.ow;
+ task->overlay.crop.h = t->set.sp_setting.oh;
+ }
+ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
+ task->output.crop.w = t->set.sp_setting.oh;
+ task->output.crop.h = t->set.sp_setting.ow;
+ t->set.sp_setting.rl_split_line = t->set.sp_setting.o_bottom_pos;
+ t->set.sp_setting.ud_split_line = t->set.sp_setting.o_right_pos;
+
+ } else {
+ task->output.crop.w = t->set.sp_setting.ow;
+ task->output.crop.h = t->set.sp_setting.oh;
+ t->set.sp_setting.rl_split_line = t->set.sp_setting.o_right_pos;
+ t->set.sp_setting.ud_split_line = t->set.sp_setting.o_bottom_pos;
+ }
+
+ if (stripe & LEFT_STRIPE)
+ task->input.crop.pos.x += t->set.sp_setting.i_left_pos;
+ else if (stripe & RIGHT_STRIPE)
+ task->input.crop.pos.x += t->set.sp_setting.i_right_pos;
+ if (stripe & UP_STRIPE)
+ task->input.crop.pos.y += t->set.sp_setting.i_top_pos;
+ else if (stripe & DOWN_STRIPE)
+ task->input.crop.pos.y += t->set.sp_setting.i_bottom_pos;
+
+ if (task->overlay_en) {
+ if (stripe & LEFT_STRIPE)
+ task->overlay.crop.pos.x += t->set.sp_setting.o_left_pos;
+ else if (stripe & RIGHT_STRIPE)
+ task->overlay.crop.pos.x += t->set.sp_setting.o_right_pos;
+ if (stripe & UP_STRIPE)
+ task->overlay.crop.pos.y += t->set.sp_setting.o_top_pos;
+ else if (stripe & DOWN_STRIPE)
+ task->overlay.crop.pos.y += t->set.sp_setting.o_bottom_pos;
+ }
+
+ switch (t->output.rotate) {
+ case IPU_ROTATE_NONE:
+ if (stripe & LEFT_STRIPE)
+ task->output.crop.pos.x += t->set.sp_setting.o_left_pos;
+ else if (stripe & RIGHT_STRIPE)
+ task->output.crop.pos.x += t->set.sp_setting.o_right_pos;
+ if (stripe & UP_STRIPE)
+ task->output.crop.pos.y += t->set.sp_setting.o_top_pos;
+ else if (stripe & DOWN_STRIPE)
+ task->output.crop.pos.y += t->set.sp_setting.o_bottom_pos;
+ break;
+ case IPU_ROTATE_VERT_FLIP:
+ if (stripe & LEFT_STRIPE)
+ task->output.crop.pos.x += t->set.sp_setting.o_left_pos;
+ else if (stripe & RIGHT_STRIPE)
+ task->output.crop.pos.x += t->set.sp_setting.o_right_pos;
+ if (stripe & UP_STRIPE)
+ task->output.crop.pos.y =
+ t->output.crop.pos.y + t->output.crop.h
+ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
+ else if (stripe & DOWN_STRIPE)
+ task->output.crop.pos.y =
+ t->output.crop.pos.y + t->output.crop.h
+ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
+ break;
+ case IPU_ROTATE_HORIZ_FLIP:
+ if (stripe & LEFT_STRIPE)
+ task->output.crop.pos.x =
+ t->output.crop.pos.x + t->output.crop.w
+ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
+ else if (stripe & RIGHT_STRIPE)
+ task->output.crop.pos.x =
+ t->output.crop.pos.x + t->output.crop.w
+ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
+ if (stripe & UP_STRIPE)
+ task->output.crop.pos.y += t->set.sp_setting.o_top_pos;
+ else if (stripe & DOWN_STRIPE)
+ task->output.crop.pos.y += t->set.sp_setting.o_bottom_pos;
+ break;
+ case IPU_ROTATE_180:
+ if (stripe & LEFT_STRIPE)
+ task->output.crop.pos.x =
+ t->output.crop.pos.x + t->output.crop.w
+ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
+ else if (stripe & RIGHT_STRIPE)
+ task->output.crop.pos.x =
+ t->output.crop.pos.x + t->output.crop.w
+ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
+ if (stripe & UP_STRIPE)
+ task->output.crop.pos.y =
+ t->output.crop.pos.y + t->output.crop.h
+ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
+ else if (stripe & DOWN_STRIPE)
+ task->output.crop.pos.y =
+ t->output.crop.pos.y + t->output.crop.h
+ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
+ break;
+ case IPU_ROTATE_90_RIGHT:
+ if (stripe & UP_STRIPE)
+ task->output.crop.pos.x =
+ t->output.crop.pos.x + t->output.crop.w
+ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
+ else if (stripe & DOWN_STRIPE)
+ task->output.crop.pos.x =
+ t->output.crop.pos.x + t->output.crop.w
+ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
+ if (stripe & LEFT_STRIPE)
+ task->output.crop.pos.y += t->set.sp_setting.o_left_pos;
+ else if (stripe & RIGHT_STRIPE)
+ task->output.crop.pos.y += t->set.sp_setting.o_right_pos;
+ break;
+ case IPU_ROTATE_90_RIGHT_HFLIP:
+ if (stripe & UP_STRIPE)
+ task->output.crop.pos.x += t->set.sp_setting.o_top_pos;
+ else if (stripe & DOWN_STRIPE)
+ task->output.crop.pos.x += t->set.sp_setting.o_bottom_pos;
+ if (stripe & LEFT_STRIPE)
+ task->output.crop.pos.y += t->set.sp_setting.o_left_pos;
+ else if (stripe & RIGHT_STRIPE)
+ task->output.crop.pos.y += t->set.sp_setting.o_right_pos;
+ break;
+ case IPU_ROTATE_90_RIGHT_VFLIP:
+ if (stripe & UP_STRIPE)
+ task->output.crop.pos.x =
+ t->output.crop.pos.x + t->output.crop.w
+ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
+ else if (stripe & DOWN_STRIPE)
+ task->output.crop.pos.x =
+ t->output.crop.pos.x + t->output.crop.w
+ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
+ if (stripe & LEFT_STRIPE)
+ task->output.crop.pos.y =
+ t->output.crop.pos.y + t->output.crop.h
+ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
+ else if (stripe & RIGHT_STRIPE)
+ task->output.crop.pos.y =
+ t->output.crop.pos.y + t->output.crop.h
+ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
+ break;
+ case IPU_ROTATE_90_LEFT:
+ if (stripe & UP_STRIPE)
+ task->output.crop.pos.x += t->set.sp_setting.o_top_pos;
+ else if (stripe & DOWN_STRIPE)
+ task->output.crop.pos.x += t->set.sp_setting.o_bottom_pos;
+ if (stripe & LEFT_STRIPE)
+ task->output.crop.pos.y =
+ t->output.crop.pos.y + t->output.crop.h
+ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
+ else if (stripe & RIGHT_STRIPE)
+ task->output.crop.pos.y =
+ t->output.crop.pos.y + t->output.crop.h
+ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
+ break;
+ default:
+ dev_err(t->dev, "ERR:should not be here\n");
+ break;
+ }
+
+ ret = create_split_child_task(sp_task);
+ if (ret < 0)
+ dev_err(t->dev, "ERR:create_split_child_task() ret:%d\n", ret);
+ return ret;
+}
+
+static int queue_split_task(struct ipu_task_entry *t,
+ struct ipu_split_task *sp_task, uint32_t size)
+{
+ int err[4];
+ int ret = 0;
+ int i, j;
+ struct ipu_task_entry *tsk = NULL;
+ struct mutex *lock = &t->split_lock;
+ struct mutex *vdic_lock = &t->vdic_lock;
+
+ dev_dbg(t->dev, "Split task 0x%p, no-0x%x, size:%d\n",
+ t, t->task_no, size);
+ mutex_init(lock);
+ mutex_init(vdic_lock);
+ init_waitqueue_head(&t->split_waitq);
+ INIT_LIST_HEAD(&t->split_list);
+ for (j = 0; j < size; j++) {
+ memset(&sp_task[j], 0, sizeof(*sp_task));
+ sp_task[j].parent_task = t;
+ sp_task[j].task_no = t->task_no;
+ }
+
+ if (t->set.split_mode == RL_SPLIT) {
+ i = 0;
+ err[i] = create_split_task(RIGHT_STRIPE, &sp_task[i]);
+ if (err[i] < 0)
+ goto err_start;
+ i = 1;
+ err[i] = create_split_task(LEFT_STRIPE, &sp_task[i]);
+ } else if (t->set.split_mode == UD_SPLIT) {
+ i = 0;
+ err[i] = create_split_task(DOWN_STRIPE, &sp_task[i]);
+ if (err[i] < 0)
+ goto err_start;
+ i = 1;
+ err[i] = create_split_task(UP_STRIPE, &sp_task[i]);
+ } else {
+ i = 0;
+ err[i] = create_split_task(RIGHT_STRIPE | DOWN_STRIPE, &sp_task[i]);
+ if (err[i] < 0)
+ goto err_start;
+ i = 1;
+ err[i] = create_split_task(LEFT_STRIPE | DOWN_STRIPE, &sp_task[i]);
+ if (err[i] < 0)
+ goto err_start;
+ i = 2;
+ err[i] = create_split_task(RIGHT_STRIPE | UP_STRIPE, &sp_task[i]);
+ if (err[i] < 0)
+ goto err_start;
+ i = 3;
+ err[i] = create_split_task(LEFT_STRIPE | UP_STRIPE, &sp_task[i]);
+ }
+
+err_start:
+ for (j = 0; j < (i + 1); j++) {
+ if (err[j] < 0) {
+ if (sp_task[j].child_task)
+ dev_err(t->dev,
+ "sp_task[%d],no-0x%x fail state:%d, queue err:%d.\n",
+ j, sp_task[j].child_task->task_no,
+ sp_task[j].child_task->state, err[j]);
+ goto err_exit;
+ }
+ dev_dbg(t->dev, "[0x%p] sp_task[%d], no-0x%x state:%s, queue ret:%d.\n",
+ sp_task[j].child_task, j, sp_task[j].child_task->task_no,
+ state_msg[sp_task[j].child_task->state].msg, err[j]);
+ }
+
+ return ret;
+
+err_exit:
+ for (j = 0; j < (i + 1); j++) {
+ if (err[j] < 0 && !ret)
+ ret = err[j];
+ tsk = sp_task[j].child_task;
+ if (!tsk)
+ continue;
+ kfree(tsk);
+ }
+ t->state = STATE_ERR;
+ return ret;
+
+}
+
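+/*
+ * Set up the VDOA for a tiled NV12/NV12F source and, unless the task is
+ * VDOA-only, program the matching IPU input channel buffer with the VDOA
+ * output; field buffers are used for three-field deinterlacing, frame
+ * buffers otherwise.
+ */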
+static int init_tiled_buf(struct ipu_soc *ipu, struct ipu_task_entry *t,
+ ipu_channel_t channel, uint32_t ch_type)
+{
+ int ret = 0;
+ int i;
+ uint32_t ipu_fmt;
+ dma_addr_t inbuf_base = 0;
+ u32 field_size;
+ struct vdoa_params param;
+ struct vdoa_ipu_buf buf;
+ struct ipu_soc *ipu_idx;
+ u32 ipu_stride, obuf_size;
+ u32 height, width;
+ ipu_buffer_t type;
+
+ if ((IPU_PIX_FMT_YUYV != t->output.format) &&
+ (IPU_PIX_FMT_NV12 != t->output.format)) {
+ dev_err(t->dev, "ERR:[0x%d] output format\n", t->task_no);
+ return -EINVAL;
+ }
+
+ memset(&param, 0, sizeof(param));
+ /* init channel tiled bufs */
+ if (deinterlace_3_field(t) &&
+ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
+ field_size = tiled_filed_size(t);
+ if (INPUT_CHAN_VDI_P == ch_type) {
+ inbuf_base = t->input.paddr + field_size;
+ param.vfield_buf.prev_veba = inbuf_base + t->set.i_off;
+ } else if (INPUT_CHAN == ch_type) {
+ inbuf_base = t->input.paddr_n;
+ param.vfield_buf.cur_veba = inbuf_base + t->set.i_off;
+ } else if (INPUT_CHAN_VDI_N == ch_type) {
+ inbuf_base = t->input.paddr_n + field_size;
+ param.vfield_buf.next_veba = inbuf_base + t->set.i_off;
+ } else
+ return -EINVAL;
+ height = t->input.crop.h >> 1; /* field format for vdoa */
+ width = t->input.crop.w;
+ param.vfield_buf.vubo = t->set.i_uoff;
+ param.interlaced = 1;
+ param.scan_order = 1;
+ type = IPU_INPUT_BUFFER;
+ } else if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) &&
+ (INPUT_CHAN == ch_type)) {
+ height = t->input.crop.h;
+ width = t->input.crop.w;
+ param.vframe_buf.veba = t->input.paddr + t->set.i_off;
+ param.vframe_buf.vubo = t->set.i_uoff;
+ type = IPU_INPUT_BUFFER;
+ } else
+ return -EINVAL;
+
+ param.band_mode = (t->set.mode & VDOA_BAND_MODE) ? 1 : 0;
+ if (param.band_mode && (t->set.band_lines != 3) &&
+ (t->set.band_lines != 4) && (t->set.band_lines != 5))
+ return -EINVAL;
+ else if (param.band_mode)
+ param.band_lines = (1 << t->set.band_lines);
+ for (i = 0; i < max_ipu_no; i++) {
+ ipu_idx = ipu_get_soc(i);
+ if (!IS_ERR(ipu_idx) && ipu_idx == ipu)
+ break;
+ }
+ if (t->set.task & VDOA_ONLY)
+		/* dummy value; no IPU resource is needed */
+ i = 0;
+ if (max_ipu_no == i) {
+ dev_err(t->dev, "ERR:[0x%p] get ipu num\n", t);
+ return -EINVAL;
+ }
+
+ param.ipu_num = i;
+ param.vpu_stride = t->input.width;
+ param.height = height;
+ param.width = width;
+ if (IPU_PIX_FMT_NV12 == t->output.format)
+ param.pfs = VDOA_PFS_NV12;
+ else
+ param.pfs = VDOA_PFS_YUYV;
+ ipu_fmt = (param.pfs == VDOA_PFS_YUYV) ? IPU_PIX_FMT_YUYV :
+ IPU_PIX_FMT_NV12;
+ ipu_stride = param.width * bytes_per_pixel(ipu_fmt);
+ obuf_size = PAGE_ALIGN(param.width * param.height *
+ fmt_to_bpp(ipu_fmt)/8);
+ dev_dbg(t->dev, "band_mode:%d, band_lines:%d\n",
+ param.band_mode, param.band_lines);
+ if (!param.band_mode) {
+		/* note: for a pure tiled -> raster conversion with no other
+		   post-processing, no intermediate buffer is allocated and
+		   the output buffer is used directly.
+		*/
+ if (t->set.task & VDOA_ONLY)
+ param.ieba0 = t->output.paddr;
+ else {
+ dev_err(t->dev, "ERR:[0x%d] vdoa task\n", t->task_no);
+ return -EINVAL;
+ }
+ } else {
+ if (IPU_PIX_FMT_TILED_NV12F != t->input.format) {
+ dev_err(t->dev, "ERR [0x%d] vdoa task\n", t->task_no);
+ return -EINVAL;
+ }
+ }
+ ret = vdoa_setup(t->vdoa_handle, &param);
+ if (ret)
+ goto done;
+ vdoa_get_output_buf(t->vdoa_handle, &buf);
+ if (t->set.task & VDOA_ONLY)
+ goto done;
+
+ ret = ipu_init_channel_buffer(ipu,
+ channel,
+ type,
+ ipu_fmt,
+ width,
+ height,
+ ipu_stride,
+ IPU_ROTATE_NONE,
+ buf.ieba0,
+ buf.ieba1,
+ 0,
+ buf.iubo,
+ 0);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+
+ if (param.band_mode) {
+ ret = ipu_set_channel_bandmode(ipu, channel,
+ type, t->set.band_lines);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BAND_FAIL;
+ goto done;
+ }
+ }
+done:
+ return ret;
+}
+
+static int init_tiled_ch_bufs(struct ipu_soc *ipu, struct ipu_task_entry *t)
+{
+ int ret = 0;
+
+ if (IPU_PIX_FMT_TILED_NV12 == t->input.format) {
+ ret = init_tiled_buf(ipu, t, t->set.ic_chan, INPUT_CHAN);
+ CHECK_RETCODE(ret < 0, "init tiled_ch", t->state, done, ret);
+ } else if (IPU_PIX_FMT_TILED_NV12F == t->input.format) {
+ ret = init_tiled_buf(ipu, t, t->set.ic_chan, INPUT_CHAN);
+ CHECK_RETCODE(ret < 0, "init tiled_ch-c", t->state, done, ret);
+ ret = init_tiled_buf(ipu, t, t->set.vdi_ic_p_chan,
+ INPUT_CHAN_VDI_P);
+ CHECK_RETCODE(ret < 0, "init tiled_ch-p", t->state, done, ret);
+ ret = init_tiled_buf(ipu, t, t->set.vdi_ic_n_chan,
+ INPUT_CHAN_VDI_N);
+ CHECK_RETCODE(ret < 0, "init tiled_ch-n", t->state, done, ret);
+ } else {
+ ret = -EINVAL;
+ dev_err(t->dev, "ERR[no-0x%x] invalid fmt:0x%x!\n",
+ t->task_no, t->input.format);
+ }
+
+done:
+ return ret;
+}
+
+static int init_ic(struct ipu_soc *ipu, struct ipu_task_entry *t)
+{
+ int ret = 0;
+ ipu_channel_params_t params;
+ dma_addr_t inbuf = 0, ovbuf = 0, ov_alp_buf = 0;
+ dma_addr_t inbuf_p = 0, inbuf_n = 0;
+ dma_addr_t outbuf = 0;
+ int out_uoff = 0, out_voff = 0, out_rot;
+ int out_w = 0, out_h = 0, out_stride;
+ int out_fmt;
+ u32 vdi_frame_idx = 0;
+
+ memset(&params, 0, sizeof(params));
+
+	/* does the IC output need to be linked to a rotation channel? */
+ if (ic_and_rot(t->set.mode)) {
+ outbuf = t->set.r_paddr;
+ out_w = t->set.r_width;
+ out_h = t->set.r_height;
+ out_stride = t->set.r_stride;
+ out_fmt = t->set.r_fmt;
+ out_uoff = 0;
+ out_voff = 0;
+ out_rot = IPU_ROTATE_NONE;
+ } else {
+ outbuf = t->output.paddr + t->set.o_off;
+ out_w = t->output.crop.w;
+ out_h = t->output.crop.h;
+ out_stride = t->set.ostride;
+ out_fmt = t->output.format;
+ out_uoff = t->set.o_uoff;
+ out_voff = t->set.o_voff;
+ out_rot = t->output.rotate;
+ }
+
+ /* settings */
+ params.mem_prp_vf_mem.in_width = t->input.crop.w;
+ params.mem_prp_vf_mem.out_width = out_w;
+ params.mem_prp_vf_mem.in_height = t->input.crop.h;
+ params.mem_prp_vf_mem.out_height = out_h;
+ params.mem_prp_vf_mem.in_pixel_fmt = t->input.format;
+ params.mem_prp_vf_mem.out_pixel_fmt = out_fmt;
+ params.mem_prp_vf_mem.motion_sel = t->input.deinterlace.motion;
+
+ params.mem_prp_vf_mem.outh_resize_ratio =
+ t->set.sp_setting.outh_resize_ratio;
+ params.mem_prp_vf_mem.outv_resize_ratio =
+ t->set.sp_setting.outv_resize_ratio;
+
+ if (t->overlay_en) {
+ params.mem_prp_vf_mem.in_g_pixel_fmt = t->overlay.format;
+ params.mem_prp_vf_mem.graphics_combine_en = 1;
+ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_GLOBAL)
+ params.mem_prp_vf_mem.global_alpha_en = 1;
+ else if (t->overlay.alpha.loc_alp_paddr)
+ params.mem_prp_vf_mem.alpha_chan_en = 1;
+		/* otherwise, per-pixel alpha blending is used. */
+ params.mem_prp_vf_mem.alpha = t->overlay.alpha.gvalue;
+ if (t->overlay.colorkey.enable) {
+ params.mem_prp_vf_mem.key_color_en = 1;
+ params.mem_prp_vf_mem.key_color = t->overlay.colorkey.value;
+ }
+ }
+
+ if (t->input.deinterlace.enable) {
+ if (t->input.deinterlace.field_fmt & IPU_DEINTERLACE_FIELD_MASK)
+ params.mem_prp_vf_mem.field_fmt =
+ IPU_DEINTERLACE_FIELD_BOTTOM;
+ else
+ params.mem_prp_vf_mem.field_fmt =
+ IPU_DEINTERLACE_FIELD_TOP;
+
+ if (t->input.deinterlace.field_fmt & IPU_DEINTERLACE_RATE_EN)
+ vdi_frame_idx = t->input.deinterlace.field_fmt &
+ IPU_DEINTERLACE_RATE_FRAME1;
+ }
+
+ if (t->set.mode & VDOA_MODE)
+ ipu->vdoa_en = 1;
+
+ /* init channels */
+ if (!(t->set.task & VDOA_ONLY)) {
+ ret = ipu_init_channel(ipu, t->set.ic_chan, &params);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_FAIL;
+ goto done;
+ }
+ }
+
+ if (deinterlace_3_field(t)) {
+ ret = ipu_init_channel(ipu, t->set.vdi_ic_p_chan, &params);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_FAIL;
+ goto done;
+ }
+ ret = ipu_init_channel(ipu, t->set.vdi_ic_n_chan, &params);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_FAIL;
+ goto done;
+ }
+ }
+
+ /* init channel bufs */
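+	/* For 3-field motion deinterlacing of non-tiled formats the three
+	 * field pointers (previous/current/next) are derived below from
+	 * paddr/paddr_n plus one line stride, which suggests the two fields
+	 * of a frame are stored interleaved line by line. When
+	 * IPU_DEINTERLACE_RATE_EN is set, vdi_frame_idx selects which output
+	 * frame of the pair is being generated. */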
+ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
+ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
+ ret = init_tiled_ch_bufs(ipu, t);
+ if (ret < 0)
+ goto done;
+ } else {
+ if ((deinterlace_3_field(t)) &&
+ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
+ if (params.mem_prp_vf_mem.field_fmt ==
+ IPU_DEINTERLACE_FIELD_TOP) {
+ if (vdi_frame_idx) {
+ inbuf_p = t->input.paddr + t->set.istride +
+ t->set.i_off;
+ inbuf = t->input.paddr_n + t->set.i_off;
+ inbuf_n = t->input.paddr_n + t->set.istride +
+ t->set.i_off;
+ params.mem_prp_vf_mem.field_fmt =
+ IPU_DEINTERLACE_FIELD_BOTTOM;
+ } else {
+ inbuf_p = t->input.paddr + t->set.i_off;
+ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
+ inbuf_n = t->input.paddr_n + t->set.i_off;
+ }
+ } else {
+ if (vdi_frame_idx) {
+ inbuf_p = t->input.paddr + t->set.i_off;
+ inbuf = t->input.paddr_n + t->set.istride + t->set.i_off;
+ inbuf_n = t->input.paddr_n + t->set.i_off;
+ params.mem_prp_vf_mem.field_fmt =
+ IPU_DEINTERLACE_FIELD_TOP;
+ } else {
+ inbuf_p = t->input.paddr + t->set.istride +
+ t->set.i_off;
+ inbuf = t->input.paddr + t->set.i_off;
+ inbuf_n = t->input.paddr_n + t->set.istride +
+ t->set.i_off;
+ }
+ }
+ } else {
+ if (t->input.deinterlace.enable) {
+ if (params.mem_prp_vf_mem.field_fmt ==
+ IPU_DEINTERLACE_FIELD_TOP) {
+ if (vdi_frame_idx) {
+ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
+ params.mem_prp_vf_mem.field_fmt =
+ IPU_DEINTERLACE_FIELD_BOTTOM;
+ } else
+ inbuf = t->input.paddr + t->set.i_off;
+ } else {
+ if (vdi_frame_idx) {
+ inbuf = t->input.paddr + t->set.i_off;
+ params.mem_prp_vf_mem.field_fmt =
+ IPU_DEINTERLACE_FIELD_TOP;
+ } else
+ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
+ }
+ } else
+ inbuf = t->input.paddr + t->set.i_off;
+ }
+
+ if (t->overlay_en)
+ ovbuf = t->overlay.paddr + t->set.ov_off;
+ }
+ if (t->overlay_en && (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL))
+ ov_alp_buf = t->overlay.alpha.loc_alp_paddr
+ + t->set.ov_alpha_off;
+
+ if ((IPU_PIX_FMT_TILED_NV12 != t->input.format) &&
+ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
+ ret = ipu_init_channel_buffer(ipu,
+ t->set.ic_chan,
+ IPU_INPUT_BUFFER,
+ t->input.format,
+ t->input.crop.w,
+ t->input.crop.h,
+ t->set.istride,
+ IPU_ROTATE_NONE,
+ inbuf,
+ 0,
+ 0,
+ t->set.i_uoff,
+ t->set.i_voff);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+ }
+ if (deinterlace_3_field(t) &&
+ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
+ ret = ipu_init_channel_buffer(ipu,
+ t->set.vdi_ic_p_chan,
+ IPU_INPUT_BUFFER,
+ t->input.format,
+ t->input.crop.w,
+ t->input.crop.h,
+ t->set.istride,
+ IPU_ROTATE_NONE,
+ inbuf_p,
+ 0,
+ 0,
+ t->set.i_uoff,
+ t->set.i_voff);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+
+ ret = ipu_init_channel_buffer(ipu,
+ t->set.vdi_ic_n_chan,
+ IPU_INPUT_BUFFER,
+ t->input.format,
+ t->input.crop.w,
+ t->input.crop.h,
+ t->set.istride,
+ IPU_ROTATE_NONE,
+ inbuf_n,
+ 0,
+ 0,
+ t->set.i_uoff,
+ t->set.i_voff);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+ }
+
+ if (t->overlay_en) {
+ ret = ipu_init_channel_buffer(ipu,
+ t->set.ic_chan,
+ IPU_GRAPH_IN_BUFFER,
+ t->overlay.format,
+ t->overlay.crop.w,
+ t->overlay.crop.h,
+ t->set.ovstride,
+ IPU_ROTATE_NONE,
+ ovbuf,
+ 0,
+ 0,
+ t->set.ov_uoff,
+ t->set.ov_voff);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+ }
+
+ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
+ ret = ipu_init_channel_buffer(ipu,
+ t->set.ic_chan,
+ IPU_ALPHA_IN_BUFFER,
+ IPU_PIX_FMT_GENERIC,
+ t->overlay.crop.w,
+ t->overlay.crop.h,
+ t->set.ov_alpha_stride,
+ IPU_ROTATE_NONE,
+ ov_alp_buf,
+ 0,
+ 0,
+ 0, 0);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+ }
+
+ if (!(t->set.task & VDOA_ONLY)) {
+ ret = ipu_init_channel_buffer(ipu,
+ t->set.ic_chan,
+ IPU_OUTPUT_BUFFER,
+ out_fmt,
+ out_w,
+ out_h,
+ out_stride,
+ out_rot,
+ outbuf,
+ 0,
+ 0,
+ out_uoff,
+ out_voff);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+ }
+
+ if ((t->set.mode & VDOA_BAND_MODE) && (t->set.task & VDI_VF)) {
+ ret = ipu_link_channels(ipu, MEM_VDOA_MEM, t->set.ic_chan);
+ CHECK_RETCODE(ret < 0, "ipu_link_ch vdoa_ic",
+ STATE_LINK_CHAN_FAIL, done, ret);
+ }
+
+done:
+ return ret;
+}
+
+static void uninit_ic(struct ipu_soc *ipu, struct ipu_task_entry *t)
+{
+ int ret;
+
+ if ((t->set.mode & VDOA_BAND_MODE) && (t->set.task & VDI_VF)) {
+ ret = ipu_unlink_channels(ipu, MEM_VDOA_MEM, t->set.ic_chan);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_unlink_ch vdoa_ic",
+ STATE_UNLINK_CHAN_FAIL, ret);
+ }
+ ipu_uninit_channel(ipu, t->set.ic_chan);
+ if (deinterlace_3_field(t)) {
+ ipu_uninit_channel(ipu, t->set.vdi_ic_p_chan);
+ ipu_uninit_channel(ipu, t->set.vdi_ic_n_chan);
+ }
+}
+
+static int init_rot(struct ipu_soc *ipu, struct ipu_task_entry *t)
+{
+ int ret = 0;
+ dma_addr_t inbuf = 0, outbuf = 0;
+ int in_uoff = 0, in_voff = 0;
+ int in_fmt, in_width, in_height, in_stride;
+
+ /* init channel */
+ ret = ipu_init_channel(ipu, t->set.rot_chan, NULL);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_FAIL;
+ goto done;
+ }
+
+ /* init channel buf */
+	/* is the rotation input linked to an IC channel? */
+ if (ic_and_rot(t->set.mode)) {
+ in_fmt = t->set.r_fmt;
+ in_width = t->set.r_width;
+ in_height = t->set.r_height;
+ in_stride = t->set.r_stride;
+ inbuf = t->set.r_paddr;
+ in_uoff = 0;
+ in_voff = 0;
+ } else {
+ in_fmt = t->input.format;
+ in_width = t->input.crop.w;
+ in_height = t->input.crop.h;
+ in_stride = t->set.istride;
+ inbuf = t->input.paddr + t->set.i_off;
+ in_uoff = t->set.i_uoff;
+ in_voff = t->set.i_voff;
+ }
+ outbuf = t->output.paddr + t->set.o_off;
+
+ ret = ipu_init_channel_buffer(ipu,
+ t->set.rot_chan,
+ IPU_INPUT_BUFFER,
+ in_fmt,
+ in_width,
+ in_height,
+ in_stride,
+ t->output.rotate,
+ inbuf,
+ 0,
+ 0,
+ in_uoff,
+ in_voff);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+
+ ret = ipu_init_channel_buffer(ipu,
+ t->set.rot_chan,
+ IPU_OUTPUT_BUFFER,
+ t->output.format,
+ t->output.crop.w,
+ t->output.crop.h,
+ t->set.ostride,
+ IPU_ROTATE_NONE,
+ outbuf,
+ 0,
+ 0,
+ t->set.o_uoff,
+ t->set.o_voff);
+ if (ret < 0) {
+ t->state = STATE_INIT_CHAN_BUF_FAIL;
+ goto done;
+ }
+
+done:
+ return ret;
+}
+
+static void uninit_rot(struct ipu_soc *ipu, struct ipu_task_entry *t)
+{
+ ipu_uninit_channel(ipu, t->set.rot_chan);
+}
+
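+/*
+ * get_irq(): pick the end-of-frame interrupt to wait on. The completion
+ * IRQ is taken from the last channel in the processing chain: the
+ * rotation channel's output EOF when rotation is involved, otherwise the
+ * IC/VDI output EOF of the selected channel.
+ */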
+static int get_irq(struct ipu_task_entry *t)
+{
+ int irq;
+ ipu_channel_t chan;
+
+ if (only_ic(t->set.mode))
+ chan = t->set.ic_chan;
+ else
+ chan = t->set.rot_chan;
+
+ switch (chan) {
+ case MEM_ROT_VF_MEM:
+ irq = IPU_IRQ_PRP_VF_ROT_OUT_EOF;
+ break;
+ case MEM_ROT_PP_MEM:
+ irq = IPU_IRQ_PP_ROT_OUT_EOF;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ case MEM_PRP_VF_MEM:
+ irq = IPU_IRQ_PRP_VF_OUT_EOF;
+ break;
+ case MEM_PP_MEM:
+ irq = IPU_IRQ_PP_OUT_EOF;
+ break;
+ case MEM_VDI_MEM:
+ irq = IPU_IRQ_VDIC_OUT_EOF;
+ break;
+ default:
+ irq = -EINVAL;
+ }
+
+ return irq;
+}
+
+static irqreturn_t task_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_task_entry *prev_tsk = dev_id;
+
+ CHECK_PERF(&prev_tsk->ts_inirq);
+ complete(&prev_tsk->irq_comp);
+ dev_dbg(prev_tsk->dev, "[0x%p] no-0x%x in-irq!",
+ prev_tsk, prev_tsk->task_no);
+
+ return IRQ_HANDLED;
+}
+
+/* Fix the middle lines for deinterlace up/down split mode */
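+/*
+ * Roughly: when a deinterlaced frame is rendered as up/down stripes, the
+ * rows next to the split line differ between the two halves. Whichever
+ * stripe finishes first saves its rows adjacent to the seam into a
+ * temporary buffer (vditmpbuf[0] for the left column or a plain up/down
+ * split, vditmpbuf[1] for right-column stripes); when the opposite stripe
+ * finishes, those saved rows are copied over its rows next to the seam so
+ * the two halves match. Cache maintenance is done around each copy since
+ * the output buffer is written by IPU DMA.
+ */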
+static void vdi_split_process(struct ipu_soc *ipu, struct ipu_task_entry *t)
+{
+ u32 vdi_size;
+ u32 vdi_save_lines;
+ u32 stripe_mode;
+ u32 task_no;
+ u32 i, offset_addr;
+ u32 line_size;
+ unsigned char *base_off;
+ struct ipu_task_entry *parent = t->parent;
+ struct mutex *lock = &parent->vdic_lock;
+
+ if (!parent) {
+ dev_err(t->dev, "ERR[0x%x]invalid parent\n", t->task_no);
+ return;
+ }
+ mutex_lock(lock);
+ stripe_mode = t->task_no & 0xf;
+ task_no = t->task_no >> 4;
+
+	/* Save both the luma and chroma parts for interleaved YUV (e.g. YUYV).
+	 * Save only the luma part for non-interleaved and partially
+	 * interleaved YUV formats (e.g. NV12 and YV12). */
+ if (t->output.format == IPU_PIX_FMT_YUYV ||
+ t->output.format == IPU_PIX_FMT_UYVY)
+ line_size = t->output.crop.w * fmt_to_bpp(t->output.format)/8;
+ else
+ line_size = t->output.crop.w;
+
+ vdi_save_lines = (t->output.crop.h - t->set.sp_setting.ud_split_line)/2;
+ vdi_size = vdi_save_lines * line_size;
+ if (vdi_save_lines <= 0) {
+ dev_err(t->dev, "[0x%p] vdi_save_line error\n", (void *)t);
+ mutex_unlock(lock);
+ return;
+ }
+
+	/* check whether the vditmpbuf buffers have been allocated or the buffer size has changed */
+ if ((vdi_save_lines != parent->old_save_lines) ||
+ (vdi_size != parent->old_size)) {
+ if (parent->vditmpbuf[0] != NULL)
+ kfree(parent->vditmpbuf[0]);
+ if (parent->vditmpbuf[1] != NULL)
+ kfree(parent->vditmpbuf[1]);
+
+ parent->vditmpbuf[0] = kmalloc(vdi_size, GFP_KERNEL);
+ if (parent->vditmpbuf[0] == NULL) {
+ dev_err(t->dev,
+				"[0x%p]Failed to alloc vditmpbuf[0]\n", (void *)t);
+ mutex_unlock(lock);
+ return;
+ }
+ memset(parent->vditmpbuf[0], 0, vdi_size);
+
+ parent->vditmpbuf[1] = kmalloc(vdi_size, GFP_KERNEL);
+ if (parent->vditmpbuf[1] == NULL) {
+ dev_err(t->dev,
+				"[0x%p]Failed to alloc vditmpbuf[1]\n", (void *)t);
+ mutex_unlock(lock);
+ return;
+ }
+ memset(parent->vditmpbuf[1], 0, vdi_size);
+
+ parent->old_save_lines = vdi_save_lines;
+ parent->old_size = vdi_size;
+ }
+
+ if (pfn_valid(t->output.paddr >> PAGE_SHIFT)) {
+ base_off = page_address(pfn_to_page(t->output.paddr >> PAGE_SHIFT));
+ base_off += t->output.paddr & ((1 << PAGE_SHIFT) - 1);
+ } else {
+ base_off = (char *)ioremap_nocache(t->output.paddr,
+ t->output.width * t->output.height *
+ fmt_to_bpp(t->output.format)/8);
+ }
+ if (base_off == NULL) {
+ dev_err(t->dev, "ERR[0x%p]Failed get virtual address\n", t);
+ mutex_unlock(lock);
+ return;
+ }
+
+ /* UP stripe or UP&LEFT stripe */
+ if ((stripe_mode == UP_STRIPE) ||
+ (stripe_mode == (UP_STRIPE | LEFT_STRIPE))) {
+ if (!parent->buf0filled) {
+ offset_addr = t->set.o_off +
+ t->set.sp_setting.ud_split_line*t->set.ostride;
+ dmac_flush_range(base_off + offset_addr,
+ base_off + offset_addr + vdi_size);
+ outer_flush_range(t->output.paddr + offset_addr,
+ t->output.paddr + offset_addr + vdi_size);
+
+ for (i = 0; i < vdi_save_lines; i++)
+ memcpy(parent->vditmpbuf[0] + i*line_size,
+ base_off + offset_addr +
+ i*t->set.ostride, line_size);
+ parent->buf0filled = true;
+ } else {
+ offset_addr = t->set.o_off + (t->output.crop.h -
+ vdi_save_lines) * t->set.ostride;
+ for (i = 0; i < vdi_save_lines; i++)
+ memcpy(base_off + offset_addr + i*t->set.ostride,
+ parent->vditmpbuf[0] + i*line_size, line_size);
+
+ dmac_flush_range(base_off + offset_addr,
+ base_off + offset_addr + i*t->set.ostride);
+ outer_flush_range(t->output.paddr + offset_addr,
+ t->output.paddr + offset_addr + i*t->set.ostride);
+ parent->buf0filled = false;
+ }
+ }
+ /*Down stripe or Down&Left stripe*/
+ else if ((stripe_mode == DOWN_STRIPE) ||
+ (stripe_mode == (DOWN_STRIPE | LEFT_STRIPE))) {
+ if (!parent->buf0filled) {
+ offset_addr = t->set.o_off + vdi_save_lines*t->set.ostride;
+ dmac_flush_range(base_off + offset_addr,
+ base_off + offset_addr + vdi_size);
+ outer_flush_range(t->output.paddr + offset_addr,
+ t->output.paddr + offset_addr + vdi_size);
+
+ for (i = 0; i < vdi_save_lines; i++)
+ memcpy(parent->vditmpbuf[0] + i*line_size,
+ base_off + offset_addr + i*t->set.ostride,
+ line_size);
+ parent->buf0filled = true;
+ } else {
+ offset_addr = t->set.o_off;
+ for (i = 0; i < vdi_save_lines; i++)
+ memcpy(base_off + offset_addr + i*t->set.ostride,
+ parent->vditmpbuf[0] + i*line_size,
+ line_size);
+
+ dmac_flush_range(base_off + offset_addr,
+ base_off + offset_addr + i*t->set.ostride);
+ outer_flush_range(t->output.paddr + offset_addr,
+ t->output.paddr + offset_addr + i*t->set.ostride);
+ parent->buf0filled = false;
+ }
+ }
+ /*Up&Right stripe*/
+ else if (stripe_mode == (UP_STRIPE | RIGHT_STRIPE)) {
+ if (!parent->buf1filled) {
+ offset_addr = t->set.o_off +
+ t->set.sp_setting.ud_split_line*t->set.ostride;
+ dmac_flush_range(base_off + offset_addr,
+ base_off + offset_addr + vdi_size);
+ outer_flush_range(t->output.paddr + offset_addr,
+ t->output.paddr + offset_addr + vdi_size);
+
+ for (i = 0; i < vdi_save_lines; i++)
+ memcpy(parent->vditmpbuf[1] + i*line_size,
+ base_off + offset_addr + i*t->set.ostride,
+ line_size);
+ parent->buf1filled = true;
+ } else {
+ offset_addr = t->set.o_off +
+ (t->output.crop.h - vdi_save_lines)*t->set.ostride;
+ for (i = 0; i < vdi_save_lines; i++)
+ memcpy(base_off + offset_addr + i*t->set.ostride,
+ parent->vditmpbuf[1] + i*line_size,
+ line_size);
+
+ dmac_flush_range(base_off + offset_addr,
+ base_off + offset_addr + i*t->set.ostride);
+ outer_flush_range(t->output.paddr + offset_addr,
+ t->output.paddr + offset_addr + i*t->set.ostride);
+ parent->buf1filled = false;
+ }
+ }
+	/* Down&Right stripe */
+ else if (stripe_mode == (DOWN_STRIPE | RIGHT_STRIPE)) {
+ if (!parent->buf1filled) {
+ offset_addr = t->set.o_off + vdi_save_lines*t->set.ostride;
+ dmac_flush_range(base_off + offset_addr,
+ base_off + offset_addr + vdi_save_lines*t->set.ostride);
+ outer_flush_range(t->output.paddr + offset_addr,
+ t->output.paddr + offset_addr + vdi_save_lines*t->set.ostride);
+
+ for (i = 0; i < vdi_save_lines; i++)
+ memcpy(parent->vditmpbuf[1] + i*line_size,
+ base_off + offset_addr + i*t->set.ostride,
+ line_size);
+ parent->buf1filled = true;
+ } else {
+ offset_addr = t->set.o_off;
+ for (i = 0; i < vdi_save_lines; i++)
+ memcpy(base_off + offset_addr + i*t->set.ostride,
+ parent->vditmpbuf[1] + i*line_size,
+ line_size);
+
+ dmac_flush_range(base_off + offset_addr,
+ base_off + offset_addr + vdi_save_lines*t->set.ostride);
+ outer_flush_range(t->output.paddr + offset_addr,
+ t->output.paddr + offset_addr + vdi_save_lines*t->set.ostride);
+ parent->buf1filled = false;
+ }
+ }
+ if (!pfn_valid(t->output.paddr >> PAGE_SHIFT))
+ iounmap(base_off);
+ mutex_unlock(lock);
+}
+
+static void do_task_release(struct ipu_task_entry *t, int fail)
+{
+ int ret;
+ struct ipu_soc *ipu = t->ipu;
+
+ if (t->input.deinterlace.enable && !fail &&
+ (t->task_no & (UP_STRIPE | DOWN_STRIPE)))
+ vdi_split_process(ipu, t);
+
+ ipu_free_irq(ipu, t->irq, t);
+
+ if (t->vdoa_dma.vaddr)
+ dma_free_coherent(t->dev,
+ t->vdoa_dma.size,
+ t->vdoa_dma.vaddr,
+ t->vdoa_dma.paddr);
+
+ if (only_ic(t->set.mode)) {
+ ret = ipu_disable_channel(ipu, t->set.ic_chan, true);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic",
+ STATE_DISABLE_CHAN_FAIL, ret);
+ if (deinterlace_3_field(t)) {
+ ret = ipu_disable_channel(ipu, t->set.vdi_ic_p_chan,
+ true);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic_p",
+ STATE_DISABLE_CHAN_FAIL, ret);
+ ret = ipu_disable_channel(ipu, t->set.vdi_ic_n_chan,
+ true);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic_n",
+ STATE_DISABLE_CHAN_FAIL, ret);
+ }
+ } else if (only_rot(t->set.mode)) {
+ ret = ipu_disable_channel(ipu, t->set.rot_chan, true);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_rot",
+ STATE_DISABLE_CHAN_FAIL, ret);
+ } else if (ic_and_rot(t->set.mode)) {
+ ret = ipu_unlink_channels(ipu, t->set.ic_chan, t->set.rot_chan);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_unlink_ch",
+ STATE_UNLINK_CHAN_FAIL, ret);
+ ret = ipu_disable_channel(ipu, t->set.rot_chan, true);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch ic_and_rot-rot",
+ STATE_DISABLE_CHAN_FAIL, ret);
+ ret = ipu_disable_channel(ipu, t->set.ic_chan, true);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch ic_and_rot-ic",
+ STATE_DISABLE_CHAN_FAIL, ret);
+ if (deinterlace_3_field(t)) {
+ ret = ipu_disable_channel(ipu, t->set.vdi_ic_p_chan,
+ true);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch icrot-ic-p",
+ STATE_DISABLE_CHAN_FAIL, ret);
+ ret = ipu_disable_channel(ipu, t->set.vdi_ic_n_chan,
+ true);
+ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch icrot-ic-n",
+ STATE_DISABLE_CHAN_FAIL, ret);
+ }
+ }
+
+ if (only_ic(t->set.mode))
+ uninit_ic(ipu, t);
+ else if (only_rot(t->set.mode))
+ uninit_rot(ipu, t);
+ else if (ic_and_rot(t->set.mode)) {
+ uninit_ic(ipu, t);
+ uninit_rot(ipu, t);
+ }
+
+ t->state = STATE_OK;
+ CHECK_PERF(&t->ts_rel);
+ return;
+}
+
+static void do_task_vdoa_only(struct ipu_task_entry *t)
+{
+ int ret;
+
+ ret = init_tiled_ch_bufs(NULL, t);
+ CHECK_RETCODE(ret < 0, "do_vdoa_only", STATE_ERR, out, ret);
+ ret = vdoa_start(t->vdoa_handle, VDOA_DEF_TIMEOUT_MS);
+ vdoa_stop(t->vdoa_handle);
+ CHECK_RETCODE(ret < 0, "vdoa_wait4complete, do_vdoa_only",
+ STATE_VDOA_IRQ_TIMEOUT, out, ret);
+
+ t->state = STATE_OK;
+out:
+ return;
+}
+
+static void do_task(struct ipu_task_entry *t)
+{
+ int r_size;
+ int irq;
+ int ret;
+ uint32_t busy;
+ struct ipu_soc *ipu = t->ipu;
+
+ CHECK_PERF(&t->ts_dotask);
+
+ if (!ipu) {
+ t->state = STATE_NO_IPU;
+ return;
+ }
+
+ init_completion(&t->irq_comp);
+ dev_dbg(ipu->dev, "[0x%p]Do task no:0x%x: id %d\n", (void *)t,
+ t->task_no, t->task_id);
+ dump_task_info(t);
+
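+	/* Pick the IPU channels for this task: post-processing uses
+	 * MEM_PP_MEM, viewfinder pre-processing uses MEM_PRP_VF_MEM, and
+	 * deinterlacing uses the VDI channels (MEM_VDI_MEM* in VDOA band
+	 * mode, MEM_VDI_PRP_VF_MEM* otherwise). Rotation, if requested, is
+	 * handled by the matching MEM_ROT_*_MEM channel. */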
+ if (t->set.task & IC_PP) {
+ t->set.ic_chan = MEM_PP_MEM;
+ dev_dbg(ipu->dev, "[0x%p]ic channel MEM_PP_MEM\n", (void *)t);
+ } else if (t->set.task & IC_VF) {
+ t->set.ic_chan = MEM_PRP_VF_MEM;
+ dev_dbg(ipu->dev, "[0x%p]ic channel MEM_PRP_VF_MEM\n", (void *)t);
+ } else if (t->set.task & VDI_VF) {
+ if (t->set.mode & VDOA_BAND_MODE) {
+ t->set.ic_chan = MEM_VDI_MEM;
+ if (deinterlace_3_field(t)) {
+ t->set.vdi_ic_p_chan = MEM_VDI_MEM_P;
+ t->set.vdi_ic_n_chan = MEM_VDI_MEM_N;
+ }
+ dev_dbg(ipu->dev, "[0x%p]ic ch MEM_VDI_MEM\n",
+ (void *)t);
+ } else {
+ t->set.ic_chan = MEM_VDI_PRP_VF_MEM;
+ if (deinterlace_3_field(t)) {
+ t->set.vdi_ic_p_chan = MEM_VDI_PRP_VF_MEM_P;
+ t->set.vdi_ic_n_chan = MEM_VDI_PRP_VF_MEM_N;
+ }
+ dev_dbg(ipu->dev,
+ "[0x%p]ic ch MEM_VDI_PRP_VF_MEM\n", t);
+ }
+ }
+
+ if (t->set.task & ROT_PP) {
+ t->set.rot_chan = MEM_ROT_PP_MEM;
+ dev_dbg(ipu->dev, "[0x%p]rot channel MEM_ROT_PP_MEM\n", (void *)t);
+ } else if (t->set.task & ROT_VF) {
+ t->set.rot_chan = MEM_ROT_VF_MEM;
+ dev_dbg(ipu->dev, "[0x%p]rot channel MEM_ROT_VF_MEM\n", (void *)t);
+ }
+
+ if (t->task_id == IPU_TASK_ID_VF)
+ busy = ic_vf_pp_is_busy(ipu, true);
+ else if (t->task_id == IPU_TASK_ID_PP)
+ busy = ic_vf_pp_is_busy(ipu, false);
+ else {
+ dev_err(ipu->dev, "ERR[no:0x%x]ipu task_id:%d invalid!\n",
+ t->task_no, t->task_id);
+ return;
+ }
+ if (busy) {
+ dev_err(ipu->dev, "ERR[0x%p-no:0x%x]ipu task_id:%d busy!\n",
+ (void *)t, t->task_no, t->task_id);
+ t->state = STATE_IPU_BUSY;
+ return;
+ }
+
+ irq = get_irq(t);
+ if (irq < 0) {
+ t->state = STATE_NO_IRQ;
+ return;
+ }
+ t->irq = irq;
+
+ /* channel setup */
+ if (only_ic(t->set.mode)) {
+ dev_dbg(t->dev, "[0x%p]only ic mode\n", (void *)t);
+ ret = init_ic(ipu, t);
+ CHECK_RETCODE(ret < 0, "init_ic only_ic",
+ t->state, chan_setup, ret);
+ } else if (only_rot(t->set.mode)) {
+ dev_dbg(t->dev, "[0x%p]only rot mode\n", (void *)t);
+ ret = init_rot(ipu, t);
+ CHECK_RETCODE(ret < 0, "init_rot only_rot",
+ t->state, chan_setup, ret);
+ } else if (ic_and_rot(t->set.mode)) {
+ int rot_idx = (t->task_id == IPU_TASK_ID_VF) ? 0 : 1;
+
+ dev_dbg(t->dev, "[0x%p]ic + rot mode\n", (void *)t);
+ t->set.r_fmt = t->output.format;
+ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
+ t->set.r_width = t->output.crop.h;
+ t->set.r_height = t->output.crop.w;
+ } else {
+ t->set.r_width = t->output.crop.w;
+ t->set.r_height = t->output.crop.h;
+ }
+ t->set.r_stride = t->set.r_width *
+ bytes_per_pixel(t->set.r_fmt);
+ r_size = PAGE_ALIGN(t->set.r_width * t->set.r_height
+ * fmt_to_bpp(t->set.r_fmt)/8);
+
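+		/* IC + rotation runs in two passes through an intermediate
+		 * buffer: the IC writes the scaled/converted frame into
+		 * rot_dma[rot_idx] and the linked rotation channel reads it
+		 * back and writes the rotated result to the user's output
+		 * buffer. The buffer is (re)allocated here whenever the
+		 * required size grows. */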
+ if (r_size > ipu->rot_dma[rot_idx].size) {
+ dev_dbg(t->dev, "[0x%p]realloc rot buffer\n", (void *)t);
+
+ if (ipu->rot_dma[rot_idx].vaddr)
+ dma_free_coherent(t->dev,
+ ipu->rot_dma[rot_idx].size,
+ ipu->rot_dma[rot_idx].vaddr,
+ ipu->rot_dma[rot_idx].paddr);
+
+ ipu->rot_dma[rot_idx].size = r_size;
+ ipu->rot_dma[rot_idx].vaddr = dma_alloc_coherent(t->dev,
+ r_size,
+ &ipu->rot_dma[rot_idx].paddr,
+ GFP_DMA | GFP_KERNEL);
+ CHECK_RETCODE(ipu->rot_dma[rot_idx].vaddr == NULL,
+ "ic_and_rot", STATE_SYS_NO_MEM,
+ chan_setup, -ENOMEM);
+ }
+ t->set.r_paddr = ipu->rot_dma[rot_idx].paddr;
+
+ dev_dbg(t->dev, "[0x%p]rotation:\n", (void *)t);
+ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->set.r_fmt);
+ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->set.r_width);
+ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->set.r_height);
+ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->set.r_paddr);
+ dev_dbg(t->dev, "[0x%p]\trstride = %d\n", (void *)t, t->set.r_stride);
+
+ ret = init_ic(ipu, t);
+ CHECK_RETCODE(ret < 0, "init_ic ic_and_rot",
+ t->state, chan_setup, ret);
+ ret = init_rot(ipu, t);
+ CHECK_RETCODE(ret < 0, "init_rot ic_and_rot",
+ t->state, chan_setup, ret);
+ ret = ipu_link_channels(ipu, t->set.ic_chan,
+ t->set.rot_chan);
+ CHECK_RETCODE(ret < 0, "ipu_link_ch ic_and_rot",
+ STATE_LINK_CHAN_FAIL, chan_setup, ret);
+ } else {
+ dev_err(t->dev, "ERR [0x%p]do task: should not be here\n", t);
+ t->state = STATE_ERR;
+ return;
+ }
+
+ ret = ipu_request_irq(ipu, irq, task_irq_handler, 0, NULL, t);
+ CHECK_RETCODE(ret < 0, "ipu_req_irq",
+ STATE_IRQ_FAIL, chan_setup, ret);
+
+ /* enable/start channel */
+ if (only_ic(t->set.mode)) {
+ ret = ipu_enable_channel(ipu, t->set.ic_chan);
+ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic",
+ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
+ if (deinterlace_3_field(t)) {
+ ret = ipu_enable_channel(ipu, t->set.vdi_ic_p_chan);
+ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic_p",
+ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
+ ret = ipu_enable_channel(ipu, t->set.vdi_ic_n_chan);
+ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic_n",
+ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
+ }
+
+ ret = ipu_select_buffer(ipu, t->set.ic_chan, IPU_OUTPUT_BUFFER,
+ 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ if (t->overlay_en) {
+ ret = ipu_select_buffer(ipu, t->set.ic_chan,
+ IPU_GRAPH_IN_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_g",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
+ ret = ipu_select_buffer(ipu, t->set.ic_chan,
+ IPU_ALPHA_IN_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_a",
+ STATE_SEL_BUF_FAIL, chan_buf,
+ ret);
+ }
+ }
+ if (!(t->set.mode & VDOA_BAND_MODE)) {
+ if (deinterlace_3_field(t))
+ ipu_select_multi_vdi_buffer(ipu, 0);
+ else {
+ ret = ipu_select_buffer(ipu, t->set.ic_chan,
+ IPU_INPUT_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_i",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ }
+ }
+ } else if (only_rot(t->set.mode)) {
+ ret = ipu_enable_channel(ipu, t->set.rot_chan);
+ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_rot",
+ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
+ ret = ipu_select_buffer(ipu, t->set.rot_chan,
+ IPU_OUTPUT_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_rot_o",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ ret = ipu_select_buffer(ipu, t->set.rot_chan,
+ IPU_INPUT_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_rot_i",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ } else if (ic_and_rot(t->set.mode)) {
+ ret = ipu_enable_channel(ipu, t->set.rot_chan);
+ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-rot",
+ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
+ ret = ipu_enable_channel(ipu, t->set.ic_chan);
+ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-ic",
+ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
+ if (deinterlace_3_field(t)) {
+ ret = ipu_enable_channel(ipu, t->set.vdi_ic_p_chan);
+ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-p",
+ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
+ ret = ipu_enable_channel(ipu, t->set.vdi_ic_n_chan);
+ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-n",
+ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
+ }
+
+ ret = ipu_select_buffer(ipu, t->set.rot_chan,
+ IPU_OUTPUT_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-rot-o",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ if (t->overlay_en) {
+ ret = ipu_select_buffer(ipu, t->set.ic_chan,
+ IPU_GRAPH_IN_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-g",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
+ ret = ipu_select_buffer(ipu, t->set.ic_chan,
+ IPU_ALPHA_IN_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf icrot-ic-a",
+ STATE_SEL_BUF_FAIL,
+ chan_buf, ret);
+ }
+ }
+ ret = ipu_select_buffer(ipu, t->set.ic_chan,
+ IPU_OUTPUT_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-o",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ if (deinterlace_3_field(t))
+ ipu_select_multi_vdi_buffer(ipu, 0);
+ else {
+ ret = ipu_select_buffer(ipu, t->set.ic_chan,
+ IPU_INPUT_BUFFER, 0);
+ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-i",
+ STATE_SEL_BUF_FAIL, chan_buf, ret);
+ }
+ }
+
+ if (need_split(t))
+ t->state = STATE_IN_PROGRESS;
+
+ if (t->set.mode & VDOA_BAND_MODE) {
+ ret = vdoa_start(t->vdoa_handle, VDOA_DEF_TIMEOUT_MS);
+ CHECK_RETCODE(ret < 0, "vdoa_wait4complete, do_vdoa_band",
+ STATE_VDOA_IRQ_TIMEOUT, chan_rel, ret);
+ }
+
+ CHECK_PERF(&t->ts_waitirq);
+ ret = wait_for_completion_timeout(&t->irq_comp,
+ msecs_to_jiffies(t->timeout - DEF_DELAY_MS));
+ CHECK_PERF(&t->ts_wakeup);
+ CHECK_RETCODE(ret == 0, "wait_for_comp_timeout",
+ STATE_IRQ_TIMEOUT, chan_rel, ret);
+ dev_dbg(t->dev, "[0x%p] no-0x%x ipu irq done!", t, t->task_no);
+
+chan_rel:
+chan_buf:
+chan_en:
+chan_setup:
+ if (t->set.mode & VDOA_BAND_MODE)
+ vdoa_stop(t->vdoa_handle);
+ do_task_release(t, t->state >= STATE_ERR);
+ return;
+}
+
+static void do_task_vdoa_vdi(struct ipu_task_entry *t)
+{
+ int i;
+ int ret;
+ u32 stripe_width;
+
+	/* FIXME: crop mode is not supported now */
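+	/* The input is wider than the VDI line limit, so it is processed as
+	 * two vertical stripes of half the width; each stripe gets its own
+	 * crop/offset setup and a full do_task() run. */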
+ stripe_width = t->input.width >> 1;
+ t->input.crop.pos.x = 0;
+ t->input.crop.pos.y = 0;
+ t->input.crop.w = stripe_width;
+ t->input.crop.h = t->input.height;
+ t->output.crop.w = stripe_width;
+ t->output.crop.h = t->input.height;
+
+ for (i = 0; i < 2; i++) {
+ t->input.crop.pos.x = t->input.crop.pos.x + i * stripe_width;
+ t->output.crop.pos.x = t->output.crop.pos.x + i * stripe_width;
+ /* check input */
+ ret = set_crop(&t->input.crop, t->input.width, t->input.height,
+ t->input.format);
+ if (ret < 0) {
+ ret = STATE_ERR;
+ goto done;
+ } else
+ update_offset(t->input.format,
+ t->input.width, t->input.height,
+ t->input.crop.pos.x,
+ t->input.crop.pos.y,
+ &t->set.i_off, &t->set.i_uoff,
+ &t->set.i_voff, &t->set.istride);
+ dev_dbg(t->dev, "i_off:0x%x, i_uoff:0x%x, istride:%d.\n",
+ t->set.i_off, t->set.i_uoff, t->set.istride);
+ /* check output */
+ ret = set_crop(&t->output.crop, t->input.width,
+ t->output.height, t->output.format);
+ if (ret < 0) {
+ ret = STATE_ERR;
+ goto done;
+ } else
+ update_offset(t->output.format,
+ t->output.width, t->output.height,
+ t->output.crop.pos.x,
+ t->output.crop.pos.y,
+ &t->set.o_off, &t->set.o_uoff,
+ &t->set.o_voff, &t->set.ostride);
+
+ dev_dbg(t->dev, "o_off:0x%x, o_uoff:0x%x, ostride:%d.\n",
+ t->set.o_off, t->set.o_uoff, t->set.ostride);
+
+ do_task(t);
+ }
+
+ return;
+done:
+ dev_err(t->dev, "ERR %s set_crop.\n", __func__);
+ t->state = ret;
+ return;
+}
+
+static void get_res_do_task(struct ipu_task_entry *t)
+{
+ uint32_t found;
+ uint32_t split_child;
+ struct mutex *lock;
+
+ found = get_vdoa_ipu_res(t);
+ if (!found) {
+ dev_err(t->dev, "ERR:[0x%p] no-0x%x can not get res\n",
+ t, t->task_no);
+ return;
+ } else {
+ if (t->set.task & VDOA_ONLY)
+ do_task_vdoa_only(t);
+ else if ((IPU_PIX_FMT_TILED_NV12F == t->input.format) &&
+ (t->set.mode & VDOA_BAND_MODE) &&
+ (t->input.crop.w >
+ soc_max_vdi_in_width(t->ipu)))
+ do_task_vdoa_vdi(t);
+ else
+ do_task(t);
+ put_vdoa_ipu_res(t, 0);
+ }
+ if (t->state != STATE_OK) {
+ dev_err(t->dev, "ERR:[0x%p] no-0x%x state: %s\n",
+ t, t->task_no, state_msg[t->state].msg);
+ }
+
+ split_child = need_split(t) && t->parent;
+ if (split_child) {
+ lock = &t->parent->split_lock;
+ mutex_lock(lock);
+ t->split_done = 1;
+ mutex_unlock(lock);
+ wake_up(&t->parent->split_waitq);
+ }
+
+ return;
+}
+
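+/*
+ * wait_split_task_complete(): the split parent waits here until every
+ * stripe child signals split_done (or the parent timeout expires). On
+ * timeout the IC/PP busy state of each IPU and the state of each child
+ * are dumped for debugging. In all cases any children still on the task
+ * list are removed, their references dropped, and the temporary VDI
+ * buffers freed before the parent state is set to OK or TIMEOUT.
+ */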
+static void wait_split_task_complete(struct ipu_task_entry *parent,
+ struct ipu_split_task *sp_task, uint32_t size)
+{
+ struct ipu_task_entry *tsk = NULL;
+ int ret = 0, rc;
+ int j, idx = -1;
+ unsigned long flags;
+ struct mutex *lock = &parent->split_lock;
+ int k, busy_vf, busy_pp;
+ struct ipu_soc *ipu;
+ DECLARE_PERF_VAR;
+
+ for (j = 0; j < size; j++) {
+ rc = wait_event_timeout(
+ parent->split_waitq,
+ sp_task_check_done(sp_task, parent, size, &idx),
+ msecs_to_jiffies(parent->timeout - DEF_DELAY_MS));
+ if (!rc) {
+ dev_err(parent->dev,
+ "ERR:[0x%p] no-0x%x, split_task timeout,j:%d,"
+ "size:%d.\n",
+ parent, parent->task_no, j, size);
+ ret = -ETIMEDOUT;
+ goto out;
+ } else {
+ if (idx < 0) {
+ dev_err(parent->dev,
+ "ERR:[0x%p] no-0x%x, invalid task idx:%d\n",
+ parent, parent->task_no, idx);
+ continue;
+ }
+ tsk = sp_task[idx].child_task;
+ mutex_lock(lock);
+ if (!tsk->split_done || !tsk->ipu)
+ dev_err(tsk->dev,
+ "ERR:no-0x%x,split not done:%d/null ipu:0x%p\n",
+ tsk->task_no, tsk->split_done, tsk->ipu);
+ tsk->split_done = 0;
+ mutex_unlock(lock);
+
+ dev_dbg(tsk->dev,
+ "[0x%p] no-0x%x sp_tsk[%d] done,state:%d.\n",
+ tsk, tsk->task_no, idx, tsk->state);
+ #ifdef DBG_IPU_PERF
+ CHECK_PERF(&tsk->ts_rel);
+ PRINT_TASK_STATISTICS;
+ #endif
+ }
+ }
+
+out:
+ if (ret == -ETIMEDOUT) {
+ /* debug */
+ for (k = 0; k < max_ipu_no; k++) {
+ ipu = ipu_get_soc(k);
+ if (IS_ERR(ipu)) {
+ dev_err(parent->dev, "no:0x%x, null ipu:%d\n",
+ parent->task_no, k);
+ } else {
+ busy_vf = ic_vf_pp_is_busy(ipu, true);
+ busy_pp = ic_vf_pp_is_busy(ipu, false);
+ dev_err(parent->dev,
+ "ERR:ipu[%d] busy_vf:%d, busy_pp:%d.\n",
+ k, busy_vf, busy_pp);
+ }
+ }
+ for (k = 0; k < size; k++) {
+ tsk = sp_task[k].child_task;
+ if (!tsk)
+ continue;
+ dev_err(parent->dev,
+ "ERR: sp_task[%d][0x%p] no-0x%x done:%d,"
+ "state:%s,on_list:%d, ipu:0x%p,timeout!\n",
+ k, tsk, tsk->task_no, tsk->split_done,
+ state_msg[tsk->state].msg, tsk->task_in_list,
+ tsk->ipu);
+ }
+ }
+
+ for (j = 0; j < size; j++) {
+ tsk = sp_task[j].child_task;
+ if (!tsk)
+ continue;
+ spin_lock_irqsave(&ipu_task_list_lock, flags);
+ if (tsk->task_in_list) {
+ list_del(&tsk->node);
+ tsk->task_in_list = 0;
+ dev_dbg(tsk->dev,
+ "[0x%p] no-0x%x,id:%d sp_tsk timeout list_del.\n",
+ tsk, tsk->task_no, tsk->task_id);
+ }
+ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
+ if (!tsk->ipu)
+ continue;
+ if (tsk->state != STATE_OK) {
+ dev_err(tsk->dev,
+ "ERR:[0x%p] no-0x%x,id:%d, sp_tsk state: %s\n",
+ tsk, tsk->task_no, tsk->task_id,
+ state_msg[tsk->state].msg);
+ }
+ kref_put(&tsk->refcount, task_mem_free);
+ }
+
+ kfree(parent->vditmpbuf[0]);
+ kfree(parent->vditmpbuf[1]);
+
+ if (ret < 0)
+ parent->state = STATE_TIMEOUT;
+ else
+ parent->state = STATE_OK;
+ return;
+}
+
+static inline int find_task(struct ipu_task_entry **t, int thread_id)
+{
+ int found;
+ unsigned long flags;
+ struct ipu_task_entry *tsk;
+ struct list_head *task_list = &ipu_task_list;
+
+ *t = NULL;
+ spin_lock_irqsave(&ipu_task_list_lock, flags);
+ found = !list_empty(task_list);
+ if (found) {
+ tsk = list_first_entry(task_list, struct ipu_task_entry, node);
+ if (tsk->task_in_list) {
+ list_del(&tsk->node);
+ tsk->task_in_list = 0;
+ *t = tsk;
+ kref_get(&tsk->refcount);
+ dev_dbg(tsk->dev,
+ "thread_id:%d,[0x%p] task_no:0x%x,mode:0x%x list_del\n",
+ thread_id, tsk, tsk->task_no, tsk->set.mode);
+ } else
+ dev_err(tsk->dev,
+ "thread_id:%d,task_no:0x%x,mode:0x%x not on list_del\n",
+ thread_id, tsk->task_no, tsk->set.mode);
+ }
+ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
+
+ return found;
+}
+
+static int ipu_task_thread(void *argv)
+{
+ struct ipu_task_entry *tsk;
+ struct ipu_task_entry *sp_tsk0;
+ struct ipu_split_task sp_task[4];
+ /* priority lower than irq_thread */
+ const struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO/2 - 1,
+ };
+ int ret;
+ int curr_thread_id;
+ uint32_t size;
+ unsigned long flags;
+ unsigned int cpu;
+ struct cpumask cpu_mask;
+ struct ipu_thread_data *data = (struct ipu_thread_data *)argv;
+
+ thread_id++;
+ curr_thread_id = thread_id;
+ sched_setscheduler(current, SCHED_FIFO, &param);
+
+ if (!data->is_vdoa) {
+ cpu = cpumask_first(cpu_online_mask);
+ cpumask_set_cpu(cpu, &cpu_mask);
+ ret = sched_setaffinity(data->ipu->thread[data->id]->pid,
+ &cpu_mask);
+ if (ret < 0) {
+ pr_err("%s: sched_setaffinity fail:%d.\n", __func__, ret);
+ }
+ pr_debug("%s: sched_setaffinity cpu:%d.\n", __func__, cpu);
+ }
+
+ while (!kthread_should_stop()) {
+ int split_fail = 0;
+ int split_parent;
+ int split_child;
+
+ wait_event_interruptible(thread_waitq, find_task(&tsk, curr_thread_id));
+
+ if (!tsk) {
+ pr_err("thread:%d can not find task.\n",
+ curr_thread_id);
+ continue;
+ }
+
+		/* note: other threads run the split child tasks */
+ split_parent = need_split(tsk) && !tsk->parent;
+ split_child = need_split(tsk) && tsk->parent;
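+		/* A split parent queues its stripe children on the global
+		 * task list, wakes the other task threads, and then runs the
+		 * first stripe itself before waiting for the rest in
+		 * wait_split_task_complete() below. */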
+ if (split_parent) {
+ if ((tsk->set.split_mode == RL_SPLIT) ||
+ (tsk->set.split_mode == UD_SPLIT))
+ size = 2;
+ else
+ size = 4;
+ ret = queue_split_task(tsk, sp_task, size);
+ if (ret < 0) {
+ split_fail = 1;
+ } else {
+ struct list_head *pos;
+
+ spin_lock_irqsave(&ipu_task_list_lock, flags);
+
+ sp_tsk0 = list_first_entry(&tsk->split_list,
+ struct ipu_task_entry, node);
+ list_del(&sp_tsk0->node);
+
+ list_for_each(pos, &tsk->split_list) {
+ struct ipu_task_entry *tmp;
+
+ tmp = list_entry(pos,
+ struct ipu_task_entry, node);
+ tmp->task_in_list = 1;
+ dev_dbg(tmp->dev,
+ "[0x%p] no-0x%x,id:%d sp_tsk "
+ "add_to_list.\n", tmp,
+ tmp->task_no, tmp->task_id);
+ }
+ /* add to global list */
+ list_splice(&tsk->split_list, &ipu_task_list);
+
+ spin_unlock_irqrestore(&ipu_task_list_lock,
+ flags);
+ /* let the parent thread do the first sp_task */
+ /* FIXME: ensure the correct sequence for split
+ 4size: 5/6->9/a*/
+ if (!sp_tsk0)
+ dev_err(tsk->dev,
+ "ERR: no-0x%x,can not get split_tsk0\n",
+ tsk->task_no);
+ wake_up_interruptible(&thread_waitq);
+ get_res_do_task(sp_tsk0);
+ dev_dbg(sp_tsk0->dev,
+ "thread:%d complete tsk no:0x%x.\n",
+ curr_thread_id, sp_tsk0->task_no);
+ ret = atomic_read(&req_cnt);
+ if (ret > 0) {
+ wake_up(&res_waitq);
+ dev_dbg(sp_tsk0->dev,
+ "sp_tsk0 sche thread:%d no:0x%x,"
+ "req_cnt:%d\n", curr_thread_id,
+ sp_tsk0->task_no, ret);
+ /* For other threads to get_res */
+ schedule();
+ }
+ }
+ } else
+ get_res_do_task(tsk);
+
+		/* wait here until all split child tasks have finished or the
+		 * timeout expires, then release all resources */
+ if (split_parent && !split_fail)
+ wait_split_task_complete(tsk, sp_task, size);
+
+ if (!split_child) {
+ atomic_inc(&tsk->done);
+ wake_up(&tsk->task_waitq);
+ }
+
+ dev_dbg(tsk->dev, "thread:%d complete tsk no:0x%x-[0x%p].\n",
+ curr_thread_id, tsk->task_no, tsk);
+ ret = atomic_read(&req_cnt);
+ if (ret > 0) {
+ wake_up(&res_waitq);
+ dev_dbg(tsk->dev, "sche thread:%d no:0x%x,req_cnt:%d\n",
+ curr_thread_id, tsk->task_no, ret);
+ /* note: give cpu to other threads to get_res */
+ schedule();
+ }
+
+ kref_put(&tsk->refcount, task_mem_free);
+ }
+
+ pr_info("ERR %s exit.\n", __func__);
+ return 0;
+}
+
+int ipu_check_task(struct ipu_task *task)
+{
+ struct ipu_task_entry *tsk;
+ int ret = 0;
+
+ tsk = create_task_entry(task);
+ if (IS_ERR(tsk))
+ return PTR_ERR(tsk);
+
+ ret = check_task(tsk);
+
+ task->input = tsk->input;
+ task->output = tsk->output;
+ task->overlay = tsk->overlay;
+ dump_task_info(tsk);
+
+ kref_put(&tsk->refcount, task_mem_free);
+ if (ret != 0)
+ pr_debug("%s ret:%d.\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_check_task);
+
+int ipu_queue_task(struct ipu_task *task)
+{
+ struct ipu_task_entry *tsk;
+ unsigned long flags;
+ int ret;
+ u32 tmp_task_no;
+ DECLARE_PERF_VAR;
+
+ tsk = create_task_entry(task);
+ if (IS_ERR(tsk))
+ return PTR_ERR(tsk);
+
+ CHECK_PERF(&tsk->ts_queue);
+ ret = prepare_task(tsk);
+ if (ret < 0)
+ goto done;
+
+ if (need_split(tsk)) {
+ CHECK_PERF(&tsk->ts_dotask);
+ CHECK_PERF(&tsk->ts_waitirq);
+ CHECK_PERF(&tsk->ts_inirq);
+ CHECK_PERF(&tsk->ts_wakeup);
+ }
+
+	/* the last four bits of task_no are reserved for the split task type */
+ tmp_task_no = atomic_inc_return(&frame_no);
+ tsk->task_no = tmp_task_no << 4;
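+	/* The frame counter occupies the upper bits; the low four bits stay
+	 * clear here, presumably so the split code can OR the stripe flags
+	 * into them (vdi_split_process() reads them back as task_no & 0xf). */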
+ init_waitqueue_head(&tsk->task_waitq);
+
+ spin_lock_irqsave(&ipu_task_list_lock, flags);
+ list_add_tail(&tsk->node, &ipu_task_list);
+ tsk->task_in_list = 1;
+ dev_dbg(tsk->dev, "[0x%p,no-0x%x] list_add_tail\n", tsk, tsk->task_no);
+ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
+ wake_up_interruptible(&thread_waitq);
+
+ ret = wait_event_timeout(tsk->task_waitq, atomic_read(&tsk->done),
+ msecs_to_jiffies(tsk->timeout));
+ if (0 == ret) {
+		/* note: this timeout should be larger than the internal timeout! */
+ ret = -ETIMEDOUT;
+ dev_err(tsk->dev, "ERR: [0x%p] no-0x%x, timeout:%dms!\n",
+ tsk, tsk->task_no, tsk->timeout);
+ } else {
+ if (STATE_OK != tsk->state) {
+ dev_err(tsk->dev, "ERR: [0x%p] no-0x%x,state %d: %s\n",
+ tsk, tsk->task_no, tsk->state,
+ state_msg[tsk->state].msg);
+ ret = -ECANCELED;
+ } else
+ ret = 0;
+ }
+
+ spin_lock_irqsave(&ipu_task_list_lock, flags);
+ if (tsk->task_in_list) {
+ list_del(&tsk->node);
+ tsk->task_in_list = 0;
+ dev_dbg(tsk->dev, "[0x%p] no:0x%x list_del\n",
+ tsk, tsk->task_no);
+ }
+ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
+
+#ifdef DBG_IPU_PERF
+ CHECK_PERF(&tsk->ts_rel);
+ PRINT_TASK_STATISTICS;
+ if (ts_frame_avg == 0)
+ ts_frame_avg = ts_frame.tv_nsec / NSEC_PER_USEC +
+ ts_frame.tv_sec * USEC_PER_SEC;
+ else
+ ts_frame_avg = (ts_frame_avg + ts_frame.tv_nsec / NSEC_PER_USEC
+ + ts_frame.tv_sec * USEC_PER_SEC)/2;
+ if (timespec_compare(&ts_frame, &ts_frame_max) > 0)
+ ts_frame_max = ts_frame;
+
+ atomic_inc(&frame_cnt);
+
+ if ((atomic_read(&frame_cnt) % 1000) == 0)
+ pr_debug("ipu_dev: max frame time:%ldus, avg frame time:%dus,"
+ "frame_cnt:%d\n", ts_frame_max.tv_nsec / NSEC_PER_USEC
+ + ts_frame_max.tv_sec * USEC_PER_SEC,
+ ts_frame_avg, atomic_read(&frame_cnt));
+#endif
+done:
+ if (ret < 0)
+ dev_err(tsk->dev, "ERR: no-0x%x,ipu_queue_task err:%d\n",
+ tsk->task_no, ret);
+
+ kref_put(&tsk->refcount, task_mem_free);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_queue_task);
+
+static int mxc_ipu_open(struct inode *inode, struct file *file)
+{
+ file->private_data = (void *)atomic_inc_return(&file_index);
+ return 0;
+}
+
+static long mxc_ipu_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int __user *argp = (void __user *)arg;
+ int ret = 0;
+
+ switch (cmd) {
+ case IPU_CHECK_TASK:
+ {
+ struct ipu_task task;
+
+ if (copy_from_user
+ (&task, (struct ipu_task *) arg,
+ sizeof(struct ipu_task)))
+ return -EFAULT;
+ ret = ipu_check_task(&task);
+ if (copy_to_user((struct ipu_task *) arg,
+ &task, sizeof(struct ipu_task)))
+ return -EFAULT;
+ break;
+ }
+ case IPU_QUEUE_TASK:
+ {
+ struct ipu_task task;
+
+ if (copy_from_user
+ (&task, (struct ipu_task *) arg,
+ sizeof(struct ipu_task)))
+ return -EFAULT;
+ ret = ipu_queue_task(&task);
+ break;
+ }
+ case IPU_ALLOC:
+ {
+ int size;
+ struct ipu_alloc_list *mem;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (mem == NULL)
+ return -ENOMEM;
+
+			if (get_user(size, argp)) {
+				kfree(mem);
+				return -EFAULT;
+			}
+
+ mem->size = PAGE_ALIGN(size);
+
+ mem->cpu_addr = dma_alloc_coherent(ipu_dev, size,
+ &mem->phy_addr,
+ GFP_DMA | GFP_KERNEL);
+ if (mem->cpu_addr == NULL) {
+ kfree(mem);
+ return -ENOMEM;
+ }
+ mem->file_index = file->private_data;
+ mutex_lock(&ipu_alloc_lock);
+ list_add(&mem->list, &ipu_alloc_list);
+ mutex_unlock(&ipu_alloc_lock);
+
+ dev_dbg(ipu_dev, "allocated %d bytes @ 0x%08X\n",
+ mem->size, mem->phy_addr);
+
+ if (put_user(mem->phy_addr, argp))
+ return -EFAULT;
+
+ break;
+ }
+ case IPU_FREE:
+ {
+ unsigned long offset;
+ struct ipu_alloc_list *mem;
+
+ if (get_user(offset, argp))
+ return -EFAULT;
+
+ ret = -EINVAL;
+ mutex_lock(&ipu_alloc_lock);
+ list_for_each_entry(mem, &ipu_alloc_list, list) {
+ if (mem->phy_addr == offset) {
+ list_del(&mem->list);
+ dma_free_coherent(ipu_dev,
+ mem->size,
+ mem->cpu_addr,
+ mem->phy_addr);
+ kfree(mem);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ipu_alloc_lock);
+ if (0 == ret)
+ dev_dbg(ipu_dev, "free %d bytes @ 0x%08X\n",
+ mem->size, mem->phy_addr);
+
+ break;
+ }
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int mxc_ipu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ bool found = false;
+ u32 len;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct ipu_alloc_list *mem;
+
+ mutex_lock(&ipu_alloc_lock);
+ list_for_each_entry(mem, &ipu_alloc_list, list) {
+ if (offset == mem->phy_addr) {
+ found = true;
+ len = mem->size;
+ break;
+ }
+ }
+ mutex_unlock(&ipu_alloc_lock);
+ if (!found)
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start > len)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ printk(KERN_ERR
+ "mmap failed!\n");
+ return -ENOBUFS;
+ }
+ return 0;
+}
+
+static int mxc_ipu_release(struct inode *inode, struct file *file)
+{
+ struct ipu_alloc_list *mem;
+ struct ipu_alloc_list *n;
+
+ mutex_lock(&ipu_alloc_lock);
+ list_for_each_entry_safe(mem, n, &ipu_alloc_list, list) {
+ if ((mem->cpu_addr != 0) &&
+ (file->private_data == mem->file_index)) {
+ list_del(&mem->list);
+ dma_free_coherent(ipu_dev,
+ mem->size,
+ mem->cpu_addr,
+ mem->phy_addr);
+ dev_dbg(ipu_dev, "rel-free %d bytes @ 0x%08X\n",
+ mem->size, mem->phy_addr);
+ kfree(mem);
+ }
+ }
+ mutex_unlock(&ipu_alloc_lock);
+ atomic_dec(&file_index);
+
+ return 0;
+}
+
+static struct file_operations mxc_ipu_fops = {
+ .owner = THIS_MODULE,
+ .open = mxc_ipu_open,
+ .mmap = mxc_ipu_mmap,
+ .release = mxc_ipu_release,
+ .unlocked_ioctl = mxc_ipu_ioctl,
+};
+
+int register_ipu_device(struct ipu_soc *ipu, int id)
+{
+ int ret = 0;
+ static int idx;
+ static struct ipu_thread_data thread_data[5];
+
+ if (!major) {
+ major = register_chrdev(0, "mxc_ipu", &mxc_ipu_fops);
+ if (major < 0) {
+ printk(KERN_ERR "Unable to register mxc_ipu as a char device\n");
+ ret = major;
+ goto register_cdev_fail;
+ }
+
+ ipu_class = class_create(THIS_MODULE, "mxc_ipu");
+ if (IS_ERR(ipu_class)) {
+ ret = PTR_ERR(ipu_class);
+ goto ipu_class_fail;
+ }
+
+ ipu_dev = device_create(ipu_class, NULL, MKDEV(major, 0),
+ NULL, "mxc_ipu");
+ if (IS_ERR(ipu_dev)) {
+ ret = PTR_ERR(ipu_dev);
+ goto dev_create_fail;
+ }
+ ipu_dev->dma_mask = kmalloc(sizeof(*ipu_dev->dma_mask), GFP_KERNEL);
+ *ipu_dev->dma_mask = DMA_BIT_MASK(32);
+ ipu_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ mutex_init(&ipu_ch_tbl.lock);
+ }
+ max_ipu_no = ++id;
+ ipu->rot_dma[0].size = 0;
+ ipu->rot_dma[1].size = 0;
+
+ thread_data[idx].ipu = ipu;
+ thread_data[idx].id = 0;
+ thread_data[idx].is_vdoa = 0;
+ ipu->thread[0] = kthread_run(ipu_task_thread, &thread_data[idx++],
+ "ipu%d_task", id);
+ if (IS_ERR(ipu->thread[0])) {
+ ret = PTR_ERR(ipu->thread[0]);
+ goto kthread0_fail;
+ }
+
+ thread_data[idx].ipu = ipu;
+ thread_data[idx].id = 1;
+ thread_data[idx].is_vdoa = 0;
+ ipu->thread[1] = kthread_run(ipu_task_thread, &thread_data[idx++],
+ "ipu%d_task", id);
+ if (IS_ERR(ipu->thread[1])) {
+ ret = PTR_ERR(ipu->thread[1]);
+ goto kthread1_fail;
+ }
+
+
+ return ret;
+
+kthread1_fail:
+ kthread_stop(ipu->thread[0]);
+kthread0_fail:
+ if (id == 0)
+ device_destroy(ipu_class, MKDEV(major, 0));
+dev_create_fail:
+ if (id == 0) {
+ class_destroy(ipu_class);
+ }
+ipu_class_fail:
+ if (id == 0)
+ unregister_chrdev(major, "mxc_ipu");
+register_cdev_fail:
+ return ret;
+}
+
+void unregister_ipu_device(struct ipu_soc *ipu, int id)
+{
+ int i;
+
+ kthread_stop(ipu->thread[0]);
+ kthread_stop(ipu->thread[1]);
+ for (i = 0; i < 2; i++) {
+ if (ipu->rot_dma[i].vaddr)
+ dma_free_coherent(ipu_dev,
+ ipu->rot_dma[i].size,
+ ipu->rot_dma[i].vaddr,
+ ipu->rot_dma[i].paddr);
+ }
+
+ if (major) {
+ device_destroy(ipu_class, MKDEV(major, 0));
+ class_destroy(ipu_class);
+ unregister_chrdev(major, "mxc_ipu");
+ major = 0;
+ }
+}
diff --git a/drivers/mxc/ipu3/ipu_disp.c b/drivers/mxc/ipu3/ipu_disp.c
new file mode 100644
index 000000000000..f7044c297389
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_disp.c
@@ -0,0 +1,1959 @@
+/*
+ * Copyright 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file ipu_disp.c
+ *
+ * @brief IPU display submodule API functions
+ *
+ * @ingroup IPU
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ipu-v3.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+
+#include "ipu_param_mem.h"
+#include "ipu_regs.h"
+
+struct dp_csc_param_t {
+ int mode;
+ void *coeff;
+};
+
+#define SYNC_WAVE 0
+#define NULL_WAVE (-1)
+#define ASYNC_SER_WAVE 6
+
+/* DC display ID assignments */
+#define DC_DISP_ID_SYNC(di) (di)
+#define DC_DISP_ID_SERIAL 2
+#define DC_DISP_ID_ASYNC 3
+
+int dmfc_type_setup;
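+/* dmfc_type_setup can be overridden from the kernel command line via the
+ * "dmfc=" option (see dmfc_setup() below); once a high-resolution layout
+ * has been configured, _ipu_dmfc_init() will not lower it again. */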
+
+void _ipu_dmfc_init(struct ipu_soc *ipu, int dmfc_type, int first)
+{
+ u32 dmfc_wr_chan, dmfc_dp_chan;
+
+ if (first) {
+ if (dmfc_type_setup > dmfc_type)
+ dmfc_type = dmfc_type_setup;
+ else
+ dmfc_type_setup = dmfc_type;
+
+ /* disable DMFC-IC channel*/
+ ipu_dmfc_write(ipu, 0x2, DMFC_IC_CTRL);
+ } else if (dmfc_type_setup >= DMFC_HIGH_RESOLUTION_DC) {
+		dev_dbg(ipu->dev, "DMFC high resolution has been set, will not change\n");
+ return;
+ } else
+ dmfc_type_setup = dmfc_type;
+
+ if (dmfc_type == DMFC_HIGH_RESOLUTION_DC) {
+ /* 1 - segment 0~3;
+		 * 5B - segment 4, 5;
+		 * 5F - segment 6, 7;
+ * 1C, 2C and 6B, 6F unused;
+ */
+ dev_info(ipu->dev, "IPU DMFC DC HIGH RESOLUTION: 1(0~3), 5B(4,5), 5F(6,7)\n");
+ dmfc_wr_chan = 0x00000088;
+ dmfc_dp_chan = 0x00009694;
+ ipu->dmfc_size_28 = 256*4;
+ ipu->dmfc_size_29 = 0;
+ ipu->dmfc_size_24 = 0;
+ ipu->dmfc_size_27 = 128*4;
+ ipu->dmfc_size_23 = 128*4;
+ } else if (dmfc_type == DMFC_HIGH_RESOLUTION_DP) {
+ /* 1 - segment 0, 1;
+		 * 5B - segment 2~5;
+		 * 5F - segment 6, 7;
+ * 1C, 2C and 6B, 6F unused;
+ */
+ dev_info(ipu->dev, "IPU DMFC DP HIGH RESOLUTION: 1(0,1), 5B(2~5), 5F(6,7)\n");
+ dmfc_wr_chan = 0x00000090;
+ dmfc_dp_chan = 0x0000968a;
+ ipu->dmfc_size_28 = 128*4;
+ ipu->dmfc_size_29 = 0;
+ ipu->dmfc_size_24 = 0;
+ ipu->dmfc_size_27 = 128*4;
+ ipu->dmfc_size_23 = 256*4;
+ } else if (dmfc_type == DMFC_HIGH_RESOLUTION_ONLY_DP) {
+		/* 5B - segment 0~3;
+		 * 5F - segment 4~7;
+ * 1, 1C, 2C and 6B, 6F unused;
+ */
+ dev_info(ipu->dev, "IPU DMFC ONLY-DP HIGH RESOLUTION: 5B(0~3), 5F(4~7)\n");
+ dmfc_wr_chan = 0x00000000;
+ dmfc_dp_chan = 0x00008c88;
+ ipu->dmfc_size_28 = 0;
+ ipu->dmfc_size_29 = 0;
+ ipu->dmfc_size_24 = 0;
+ ipu->dmfc_size_27 = 256*4;
+ ipu->dmfc_size_23 = 256*4;
+ } else {
+ /* 1 - segment 0, 1;
+		 * 5B - segment 4, 5;
+		 * 5F - segment 6, 7;
+ * 1C, 2C and 6B, 6F unused;
+ */
+ dev_info(ipu->dev, "IPU DMFC NORMAL mode: 1(0~1), 5B(4,5), 5F(6,7)\n");
+ dmfc_wr_chan = 0x00000090;
+ dmfc_dp_chan = 0x00009694;
+ ipu->dmfc_size_28 = 128*4;
+ ipu->dmfc_size_29 = 0;
+ ipu->dmfc_size_24 = 0;
+ ipu->dmfc_size_27 = 128*4;
+ ipu->dmfc_size_23 = 128*4;
+ }
+ ipu_dmfc_write(ipu, dmfc_wr_chan, DMFC_WR_CHAN);
+ ipu_dmfc_write(ipu, 0x202020F6, DMFC_WR_CHAN_DEF);
+ ipu_dmfc_write(ipu, dmfc_dp_chan, DMFC_DP_CHAN);
+ /* Enable chan 5 watermark set at 5 bursts and clear at 7 bursts */
+ ipu_dmfc_write(ipu, 0x2020F6F6, DMFC_DP_CHAN_DEF);
+}
+
+static int __init dmfc_setup(char *options)
+{
+ get_option(&options, &dmfc_type_setup);
+ if (dmfc_type_setup > DMFC_HIGH_RESOLUTION_ONLY_DP)
+ dmfc_type_setup = DMFC_HIGH_RESOLUTION_ONLY_DP;
+ return 1;
+}
+__setup("dmfc=", dmfc_setup);
+
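+/*
+ * _ipu_dmfc_set_wait4eot(): per-channel bits in DMFC_GENERAL1 that appear
+ * to control "wait for end of transfer" behaviour. A channel's bit is set
+ * only when the FIFO segment assigned to it can hold more than a small
+ * number of lines of the given width (the threshold differs per channel),
+ * otherwise it is cleared; very wide displays first switch the FIFO
+ * layout to one of the high-resolution configurations.
+ */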
+void _ipu_dmfc_set_wait4eot(struct ipu_soc *ipu, int dma_chan, int width)
+{
+ u32 dmfc_gen1 = ipu_dmfc_read(ipu, DMFC_GENERAL1);
+
+ if (width >= HIGH_RESOLUTION_WIDTH) {
+ if (dma_chan == 23)
+ _ipu_dmfc_init(ipu, DMFC_HIGH_RESOLUTION_DP, 0);
+ else if (dma_chan == 28)
+ _ipu_dmfc_init(ipu, DMFC_HIGH_RESOLUTION_DC, 0);
+ }
+
+ if (dma_chan == 23) { /*5B*/
+ if (ipu->dmfc_size_23/width > 3)
+ dmfc_gen1 |= 1UL << 20;
+ else
+ dmfc_gen1 &= ~(1UL << 20);
+ } else if (dma_chan == 24) { /*6B*/
+ if (ipu->dmfc_size_24/width > 1)
+ dmfc_gen1 |= 1UL << 22;
+ else
+ dmfc_gen1 &= ~(1UL << 22);
+ } else if (dma_chan == 27) { /*5F*/
+ if (ipu->dmfc_size_27/width > 2)
+ dmfc_gen1 |= 1UL << 21;
+ else
+ dmfc_gen1 &= ~(1UL << 21);
+ } else if (dma_chan == 28) { /*1*/
+ if (ipu->dmfc_size_28/width > 2)
+ dmfc_gen1 |= 1UL << 16;
+ else
+ dmfc_gen1 &= ~(1UL << 16);
+ } else if (dma_chan == 29) { /*6F*/
+ if (ipu->dmfc_size_29/width > 1)
+ dmfc_gen1 |= 1UL << 23;
+ else
+ dmfc_gen1 &= ~(1UL << 23);
+ }
+
+ ipu_dmfc_write(ipu, dmfc_gen1, DMFC_GENERAL1);
+}
+
+void _ipu_dmfc_set_burst_size(struct ipu_soc *ipu, int dma_chan, int burst_size)
+{
+ u32 dmfc_wr_chan = ipu_dmfc_read(ipu, DMFC_WR_CHAN);
+ u32 dmfc_dp_chan = ipu_dmfc_read(ipu, DMFC_DP_CHAN);
+ int dmfc_bs = 0;
+
+ switch (burst_size) {
+ case 64:
+ dmfc_bs = 0x40;
+ break;
+ case 32:
+ case 20:
+ dmfc_bs = 0x80;
+ break;
+ case 16:
+ dmfc_bs = 0xc0;
+ break;
+ default:
+ dev_err(ipu->dev, "Unsupported burst size %d\n",
+ burst_size);
+ return;
+ }
+
+ if (dma_chan == 23) { /*5B*/
+ dmfc_dp_chan &= ~(0xc0);
+ dmfc_dp_chan |= dmfc_bs;
+ } else if (dma_chan == 27) { /*5F*/
+ dmfc_dp_chan &= ~(0xc000);
+ dmfc_dp_chan |= (dmfc_bs << 8);
+ } else if (dma_chan == 28) { /*1*/
+ dmfc_wr_chan &= ~(0xc0);
+ dmfc_wr_chan |= dmfc_bs;
+ }
+
+ ipu_dmfc_write(ipu, dmfc_wr_chan, DMFC_WR_CHAN);
+ ipu_dmfc_write(ipu, dmfc_dp_chan, DMFC_DP_CHAN);
+}
+
+static void _ipu_di_data_wave_config(struct ipu_soc *ipu,
+ int di, int wave_gen,
+ int access_size, int component_size)
+{
+ u32 reg;
+ reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
+ (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
+ ipu_di_write(ipu, di, reg, DI_DW_GEN(wave_gen));
+}
+
+static void _ipu_di_data_pin_config(struct ipu_soc *ipu,
+ int di, int wave_gen, int di_pin, int set,
+ int up, int down)
+{
+ u32 reg;
+
+ reg = ipu_di_read(ipu, di, DI_DW_GEN(wave_gen));
+ reg &= ~(0x3 << (di_pin * 2));
+ reg |= set << (di_pin * 2);
+ ipu_di_write(ipu, di, reg, DI_DW_GEN(wave_gen));
+
+ ipu_di_write(ipu, di, (down << 16) | up, DI_DW_SET(wave_gen, set));
+}
+
+static void _ipu_di_sync_config(struct ipu_soc *ipu,
+ int di, int wave_gen,
+ int run_count, int run_src,
+ int offset_count, int offset_src,
+ int repeat_count, int cnt_clr_src,
+ int cnt_polarity_gen_en,
+ int cnt_polarity_clr_src,
+ int cnt_polarity_trigger_src,
+ int cnt_up, int cnt_down)
+{
+ u32 reg;
+
+ if ((run_count >= 0x1000) || (offset_count >= 0x1000) || (repeat_count >= 0x1000) ||
+ (cnt_up >= 0x400) || (cnt_down >= 0x400)) {
+ dev_err(ipu->dev, "DI%d counters out of range.\n", di);
+ return;
+ }
+
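+ /*
+ * The pre-increments below convert the DI_SYNC_* source selects into the
+ * hardware's 1-based encoding, where 0 means "no source"; this relies on
+ * DI_SYNC_NONE being defined as -1 elsewhere in this driver (assumed here).
+ */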
+ reg = (run_count << 19) | (++run_src << 16) |
+ (offset_count << 3) | ++offset_src;
+ ipu_di_write(ipu, di, reg, DI_SW_GEN0(wave_gen));
+ reg = (cnt_polarity_gen_en << 29) | (++cnt_clr_src << 25) |
+ (++cnt_polarity_trigger_src << 12) | (++cnt_polarity_clr_src << 9);
+ reg |= (cnt_down << 16) | cnt_up;
+ if (repeat_count == 0) {
+ /* Enable auto reload */
+ reg |= 0x10000000;
+ }
+ ipu_di_write(ipu, di, reg, DI_SW_GEN1(wave_gen));
+ reg = ipu_di_read(ipu, di, DI_STP_REP(wave_gen));
+ reg &= ~(0xFFFF << (16 * ((wave_gen - 1) & 0x1)));
+ reg |= repeat_count << (16 * ((wave_gen - 1) & 0x1));
+ ipu_di_write(ipu, di, reg, DI_STP_REP(wave_gen));
+}
+
+static void _ipu_dc_map_link(struct ipu_soc *ipu,
+ int current_map,
+ int base_map_0, int buf_num_0,
+ int base_map_1, int buf_num_1,
+ int base_map_2, int buf_num_2)
+{
+ int ptr_0 = base_map_0 * 3 + buf_num_0;
+ int ptr_1 = base_map_1 * 3 + buf_num_1;
+ int ptr_2 = base_map_2 * 3 + buf_num_2;
+ int ptr;
+ u32 reg;
+ ptr = (ptr_2 << 10) + (ptr_1 << 5) + ptr_0;
+
+ reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(current_map));
+ reg &= ~(0x1F << ((16 * (current_map & 0x1))));
+ reg |= ptr << ((16 * (current_map & 0x1)));
+ ipu_dc_write(ipu, reg, DC_MAP_CONF_PTR(current_map));
+}
+
+static void _ipu_dc_map_config(struct ipu_soc *ipu,
+ int map, int byte_num, int offset, int mask)
+{
+ int ptr = map * 3 + byte_num;
+ u32 reg;
+
+ reg = ipu_dc_read(ipu, DC_MAP_CONF_VAL(ptr));
+ reg &= ~(0xFFFF << (16 * (ptr & 0x1)));
+ reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
+ ipu_dc_write(ipu, reg, DC_MAP_CONF_VAL(ptr));
+
+ reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(map));
+ reg &= ~(0x1F << ((16 * (map & 0x1)) + (5 * byte_num)));
+ reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
+ ipu_dc_write(ipu, reg, DC_MAP_CONF_PTR(map));
+}
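+
+/*
+ * Each DC_MAP_CONF_VAL half-word written above holds (offset << 8) | mask,
+ * where offset is (by the IPU DC mapper convention) the bit position of the
+ * component's MSB on the display data bus and mask selects the component
+ * bits that are driven.  For example, the RGB24 map set up in
+ * _ipu_init_dc_mappings() places its three 8-bit components at bit
+ * positions 7, 15 and 23 with mask 0xFF.
+ */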
+
+static void _ipu_dc_map_clear(struct ipu_soc *ipu, int map)
+{
+ u32 reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(map));
+ ipu_dc_write(ipu, reg & ~(0xFFFF << (16 * (map & 0x1))),
+ DC_MAP_CONF_PTR(map));
+}
+
+static void _ipu_dc_write_tmpl(struct ipu_soc *ipu,
+ int word, u32 opcode, u32 operand, int map,
+ int wave, int glue, int sync, int stop)
+{
+ u32 reg;
+
+ if (opcode == WRG) {
+ reg = sync;
+ reg |= (glue << 4);
+ reg |= (++wave << 11);
+ reg |= ((operand & 0x1FFFF) << 15);
+ ipu_dc_tmpl_write(ipu, reg, word * 8);
+
+ reg = (operand >> 17);
+ reg |= opcode << 7;
+ reg |= (stop << 9);
+ ipu_dc_tmpl_write(ipu, reg, word * 8 + 4);
+ } else {
+ reg = sync;
+ reg |= (glue << 4);
+ reg |= (++wave << 11);
+ reg |= (++map << 15);
+ reg |= (operand << 20) & 0xFFF00000;
+ ipu_dc_tmpl_write(ipu, reg, word * 8);
+
+ reg = (operand >> 12);
+ reg |= opcode << 4;
+ reg |= (stop << 9);
+ ipu_dc_tmpl_write(ipu, reg, word * 8 + 4);
+ }
+}
+
+static void _ipu_dc_link_event(struct ipu_soc *ipu,
+ int chan, int event, int addr, int priority)
+{
+ u32 reg;
+ u32 address_shift;
+ if (event < DC_EVEN_UGDE0) {
+ reg = ipu_dc_read(ipu, DC_RL_CH(chan, event));
+ reg &= ~(0xFFFF << (16 * (event & 0x1)));
+ reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
+ ipu_dc_write(ipu, reg, DC_RL_CH(chan, event));
+ } else {
+ reg = ipu_dc_read(ipu, DC_UGDE_0((event - DC_EVEN_UGDE0) / 2));
+ if ((event - DC_EVEN_UGDE0) & 0x1) {
+ reg &= ~(0x2FF << 16);
+ reg |= (addr << 16);
+ reg |= priority ? (2 << 24) : 0x0;
+ } else {
+ reg &= ~0xFC00FFFF;
+ if (priority)
+ chan = (chan >> 1) +
+ ((((chan & 0x1) + ((chan & 0x2) >> 1))) | (chan >> 3));
+ else
+ chan = 0x7;
+ address_shift = ((event - DC_EVEN_UGDE0) >> 1) ? 7 : 8;
+ reg |= (addr << address_shift) | (priority << 3) | chan;
+ }
+ ipu_dc_write(ipu, reg, DC_UGDE_0((event - DC_EVEN_UGDE0) / 2));
+ }
+}
+
+/* Y = R *  .299 + G *  .587 + B *  .114;
+ U = R * -.168 + G * -.332 + B *  .500 + 128.;
+ V = R *  .500 + G * -.418 + B * -.082 + 128.;*/
+static const int rgb2ycbcr_coeff[5][3] = {
+ {0x4D, 0x96, 0x1D},
+ {-0x2B, -0x55, 0x80},
+ {0x80, -0x6B, -0x15},
+ {0x0000, 0x0200, 0x0200}, /* B0, B1, B2 */
+ {0x2, 0x2, 0x2}, /* S0, S1, S2 */
+};
+
+/* R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
+ G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
+ B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128)); */
+static const int ycbcr2rgb_coeff[5][3] = {
+ {0x095, 0x000, 0x0CC},
+ {0x095, 0x3CE, 0x398},
+ {0x095, 0x0FF, 0x000},
+ {0x3E42, 0x010A, 0x3DD6}, /*B0,B1,B2 */
+ {0x1, 0x1, 0x1}, /*S0,S1,S2 */
+};
+
+#define mask_a(a) ((u32)(a) & 0x3FF)
+#define mask_b(b) ((u32)(b) & 0x3FFF)
+
+/* Please keep S0, S1 and S2 as 0x2 when using this conversion */
+static int _rgb_to_yuv(int n, int red, int green, int blue)
+{
+ int c;
+ c = red * rgb2ycbcr_coeff[n][0];
+ c += green * rgb2ycbcr_coeff[n][1];
+ c += blue * rgb2ycbcr_coeff[n][2];
+ c /= 16;
+ c += rgb2ycbcr_coeff[3][n] * 4;
+ c += 8;
+ c /= 16;
+ if (c < 0)
+ c = 0;
+ if (c > 255)
+ c = 255;
+ return c;
+}
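+
+/*
+ * Worked example: for mid-grey, _rgb_to_yuv(n, 128, 128, 128) yields
+ *   n = 0 (Y): (128*0x4D + 128*0x96 + 128*0x1D)/16 = 2048, + 0 + 8, /16 = 128
+ *   n = 1 (U): (128*-0x2B + 128*-0x55 + 128*0x80)/16 = 0, + 0x200*4 + 8, /16 = 128
+ *   n = 2 (V): zero sum as well, so also 128
+ * i.e. (128, 128, 128) RGB maps to (128, 128, 128) YCbCr with these tables.
+ */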
+
+/*
+ * Row is for BG: RGB2YUV YUV2RGB RGB2RGB YUV2YUV CSC_NONE
+ * Column is for FG: RGB2YUV YUV2RGB RGB2RGB YUV2YUV CSC_NONE
+ */
+static struct dp_csc_param_t dp_csc_array[CSC_NUM][CSC_NUM] = {
+{{DP_COM_CONF_CSC_DEF_BOTH, &rgb2ycbcr_coeff}, {0, 0}, {0, 0}, {DP_COM_CONF_CSC_DEF_BG, &rgb2ycbcr_coeff}, {DP_COM_CONF_CSC_DEF_BG, &rgb2ycbcr_coeff} },
+{{0, 0}, {DP_COM_CONF_CSC_DEF_BOTH, &ycbcr2rgb_coeff}, {DP_COM_CONF_CSC_DEF_BG, &ycbcr2rgb_coeff}, {0, 0}, {DP_COM_CONF_CSC_DEF_BG, &ycbcr2rgb_coeff} },
+{{0, 0}, {DP_COM_CONF_CSC_DEF_FG, &ycbcr2rgb_coeff}, {0, 0}, {0, 0}, {0, 0} },
+{{DP_COM_CONF_CSC_DEF_FG, &rgb2ycbcr_coeff}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+{{DP_COM_CONF_CSC_DEF_FG, &rgb2ycbcr_coeff}, {DP_COM_CONF_CSC_DEF_FG, &ycbcr2rgb_coeff}, {0, 0}, {0, 0}, {0, 0} }
+};
+
+void __ipu_dp_csc_setup(struct ipu_soc *ipu,
+ int dp, struct dp_csc_param_t dp_csc_param,
+ bool srm_mode_update)
+{
+ u32 reg;
+ const int (*coeff)[5][3];
+
+ if (dp_csc_param.mode >= 0) {
+ reg = ipu_dp_read(ipu, DP_COM_CONF(dp));
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+ reg |= dp_csc_param.mode;
+ ipu_dp_write(ipu, reg, DP_COM_CONF(dp));
+ }
+
+ coeff = dp_csc_param.coeff;
+
+ if (coeff) {
+ ipu_dp_write(ipu, mask_a((*coeff)[0][0]) |
+ (mask_a((*coeff)[0][1]) << 16), DP_CSC_A_0(dp));
+ ipu_dp_write(ipu, mask_a((*coeff)[0][2]) |
+ (mask_a((*coeff)[1][0]) << 16), DP_CSC_A_1(dp));
+ ipu_dp_write(ipu, mask_a((*coeff)[1][1]) |
+ (mask_a((*coeff)[1][2]) << 16), DP_CSC_A_2(dp));
+ ipu_dp_write(ipu, mask_a((*coeff)[2][0]) |
+ (mask_a((*coeff)[2][1]) << 16), DP_CSC_A_3(dp));
+ ipu_dp_write(ipu, mask_a((*coeff)[2][2]) |
+ (mask_b((*coeff)[3][0]) << 16) |
+ ((*coeff)[4][0] << 30), DP_CSC_0(dp));
+ ipu_dp_write(ipu, mask_b((*coeff)[3][1]) | ((*coeff)[4][1] << 14) |
+ (mask_b((*coeff)[3][2]) << 16) |
+ ((*coeff)[4][2] << 30), DP_CSC_1(dp));
+ }
+
+ if (srm_mode_update) {
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+ }
+}
+
+int _ipu_dp_init(struct ipu_soc *ipu,
+ ipu_channel_t channel, uint32_t in_pixel_fmt,
+ uint32_t out_pixel_fmt)
+{
+ int in_fmt, out_fmt;
+ int dp;
+ int partial = false;
+ uint32_t reg;
+
+ if (channel == MEM_FG_SYNC) {
+ dp = DP_SYNC;
+ partial = true;
+ } else if (channel == MEM_BG_SYNC) {
+ dp = DP_SYNC;
+ partial = false;
+ } else if (channel == MEM_BG_ASYNC0) {
+ dp = DP_ASYNC0;
+ partial = false;
+ } else {
+ return -EINVAL;
+ }
+
+ in_fmt = format_to_colorspace(in_pixel_fmt);
+ out_fmt = format_to_colorspace(out_pixel_fmt);
+
+ if (partial) {
+ if (in_fmt == RGB) {
+ if (out_fmt == RGB)
+ ipu->fg_csc_type = RGB2RGB;
+ else
+ ipu->fg_csc_type = RGB2YUV;
+ } else {
+ if (out_fmt == RGB)
+ ipu->fg_csc_type = YUV2RGB;
+ else
+ ipu->fg_csc_type = YUV2YUV;
+ }
+ } else {
+ if (in_fmt == RGB) {
+ if (out_fmt == RGB)
+ ipu->bg_csc_type = RGB2RGB;
+ else
+ ipu->bg_csc_type = RGB2YUV;
+ } else {
+ if (out_fmt == RGB)
+ ipu->bg_csc_type = YUV2RGB;
+ else
+ ipu->bg_csc_type = YUV2YUV;
+ }
+ }
+
+ /* Transform color key from rgb to yuv if CSC is enabled */
+ reg = ipu_dp_read(ipu, DP_COM_CONF(dp));
+ if (ipu->color_key_4rgb && (reg & DP_COM_CONF_GWCKE) &&
+ (((ipu->fg_csc_type == RGB2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
+ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == RGB2YUV)) ||
+ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
+ ((ipu->fg_csc_type == YUV2RGB) && (ipu->bg_csc_type == YUV2RGB)))) {
+ int red, green, blue;
+ int y, u, v;
+ uint32_t color_key = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(dp)) & 0xFFFFFFL;
+
+ dev_dbg(ipu->dev, "_ipu_dp_init color key 0x%x need change to yuv fmt!\n", color_key);
+
+ red = (color_key >> 16) & 0xFF;
+ green = (color_key >> 8) & 0xFF;
+ blue = color_key & 0xFF;
+
+ y = _rgb_to_yuv(0, red, green, blue);
+ u = _rgb_to_yuv(1, red, green, blue);
+ v = _rgb_to_yuv(2, red, green, blue);
+ color_key = (y << 16) | (u << 8) | v;
+
+ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(dp)) & 0xFF000000L;
+ ipu_dp_write(ipu, reg | color_key, DP_GRAPH_WIND_CTRL(dp));
+ ipu->color_key_4rgb = false;
+
+ dev_dbg(ipu->dev, "_ipu_dp_init color key change to yuv fmt 0x%x!\n", color_key);
+ }
+
+ __ipu_dp_csc_setup(ipu, dp,
+ dp_csc_array[ipu->bg_csc_type][ipu->fg_csc_type],
+ false);
+
+ return 0;
+}
+
+void _ipu_dp_uninit(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ int dp;
+ int partial = false;
+
+ if (channel == MEM_FG_SYNC) {
+ dp = DP_SYNC;
+ partial = true;
+ } else if (channel == MEM_BG_SYNC) {
+ dp = DP_SYNC;
+ partial = false;
+ } else if (channel == MEM_BG_ASYNC0) {
+ dp = DP_ASYNC0;
+ partial = false;
+ } else {
+ return;
+ }
+
+ if (partial)
+ ipu->fg_csc_type = CSC_NONE;
+ else
+ ipu->bg_csc_type = CSC_NONE;
+
+ __ipu_dp_csc_setup(ipu, dp, dp_csc_array[ipu->bg_csc_type][ipu->fg_csc_type], false);
+}
+
+void _ipu_dc_init(struct ipu_soc *ipu, int dc_chan, int di, bool interlaced, uint32_t pixel_fmt)
+{
+ u32 reg = 0;
+
+ if ((dc_chan == 1) || (dc_chan == 5)) {
+ if (interlaced) {
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 0, 3);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 0, 2);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 0, 1);
+ } else {
+ if (di) {
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 2, 3);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 3, 2);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 1, 1);
+ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
+ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
+ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
+ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
+ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 9, 5);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 8, 5);
+ }
+ } else {
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 5, 3);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 6, 2);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 12, 1);
+ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
+ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
+ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
+ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
+ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 10, 5);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 11, 5);
+ }
+ }
+ }
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NF, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NFIELD, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOF, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOFIELD, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR, 0, 0);
+
+ reg = 0x2;
+ reg |= DC_DISP_ID_SYNC(di) << DC_WR_CH_CONF_PROG_DISP_ID_OFFSET;
+ reg |= di << 2;
+ if (interlaced)
+ reg |= DC_WR_CH_CONF_FIELD_MODE;
+ } else if ((dc_chan == 8) || (dc_chan == 9)) {
+ /* async channels */
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_0, 0x64, 1);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_1, 0x64, 1);
+
+ reg = 0x3;
+ reg |= DC_DISP_ID_SERIAL << DC_WR_CH_CONF_PROG_DISP_ID_OFFSET;
+ }
+ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
+
+ ipu_dc_write(ipu, 0x00000000, DC_WR_CH_ADDR(dc_chan));
+
+ ipu_dc_write(ipu, 0x00000084, DC_GEN);
+}
+
+void _ipu_dc_uninit(struct ipu_soc *ipu, int dc_chan)
+{
+ if ((dc_chan == 1) || (dc_chan == 5)) {
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NF, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NFIELD, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOF, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOFIELD, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 0, 0);
+ } else if ((dc_chan == 8) || (dc_chan == 9)) {
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_W_0, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_W_1, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_W_0, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_W_1, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_0, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_1, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_R_0, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_R_1, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_R_0, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_R_1, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_R_0, 0, 0);
+ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_R_1, 0, 0);
+ }
+}
+
+int _ipu_disp_chan_is_interlaced(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ if (channel == MEM_DC_SYNC)
+ return !!(ipu_dc_read(ipu, DC_WR_CH_CONF_1) &
+ DC_WR_CH_CONF_FIELD_MODE);
+ else if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))
+ return !!(ipu_dc_read(ipu, DC_WR_CH_CONF_5) &
+ DC_WR_CH_CONF_FIELD_MODE);
+ return 0;
+}
+
+void _ipu_dp_dc_enable(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ int di;
+ uint32_t reg;
+ uint32_t dc_chan;
+ int irq = 0;
+
+ if (channel == MEM_FG_SYNC)
+ irq = IPU_IRQ_DP_SF_END;
+ else if (channel == MEM_DC_SYNC)
+ dc_chan = 1;
+ else if (channel == MEM_BG_SYNC)
+ dc_chan = 5;
+ else
+ return;
+
+ if (channel == MEM_FG_SYNC) {
+ /* Enable FG channel */
+ reg = ipu_dp_read(ipu, DP_COM_CONF(DP_SYNC));
+ ipu_dp_write(ipu, reg | DP_COM_CONF_FG_EN, DP_COM_CONF(DP_SYNC));
+
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+ return;
+ } else if (channel == MEM_BG_SYNC) {
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+ }
+
+ di = ipu->dc_di_assignment[dc_chan];
+
+ /* Make sure other DC sync channel is not assigned same DI */
+ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(6 - dc_chan));
+ if ((di << 2) == (reg & DC_WR_CH_CONF_PROG_DI_ID)) {
+ reg &= ~DC_WR_CH_CONF_PROG_DI_ID;
+ reg |= di ? 0 : DC_WR_CH_CONF_PROG_DI_ID;
+ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(6 - dc_chan));
+ }
+
+ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
+ reg |= 4 << DC_WR_CH_CONF_PROG_TYPE_OFFSET;
+ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
+
+ clk_prepare_enable(ipu->pixel_clk[di]);
+ ipu->pixel_clk_en[ipu->dc_di_assignment[dc_chan]] = true;
+}
+
+static irqreturn_t dc_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_soc *ipu = dev_id;
+ struct completion *comp = &ipu->dc_comp;
+ uint32_t reg;
+ uint32_t dc_chan;
+
+ if (irq == IPU_IRQ_DC_FC_1)
+ dc_chan = 1;
+ else
+ dc_chan = 5;
+
+ if (!ipu->dc_swap) {
+ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
+ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
+
+ reg = ipu_cm_read(ipu, IPU_DISP_GEN);
+ if (ipu->dc_di_assignment[dc_chan])
+ reg &= ~DI1_COUNTER_RELEASE;
+ else
+ reg &= ~DI0_COUNTER_RELEASE;
+ ipu_cm_write(ipu, reg, IPU_DISP_GEN);
+ }
+
+ complete(comp);
+ return IRQ_HANDLED;
+}
+
+void _ipu_dp_dc_disable(struct ipu_soc *ipu, ipu_channel_t channel, bool swap)
+{
+ int ret;
+ uint32_t reg;
+ uint32_t csc;
+ uint32_t dc_chan;
+ int irq = 0;
+ int timeout = 50;
+
+ ipu->dc_swap = swap;
+
+ if (channel == MEM_DC_SYNC) {
+ dc_chan = 1;
+ irq = IPU_IRQ_DC_FC_1;
+ } else if (channel == MEM_BG_SYNC) {
+ dc_chan = 5;
+ irq = IPU_IRQ_DP_SF_END;
+ } else if (channel == MEM_FG_SYNC) {
+ /* Disable FG channel */
+ dc_chan = 5;
+
+ reg = ipu_dp_read(ipu, DP_COM_CONF(DP_SYNC));
+ csc = reg & DP_COM_CONF_CSC_DEF_MASK;
+ if (csc == DP_COM_CONF_CSC_DEF_FG)
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+
+ reg &= ~DP_COM_CONF_FG_EN;
+ ipu_dp_write(ipu, reg, DP_COM_CONF(DP_SYNC));
+
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+
+ if (ipu_is_channel_busy(ipu, MEM_BG_SYNC)) {
+ ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_DP_SF_END),
+ IPUIRQ_2_STATREG(ipu->devtype,
+ IPU_IRQ_DP_SF_END));
+ while ((ipu_cm_read(ipu,
+ IPUIRQ_2_STATREG(ipu->devtype,
+ IPU_IRQ_DP_SF_END)) &
+ IPUIRQ_2_MASK(IPU_IRQ_DP_SF_END)) == 0) {
+ msleep(2);
+ timeout -= 2;
+ if (timeout <= 0)
+ break;
+ }
+ }
+ return;
+ } else {
+ return;
+ }
+
+ init_completion(&ipu->dc_comp);
+ ret = ipu_request_irq(ipu, irq, dc_irq_handler, 0, NULL, ipu);
+ if (ret < 0) {
+ dev_err(ipu->dev, "DC irq %d in use\n", irq);
+ return;
+ }
+ ret = wait_for_completion_timeout(&ipu->dc_comp, msecs_to_jiffies(50));
+ ipu_free_irq(ipu, irq, ipu);
+ dev_dbg(ipu->dev, "DC stop timeout - %d * 10ms\n", 5 - ret);
+
+ if (ipu->dc_swap) {
+ /* Swap DC channel 1 and 5 settings, and disable old dc chan */
+ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
+ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(6 - dc_chan));
+ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ reg ^= DC_WR_CH_CONF_PROG_DI_ID;
+ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
+ }
+}
+
+void _ipu_init_dc_mappings(struct ipu_soc *ipu)
+{
+ /* IPU_PIX_FMT_RGB24 */
+ _ipu_dc_map_clear(ipu, 0);
+ _ipu_dc_map_config(ipu, 0, 0, 7, 0xFF);
+ _ipu_dc_map_config(ipu, 0, 1, 15, 0xFF);
+ _ipu_dc_map_config(ipu, 0, 2, 23, 0xFF);
+
+ /* IPU_PIX_FMT_RGB666 */
+ _ipu_dc_map_clear(ipu, 1);
+ _ipu_dc_map_config(ipu, 1, 0, 5, 0xFC);
+ _ipu_dc_map_config(ipu, 1, 1, 11, 0xFC);
+ _ipu_dc_map_config(ipu, 1, 2, 17, 0xFC);
+
+ /* IPU_PIX_FMT_YUV444 */
+ _ipu_dc_map_clear(ipu, 2);
+ _ipu_dc_map_config(ipu, 2, 0, 15, 0xFF);
+ _ipu_dc_map_config(ipu, 2, 1, 23, 0xFF);
+ _ipu_dc_map_config(ipu, 2, 2, 7, 0xFF);
+
+ /* IPU_PIX_FMT_RGB565 */
+ _ipu_dc_map_clear(ipu, 3);
+ _ipu_dc_map_config(ipu, 3, 0, 4, 0xF8);
+ _ipu_dc_map_config(ipu, 3, 1, 10, 0xFC);
+ _ipu_dc_map_config(ipu, 3, 2, 15, 0xF8);
+
+ /* IPU_PIX_FMT_LVDS666 */
+ _ipu_dc_map_clear(ipu, 4);
+ _ipu_dc_map_config(ipu, 4, 0, 5, 0xFC);
+ _ipu_dc_map_config(ipu, 4, 1, 13, 0xFC);
+ _ipu_dc_map_config(ipu, 4, 2, 21, 0xFC);
+
+ /* IPU_PIX_FMT_VYUY 16bit width */
+ _ipu_dc_map_clear(ipu, 5);
+ _ipu_dc_map_config(ipu, 5, 0, 7, 0xFF);
+ _ipu_dc_map_config(ipu, 5, 1, 0, 0x0);
+ _ipu_dc_map_config(ipu, 5, 2, 15, 0xFF);
+ _ipu_dc_map_clear(ipu, 6);
+ _ipu_dc_map_config(ipu, 6, 0, 0, 0x0);
+ _ipu_dc_map_config(ipu, 6, 1, 7, 0xFF);
+ _ipu_dc_map_config(ipu, 6, 2, 15, 0xFF);
+
+ /* IPU_PIX_FMT_UYUV 16bit width */
+ _ipu_dc_map_clear(ipu, 7);
+ _ipu_dc_map_link(ipu, 7, 6, 0, 6, 1, 6, 2);
+ _ipu_dc_map_clear(ipu, 8);
+ _ipu_dc_map_link(ipu, 8, 5, 0, 5, 1, 5, 2);
+
+ /* IPU_PIX_FMT_YUYV 16bit width */
+ _ipu_dc_map_clear(ipu, 9);
+ _ipu_dc_map_link(ipu, 9, 5, 2, 5, 1, 5, 0);
+ _ipu_dc_map_clear(ipu, 10);
+ _ipu_dc_map_link(ipu, 10, 5, 1, 5, 2, 5, 0);
+
+ /* IPU_PIX_FMT_YVYU 16bit width */
+ _ipu_dc_map_clear(ipu, 11);
+ _ipu_dc_map_link(ipu, 11, 5, 1, 5, 2, 5, 0);
+ _ipu_dc_map_clear(ipu, 12);
+ _ipu_dc_map_link(ipu, 12, 5, 2, 5, 1, 5, 0);
+
+ /* IPU_PIX_FMT_GBR24 */
+ /* IPU_PIX_FMT_VYU444 */
+ _ipu_dc_map_clear(ipu, 13);
+ _ipu_dc_map_link(ipu, 13, 0, 2, 0, 0, 0, 1);
+
+ /* IPU_PIX_FMT_BGR24 */
+ _ipu_dc_map_clear(ipu, 14);
+ _ipu_dc_map_link(ipu, 14, 0, 2, 0, 1, 0, 0);
+}
+
+int _ipu_pixfmt_to_map(uint32_t fmt)
+{
+ switch (fmt) {
+ case IPU_PIX_FMT_GENERIC:
+ case IPU_PIX_FMT_RGB24:
+ return 0;
+ case IPU_PIX_FMT_RGB666:
+ return 1;
+ case IPU_PIX_FMT_YUV444:
+ return 2;
+ case IPU_PIX_FMT_RGB565:
+ return 3;
+ case IPU_PIX_FMT_LVDS666:
+ return 4;
+ case IPU_PIX_FMT_VYUY:
+ return 6;
+ case IPU_PIX_FMT_UYVY:
+ return 8;
+ case IPU_PIX_FMT_YUYV:
+ return 10;
+ case IPU_PIX_FMT_YVYU:
+ return 12;
+ case IPU_PIX_FMT_GBR24:
+ case IPU_PIX_FMT_VYU444:
+ return 13;
+ case IPU_PIX_FMT_BGR24:
+ return 14;
+ }
+
+ return -1;
+}
+
+/*!
+ * This function sets the color space conversion of the DP.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param param If it's not NULL, update the csc table
+ * with this parameter.
+ *
+ * @return N/A
+ */
+void _ipu_dp_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
+{
+ int dp;
+ struct dp_csc_param_t dp_csc_param;
+
+ if (channel == MEM_FG_SYNC)
+ dp = DP_SYNC;
+ else if (channel == MEM_BG_SYNC)
+ dp = DP_SYNC;
+ else if (channel == MEM_BG_ASYNC0)
+ dp = DP_ASYNC0;
+ else
+ return;
+
+ dp_csc_param.mode = -1;
+ dp_csc_param.coeff = param;
+ __ipu_dp_csc_setup(ipu, dp, dp_csc_param, true);
+}
+
+void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
+{
+ _ipu_dp_set_csc_coefficients(ipu, channel, param);
+}
+EXPORT_SYMBOL(ipu_set_csc_coefficients);
+
+/*!
+ * This function is called to adapt the synchronous LCD panel timings to the IPU's restrictions.
+ *
+ */
+void adapt_panel_to_ipu_restricitions(struct ipu_soc *ipu, uint16_t *v_start_width,
+ uint16_t *v_sync_width,
+ uint16_t *v_end_width)
+{
+ if (*v_end_width < 2) {
+ uint16_t diff = 2 - *v_end_width;
+ if (*v_start_width >= diff) {
+ *v_end_width = 2;
+ *v_start_width = *v_start_width - diff;
+ } else if (*v_sync_width > diff) {
+ *v_end_width = 2;
+ *v_sync_width = *v_sync_width - diff;
+ } else
+ dev_err(ipu->dev, "WARNING: try to adapt timming, but failed\n");
+ dev_err(ipu->dev, "WARNING: adapt panel end blank lines\n");
+ }
+}
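+
+/*
+ * Example: v_end_width = 0 with v_start_width = 10 is adapted to
+ * v_end_width = 2 and v_start_width = 8, keeping the total line count per
+ * frame unchanged while satisfying the minimum end-of-frame blanking.
+ */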
+
+/*!
+ * This function is called to initialize a synchronous LCD panel.
+ *
+ * @param ipu ipu handler
+ * @param disp The DI the panel is attached to.
+ *
+ * @param pixel_clk Desired pixel clock frequency in Hz.
+ *
+ * @param pixel_fmt Input parameter for pixel format of buffer.
+ * Pixel format is a FOURCC ASCII code.
+ *
+ * @param width The width of panel in pixels.
+ *
+ * @param height The height of panel in pixels.
+ *
+ * @param hStartWidth The number of pixel clocks between the HSYNC
+ * signal pulse and the start of valid data.
+ *
+ * @param hSyncWidth The width of the HSYNC signal in units of pixel
+ * clocks.
+ *
+ * @param hEndWidth The number of pixel clocks between the end of
+ * valid data and the HSYNC signal for next line.
+ *
+ * @param vStartWidth The number of lines between the VSYNC
+ * signal pulse and the start of valid data.
+ *
+ * @param vSyncWidth The width of the VSYNC signal in units of lines
+ *
+ * @param vEndWidth The number of lines between the end of valid
+ * data and the VSYNC signal for next frame.
+ *
+ * @param sig Bitfield of signal polarities for LCD interface.
+ *
+ * @return This function returns 0 on success or negative error code on
+ * fail.
+ */
+int32_t ipu_init_sync_panel(struct ipu_soc *ipu, int disp, uint32_t pixel_clk,
+ uint16_t width, uint16_t height,
+ uint32_t pixel_fmt,
+ uint16_t h_start_width, uint16_t h_sync_width,
+ uint16_t h_end_width, uint16_t v_start_width,
+ uint16_t v_sync_width, uint16_t v_end_width,
+ uint32_t v_to_h_sync, ipu_di_signal_cfg_t sig)
+{
+ uint32_t field0_offset = 0;
+ uint32_t field1_offset;
+ uint32_t reg;
+ uint32_t di_gen, vsync_cnt;
+ uint32_t div, rounded_pixel_clk;
+ uint32_t h_total, v_total;
+ int map;
+ int ret;
+ struct clk *ldb_di0_clk, *ldb_di1_clk;
+ struct clk *di_parent;
+
+ dev_dbg(ipu->dev, "panel size = %d x %d\n", width, height);
+
+ if ((v_sync_width == 0) || (h_sync_width == 0))
+ return -EINVAL;
+
+ adapt_panel_to_ipu_restricitions(ipu, &v_start_width, &v_sync_width, &v_end_width);
+ h_total = width + h_sync_width + h_start_width + h_end_width;
+ v_total = height + v_sync_width + v_start_width + v_end_width;
+
+ /* Init clocking */
+ dev_dbg(ipu->dev, "pixel clk = %d\n", pixel_clk);
+
+ di_parent = clk_get_parent(ipu->di_clk_sel[disp]);
+ if (!di_parent) {
+ dev_err(ipu->dev, "get di clk parent fail\n");
+ return -EINVAL;
+ }
+ ldb_di0_clk = clk_get(ipu->dev, "ldb_di0");
+ if (IS_ERR(ldb_di0_clk)) {
+ dev_err(ipu->dev, "clk_get di0 failed");
+ return PTR_ERR(ldb_di0_clk);
+ }
+ ldb_di1_clk = clk_get(ipu->dev, "ldb_di1");
+ if (IS_ERR(ldb_di1_clk)) {
+ dev_err(ipu->dev, "clk_get di1 failed");
+ return PTR_ERR(ldb_di1_clk);
+ }
+
+ if (ldb_di0_clk == di_parent || ldb_di1_clk == di_parent) {
+ /* if di clk parent is tve/ldb, then keep it;*/
+ dev_dbg(ipu->dev, "use special clk parent\n");
+ ret = clk_set_parent(ipu->pixel_clk_sel[disp], ipu->di_clk[disp]);
+ if (ret) {
+ dev_err(ipu->dev, "set pixel clk error:%d\n", ret);
+ return ret;
+ }
+ clk_put(ldb_di0_clk);
+ clk_put(ldb_di1_clk);
+ } else {
+ /* try ipu clk first*/
+ dev_dbg(ipu->dev, "try ipu internal clk\n");
+ ret = clk_set_parent(ipu->pixel_clk_sel[disp], ipu->ipu_clk);
+ if (ret) {
+ dev_err(ipu->dev, "set pixel clk error:%d\n", ret);
+ return ret;
+ }
+ rounded_pixel_clk = clk_round_rate(ipu->pixel_clk[disp], pixel_clk);
+ dev_dbg(ipu->dev, "rounded pix clk:%d\n", rounded_pixel_clk);
+ /*
+ * we will only use a 1/2 fraction of the ipu clk,
+ * so if the clk rate does not fit, try the ext clk.
+ */
+ if (!sig.int_clk &&
+ ((rounded_pixel_clk >= pixel_clk + pixel_clk/200) ||
+ (rounded_pixel_clk <= pixel_clk - pixel_clk/200))) {
+ dev_dbg(ipu->dev, "try ipu ext di clk\n");
+
+ rounded_pixel_clk =
+ clk_round_rate(ipu->di_clk[disp], pixel_clk);
+ ret = clk_set_rate(ipu->di_clk[disp],
+ rounded_pixel_clk);
+ if (ret) {
+ dev_err(ipu->dev,
+ "set di clk rate error:%d\n", ret);
+ return ret;
+ }
+ dev_dbg(ipu->dev, "di clk:%d\n", rounded_pixel_clk);
+ ret = clk_set_parent(ipu->pixel_clk_sel[disp],
+ ipu->di_clk[disp]);
+ if (ret) {
+ dev_err(ipu->dev,
+ "set pixel clk parent error:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+ rounded_pixel_clk = clk_round_rate(ipu->pixel_clk[disp], pixel_clk);
+ dev_dbg(ipu->dev, "round pixel clk:%d\n", rounded_pixel_clk);
+ ret = clk_set_rate(ipu->pixel_clk[disp], rounded_pixel_clk);
+ if (ret) {
+ dev_err(ipu->dev, "set pixel clk rate error:%d\n", ret);
+ return ret;
+ }
+ msleep(5);
+ /* Get integer portion of divider */
+ div = clk_get_rate(clk_get_parent(ipu->pixel_clk_sel[disp])) / rounded_pixel_clk;
+ dev_dbg(ipu->dev, "div:%d\n", div);
+ if (!div) {
+ dev_err(ipu->dev, "invalid pixel clk div = 0\n");
+ return -EINVAL;
+ }
+
+
+ mutex_lock(&ipu->mutex_lock);
+
+ _ipu_di_data_wave_config(ipu, disp, SYNC_WAVE, div - 1, div - 1);
+ _ipu_di_data_pin_config(ipu, disp, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
+
+ map = _ipu_pixfmt_to_map(pixel_fmt);
+ if (map < 0) {
+ dev_dbg(ipu->dev, "IPU_DISP: No MAP\n");
+ mutex_unlock(&ipu->mutex_lock);
+ return -EINVAL;
+ }
+
+ /*clear DI*/
+ di_gen = ipu_di_read(ipu, disp, DI_GENERAL);
+ di_gen &= (0x3 << 20);
+ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
+
+ if (sig.interlaced) {
+ if (ipu->devtype >= IPUv3EX) {
+ /* Setup internal HSYNC waveform */
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 1, /* counter */
+ h_total/2 - 1, /* run count */
+ DI_SYNC_CLK, /* run_resolution */
+ 0, /* offset */
+ DI_SYNC_NONE, /* offset resolution */
+ 0, /* repeat count */
+ DI_SYNC_NONE, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 0 /* COUNT DOWN */
+ );
+
+ /* Field 1 VSYNC waveform */
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 2, /* counter */
+ h_total - 1, /* run count */
+ DI_SYNC_CLK, /* run_resolution */
+ 0, /* offset */
+ DI_SYNC_NONE, /* offset resolution */
+ 0, /* repeat count */
+ DI_SYNC_NONE, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 2*div /* COUNT DOWN */
+ );
+
+ /* Setup internal HSYNC waveform */
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 3, /* counter */
+ v_total*2 - 1, /* run count */
+ DI_SYNC_INT_HSYNC, /* run_resolution */
+ 1, /* offset */
+ DI_SYNC_INT_HSYNC, /* offset resolution */
+ 0, /* repeat count */
+ DI_SYNC_NONE, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 2*div /* COUNT DOWN */
+ );
+
+ /* Active Field ? */
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 4, /* counter */
+ v_total/2 - 1, /* run count */
+ DI_SYNC_HSYNC, /* run_resolution */
+ v_start_width, /* offset */
+ DI_SYNC_HSYNC, /* offset resolution */
+ 2, /* repeat count */
+ DI_SYNC_VSYNC, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 0 /* COUNT DOWN */
+ );
+
+ /* Active Line */
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 5, /* counter */
+ 0, /* run count */
+ DI_SYNC_HSYNC, /* run_resolution */
+ 0, /* offset */
+ DI_SYNC_NONE, /* offset resolution */
+ height/2, /* repeat count */
+ 4, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 0 /* COUNT DOWN */
+ );
+
+ /* Field 0 VSYNC waveform */
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 6, /* counter */
+ v_total - 1, /* run count */
+ DI_SYNC_HSYNC, /* run_resolution */
+ 0, /* offset */
+ DI_SYNC_NONE, /* offset resolution */
+ 0, /* repeat count */
+ DI_SYNC_NONE, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 0 /* COUNT DOWN */
+ );
+
+ /* DC VSYNC waveform */
+ vsync_cnt = 7;
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 7, /* counter */
+ v_total/2 - 1, /* run count */
+ DI_SYNC_HSYNC, /* run_resolution */
+ 9, /* offset */
+ DI_SYNC_HSYNC, /* offset resolution */
+ 2, /* repeat count */
+ DI_SYNC_VSYNC, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 0 /* COUNT DOWN */
+ );
+
+ /* active pixel waveform */
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 8, /* counter */
+ 0, /* run count */
+ DI_SYNC_CLK, /* run_resolution */
+ h_start_width, /* offset */
+ DI_SYNC_CLK, /* offset resolution */
+ width, /* repeat count */
+ 5, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 0 /* COUNT DOWN */
+ );
+
+ /* Second VSYNC */
+ _ipu_di_sync_config(ipu,
+ disp, /* display */
+ 9, /* counter */
+ v_total - 1, /* run count */
+ DI_SYNC_INT_HSYNC, /* run_resolution */
+ v_total/2, /* offset */
+ DI_SYNC_INT_HSYNC, /* offset resolution */
+ 0, /* repeat count */
+ DI_SYNC_HSYNC, /* CNT_CLR_SEL */
+ 0, /* CNT_POLARITY_GEN_EN */
+ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
+ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
+ 0, /* COUNT UP */
+ 2*div /* COUNT DOWN */
+ );
+
+ /* set gentime select and tag sel */
+ reg = ipu_di_read(ipu, disp, DI_SW_GEN1(9));
+ reg &= 0x1FFFFFFF;
+ reg |= (3-1)<<29 | 0x00008000;
+ ipu_di_write(ipu, disp, reg, DI_SW_GEN1(9));
+
+ ipu_di_write(ipu, disp, v_total / 2 - 1, DI_SCR_CONF);
+
+ /* set y_sel = 1 */
+ di_gen |= 0x10000000;
+ di_gen |= DI_GEN_POLARITY_5;
+ di_gen |= DI_GEN_POLARITY_8;
+ } else {
+ /* Setup internal HSYNC waveform */
+ _ipu_di_sync_config(ipu, disp, 1, h_total - 1, DI_SYNC_CLK,
+ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
+ DI_SYNC_NONE, 0, 0);
+
+ field1_offset = v_sync_width + v_start_width + height / 2 +
+ v_end_width;
+ if (sig.odd_field_first) {
+ field0_offset = field1_offset - 1;
+ field1_offset = 0;
+ }
+ v_total += v_start_width + v_end_width;
+
+ /* Field 1 VSYNC waveform */
+ _ipu_di_sync_config(ipu, disp, 2, v_total - 1, 1,
+ field0_offset,
+ field0_offset ? 1 : DI_SYNC_NONE,
+ 0, DI_SYNC_NONE, 0,
+ DI_SYNC_NONE, DI_SYNC_NONE, 0, 4);
+
+ /* Setup internal HSYNC waveform */
+ _ipu_di_sync_config(ipu, disp, 3, h_total - 1, DI_SYNC_CLK,
+ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0,
+ DI_SYNC_NONE, DI_SYNC_NONE, 0, 4);
+
+ /* Active Field ? */
+ _ipu_di_sync_config(ipu, disp, 4,
+ field0_offset ?
+ field0_offset : field1_offset - 2,
+ 1, v_start_width + v_sync_width, 1, 2, 2,
+ 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0);
+
+ /* Active Line */
+ _ipu_di_sync_config(ipu, disp, 5, 0, 1,
+ 0, DI_SYNC_NONE,
+ height / 2, 4, 0, DI_SYNC_NONE,
+ DI_SYNC_NONE, 0, 0);
+
+ /* Field 0 VSYNC waveform */
+ _ipu_di_sync_config(ipu, disp, 6, v_total - 1, 1,
+ 0, DI_SYNC_NONE,
+ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
+ DI_SYNC_NONE, 0, 0);
+
+ /* DC VSYNC waveform */
+ vsync_cnt = 7;
+ _ipu_di_sync_config(ipu, disp, 7, 0, 1,
+ field1_offset,
+ field1_offset ? 1 : DI_SYNC_NONE,
+ 1, 2, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0);
+
+ /* active pixel waveform */
+ _ipu_di_sync_config(ipu, disp, 8, 0, DI_SYNC_CLK,
+ h_sync_width + h_start_width, DI_SYNC_CLK,
+ width, 5, 0, DI_SYNC_NONE, DI_SYNC_NONE,
+ 0, 0);
+
+ /* ??? */
+ _ipu_di_sync_config(ipu, disp, 9, v_total - 1, 2,
+ 0, DI_SYNC_NONE,
+ 0, DI_SYNC_NONE, 6, DI_SYNC_NONE,
+ DI_SYNC_NONE, 0, 0);
+
+ reg = ipu_di_read(ipu, disp, DI_SW_GEN1(9));
+ reg |= 0x8000;
+ ipu_di_write(ipu, disp, reg, DI_SW_GEN1(9));
+
+ ipu_di_write(ipu, disp, v_sync_width + v_start_width +
+ v_end_width + height / 2 - 1, DI_SCR_CONF);
+ }
+
+ /* Init template microcode */
+ _ipu_dc_write_tmpl(ipu, 0, WROD(0), 0, map, SYNC_WAVE, 0, 8, 1);
+
+ if (sig.Hsync_pol)
+ di_gen |= DI_GEN_POLARITY_3;
+ if (sig.Vsync_pol)
+ di_gen |= DI_GEN_POLARITY_2;
+ } else {
+ /* Setup internal HSYNC waveform */
+ _ipu_di_sync_config(ipu, disp, 1, h_total - 1, DI_SYNC_CLK,
+ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
+ DI_SYNC_NONE, 0, 0);
+
+ /* Setup external (delayed) HSYNC waveform */
+ _ipu_di_sync_config(ipu, disp, DI_SYNC_HSYNC, h_total - 1,
+ DI_SYNC_CLK, div * v_to_h_sync, DI_SYNC_CLK,
+ 0, DI_SYNC_NONE, 1, DI_SYNC_NONE,
+ DI_SYNC_CLK, 0, h_sync_width * 2);
+ /* Setup VSYNC waveform */
+ vsync_cnt = DI_SYNC_VSYNC;
+ _ipu_di_sync_config(ipu, disp, DI_SYNC_VSYNC, v_total - 1,
+ DI_SYNC_INT_HSYNC, 0, DI_SYNC_NONE, 0,
+ DI_SYNC_NONE, 1, DI_SYNC_NONE,
+ DI_SYNC_INT_HSYNC, 0, v_sync_width * 2);
+ ipu_di_write(ipu, disp, v_total - 1, DI_SCR_CONF);
+
+ /* Setup active data waveform to sync with DC */
+ _ipu_di_sync_config(ipu, disp, 4, 0, DI_SYNC_HSYNC,
+ v_sync_width + v_start_width, DI_SYNC_HSYNC, height,
+ DI_SYNC_VSYNC, 0, DI_SYNC_NONE,
+ DI_SYNC_NONE, 0, 0);
+ _ipu_di_sync_config(ipu, disp, 5, 0, DI_SYNC_CLK,
+ h_sync_width + h_start_width, DI_SYNC_CLK,
+ width, 4, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0,
+ 0);
+
+ /* set VGA delayed hsync/vsync regardless of whether VGA is enabled */
+ if (disp) {
+ /* counter 7 for VGA delayed HSYNC */
+ _ipu_di_sync_config(ipu, disp, 7,
+ h_total - 1, DI_SYNC_CLK,
+ 18, DI_SYNC_CLK,
+ 0, DI_SYNC_NONE,
+ 1, DI_SYNC_NONE, DI_SYNC_CLK,
+ 0, h_sync_width * 2);
+
+ /* counter 8 for VGA delayed VSYNC */
+ _ipu_di_sync_config(ipu, disp, 8,
+ v_total - 1, DI_SYNC_INT_HSYNC,
+ 1, DI_SYNC_INT_HSYNC,
+ 0, DI_SYNC_NONE,
+ 1, DI_SYNC_NONE, DI_SYNC_INT_HSYNC,
+ 0, v_sync_width * 2);
+ }
+
+ /* reset all unused counters */
+ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(6));
+ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(6));
+ if (!disp) {
+ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(7));
+ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(7));
+ ipu_di_write(ipu, disp, 0, DI_STP_REP(7));
+ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(8));
+ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(8));
+ ipu_di_write(ipu, disp, 0, DI_STP_REP(8));
+ }
+ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(9));
+ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(9));
+ ipu_di_write(ipu, disp, 0, DI_STP_REP(9));
+
+ reg = ipu_di_read(ipu, disp, DI_STP_REP(6));
+ reg &= 0x0000FFFF;
+ ipu_di_write(ipu, disp, reg, DI_STP_REP(6));
+
+ /* Init template microcode */
+ if (disp) {
+ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
+ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
+ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
+ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
+ _ipu_dc_write_tmpl(ipu, 8, WROD(0), 0, (map - 1), SYNC_WAVE, 0, 5, 1);
+ _ipu_dc_write_tmpl(ipu, 9, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
+ /* configure user events according to DISP NUM */
+ ipu_dc_write(ipu, (width - 1), DC_UGDE_3(disp));
+ }
+ _ipu_dc_write_tmpl(ipu, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
+ _ipu_dc_write_tmpl(ipu, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
+ _ipu_dc_write_tmpl(ipu, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+ _ipu_dc_write_tmpl(ipu, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
+
+ } else {
+ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
+ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
+ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
+ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
+ _ipu_dc_write_tmpl(ipu, 10, WROD(0), 0, (map - 1), SYNC_WAVE, 0, 5, 1);
+ _ipu_dc_write_tmpl(ipu, 11, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
+ /* configure user events according to DISP NUM */
+ ipu_dc_write(ipu, width - 1, DC_UGDE_3(disp));
+ }
+ _ipu_dc_write_tmpl(ipu, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
+ _ipu_dc_write_tmpl(ipu, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
+ _ipu_dc_write_tmpl(ipu, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+ _ipu_dc_write_tmpl(ipu, 12, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
+ }
+
+ if (sig.Hsync_pol) {
+ di_gen |= DI_GEN_POLARITY_2;
+ if (disp)
+ di_gen |= DI_GEN_POLARITY_7;
+ }
+ if (sig.Vsync_pol) {
+ di_gen |= DI_GEN_POLARITY_3;
+ if (disp)
+ di_gen |= DI_GEN_POLARITY_8;
+ }
+ }
+ /* changing DISP_CLK polarity: it can be wrong for some applications */
+ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
+ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
+ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
+ (pixel_fmt == IPU_PIX_FMT_VYUY))
+ di_gen |= 0x00020000;
+
+ if (!sig.clk_pol)
+ di_gen |= DI_GEN_POLARITY_DISP_CLK;
+
+ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
+
+ ipu_di_write(ipu, disp, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) |
+ 0x00000002, DI_SYNC_AS_GEN);
+ reg = ipu_di_read(ipu, disp, DI_POL);
+ reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
+ if (sig.enable_pol)
+ reg |= DI_POL_DRDY_POLARITY_15;
+ if (sig.data_pol)
+ reg |= DI_POL_DRDY_DATA_POLARITY;
+ ipu_di_write(ipu, disp, reg, DI_POL);
+
+ ipu_dc_write(ipu, width, DC_DISP_CONF2(DC_DISP_ID_SYNC(disp)));
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_init_sync_panel);
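+
+/*
+ * Illustrative call for a standard 1024x768@60 panel on DI0.  The timing
+ * values are the common VESA numbers, not anything mandated by this
+ * driver, and sig_cfg is an ipu_di_signal_cfg_t filled in by the caller:
+ *
+ *   ipu_init_sync_panel(ipu, 0, 65000000, 1024, 768, IPU_PIX_FMT_RGB24,
+ *                       160, 136, 24,    (h_start, h_sync, h_end widths)
+ *                       29, 6, 3,        (v_start, v_sync, v_end widths)
+ *                       0, sig_cfg);
+ */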
+
+void ipu_uninit_sync_panel(struct ipu_soc *ipu, int disp)
+{
+ uint32_t reg;
+ uint32_t di_gen;
+
+ if ((disp != 0) && (disp != 1))
+ return;
+
+ mutex_lock(&ipu->mutex_lock);
+
+ di_gen = ipu_di_read(ipu, disp, DI_GENERAL);
+ di_gen |= 0x3ff | DI_GEN_POLARITY_DISP_CLK;
+ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
+
+ reg = ipu_di_read(ipu, disp, DI_POL);
+ reg |= 0x3ffffff;
+ ipu_di_write(ipu, disp, reg, DI_POL);
+
+ mutex_unlock(&ipu->mutex_lock);
+}
+EXPORT_SYMBOL(ipu_uninit_sync_panel);
+
+int ipu_init_async_panel(struct ipu_soc *ipu, int disp, int type, uint32_t cycle_time,
+ uint32_t pixel_fmt, ipu_adc_sig_cfg_t sig)
+{
+ int map;
+ u32 ser_conf = 0;
+ u32 div;
+ u32 di_clk = clk_get_rate(ipu->ipu_clk);
+
+ /* round up cycle_time, then calculate the divider using scaled math */
+ cycle_time += (1000000000UL / di_clk) - 1;
+ div = (cycle_time * (di_clk / 256UL)) / (1000000000UL / 256UL);
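+ /*
+ * e.g. with di_clk = 200 MHz and a requested cycle_time of 100 ns:
+ * 1000000000/di_clk = 5, so cycle_time becomes 104, and
+ * div = (104 * 781250) / 3906250 = 20, i.e. 20 clocks of 5 ns = 100 ns.
+ */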
+
+ map = _ipu_pixfmt_to_map(pixel_fmt);
+ if (map < 0)
+ return -EINVAL;
+
+ mutex_lock(&ipu->mutex_lock);
+
+ if (type == IPU_PANEL_SERIAL) {
+ ipu_di_write(ipu, disp, (div << 24) | ((sig.ifc_width - 1) << 4),
+ DI_DW_GEN(ASYNC_SER_WAVE));
+
+ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_CS,
+ 0, 0, (div * 2) + 1);
+ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_SER_CLK,
+ 1, div, div * 2);
+ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_SER_RS,
+ 2, 0, 0);
+
+ _ipu_dc_write_tmpl(ipu, 0x64, WROD(0), 0, map, ASYNC_SER_WAVE, 0, 0, 1);
+
+ /* Configure DC for serial panel */
+ ipu_dc_write(ipu, 0x14, DC_DISP_CONF1(DC_DISP_ID_SERIAL));
+
+ if (sig.clk_pol)
+ ser_conf |= DI_SER_CONF_SERIAL_CLK_POL;
+ if (sig.data_pol)
+ ser_conf |= DI_SER_CONF_SERIAL_DATA_POL;
+ if (sig.rs_pol)
+ ser_conf |= DI_SER_CONF_SERIAL_RS_POL;
+ if (sig.cs_pol)
+ ser_conf |= DI_SER_CONF_SERIAL_CS_POL;
+ ipu_di_write(ipu, disp, ser_conf, DI_SER_CONF);
+ }
+
+ mutex_unlock(&ipu->mutex_lock);
+ return 0;
+}
+EXPORT_SYMBOL(ipu_init_async_panel);
+
+/*!
+ * This function sets the foreground and background plane global alpha blending
+ * modes. It also selects which plane acts as the DP graphic plane,
+ * based on the IPUv3 DP channel that is passed in.
+ *
+ * @param ipu ipu handler
+ * @param channel IPUv3 DP channel
+ *
+ * @param enable Boolean to enable or disable global alpha
+ * blending. If disabled, local blending is used.
+ *
+ * @param alpha Global alpha value.
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int32_t ipu_disp_set_global_alpha(struct ipu_soc *ipu, ipu_channel_t channel,
+ bool enable, uint8_t alpha)
+{
+ uint32_t reg;
+ uint32_t flow;
+ bool bg_chan;
+
+ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
+ flow = DP_SYNC;
+ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
+ flow = DP_ASYNC0;
+ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
+ flow = DP_ASYNC1;
+ else
+ return -EINVAL;
+
+ if (channel == MEM_BG_SYNC || channel == MEM_BG_ASYNC0 ||
+ channel == MEM_BG_ASYNC1)
+ bg_chan = true;
+ else
+ bg_chan = false;
+
+ _ipu_get(ipu);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ if (bg_chan) {
+ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
+ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWSEL, DP_COM_CONF(flow));
+ } else {
+ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
+ ipu_dp_write(ipu, reg | DP_COM_CONF_GWSEL, DP_COM_CONF(flow));
+ }
+
+ if (enable) {
+ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(flow)) & 0x00FFFFFFL;
+ ipu_dp_write(ipu, reg | ((uint32_t) alpha << 24),
+ DP_GRAPH_WIND_CTRL(flow));
+
+ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
+ ipu_dp_write(ipu, reg | DP_COM_CONF_GWAM, DP_COM_CONF(flow));
+ } else {
+ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
+ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWAM, DP_COM_CONF(flow));
+ }
+
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ _ipu_put(ipu);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_disp_set_global_alpha);
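+
+/*
+ * Typical usage (values are illustrative): blend the graphic plane at
+ * roughly 50% over the sync flow, then switch back to per-pixel (local)
+ * alpha:
+ *
+ *   ipu_disp_set_global_alpha(ipu, MEM_BG_SYNC, true, 128);
+ *   ipu_disp_set_global_alpha(ipu, MEM_BG_SYNC, false, 0);
+ */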
+
+/*!
+ * This function sets the transparent color key for SDC graphic plane.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param enable Boolean to enable or disable color key
+ *
+ * @param color_key 24-bit RGB color for transparent color key.
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int32_t ipu_disp_set_color_key(struct ipu_soc *ipu, ipu_channel_t channel,
+ bool enable, uint32_t color_key)
+{
+ uint32_t reg, flow;
+ int y, u, v;
+ int red, green, blue;
+
+ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
+ flow = DP_SYNC;
+ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
+ flow = DP_ASYNC0;
+ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
+ flow = DP_ASYNC1;
+ else
+ return -EINVAL;
+
+ _ipu_get(ipu);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ ipu->color_key_4rgb = true;
+ /* Transform color key from rgb to yuv if CSC is enabled */
+ if (((ipu->fg_csc_type == RGB2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
+ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == RGB2YUV)) ||
+ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
+ ((ipu->fg_csc_type == YUV2RGB) && (ipu->bg_csc_type == YUV2RGB))) {
+
+ dev_dbg(ipu->dev, "color key 0x%x need change to yuv fmt\n", color_key);
+
+ red = (color_key >> 16) & 0xFF;
+ green = (color_key >> 8) & 0xFF;
+ blue = color_key & 0xFF;
+
+ y = _rgb_to_yuv(0, red, green, blue);
+ u = _rgb_to_yuv(1, red, green, blue);
+ v = _rgb_to_yuv(2, red, green, blue);
+ color_key = (y << 16) | (u << 8) | v;
+
+ ipu->color_key_4rgb = false;
+
+ dev_dbg(ipu->dev, "color key change to yuv fmt 0x%x\n", color_key);
+ }
+
+ if (enable) {
+ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(flow)) & 0xFF000000L;
+ ipu_dp_write(ipu, reg | color_key, DP_GRAPH_WIND_CTRL(flow));
+
+ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
+ ipu_dp_write(ipu, reg | DP_COM_CONF_GWCKE, DP_COM_CONF(flow));
+ } else {
+ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
+ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWCKE, DP_COM_CONF(flow));
+ }
+
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ _ipu_put(ipu);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_disp_set_color_key);
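+
+/*
+ * Example: make pure black in the graphic plane transparent on the sync
+ * flow; the key is given as 24-bit RGB and is converted to YUV internally
+ * when the relevant CSC paths are active:
+ *
+ *   ipu_disp_set_color_key(ipu, MEM_BG_SYNC, true, 0x000000);
+ */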
+
+/*!
+ * This function sets the gamma correction for DP output.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param enable Boolean to enable or disable gamma correction.
+ *
+ * @param constk Gamma piecewise linear approximation constk coeff.
+ *
+ * @param slopek Gamma piecewise linear approximation slopek coeff.
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int32_t ipu_disp_set_gamma_correction(struct ipu_soc *ipu, ipu_channel_t channel, bool enable, int constk[], int slopek[])
+{
+ uint32_t reg, flow, i;
+
+ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
+ flow = DP_SYNC;
+ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
+ flow = DP_ASYNC0;
+ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
+ flow = DP_ASYNC1;
+ else
+ return -EINVAL;
+
+ _ipu_get(ipu);
+
+ mutex_lock(&ipu->mutex_lock);
+
+ for (i = 0; i < 8; i++)
+ ipu_dp_write(ipu, (constk[2*i] & 0x1ff) | ((constk[2*i+1] & 0x1ff) << 16), DP_GAMMA_C(flow, i));
+ for (i = 0; i < 4; i++)
+ ipu_dp_write(ipu, (slopek[4*i] & 0xff) | ((slopek[4*i+1] & 0xff) << 8) |
+ ((slopek[4*i+2] & 0xff) << 16) | ((slopek[4*i+3] & 0xff) << 24), DP_GAMMA_S(flow, i));
+
+ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
+ if (enable) {
+ if ((ipu->bg_csc_type == RGB2YUV) || (ipu->bg_csc_type == YUV2YUV))
+ reg |= DP_COM_CONF_GAMMA_YUV_EN;
+ else
+ reg &= ~DP_COM_CONF_GAMMA_YUV_EN;
+ ipu_dp_write(ipu, reg | DP_COM_CONF_GAMMA_EN, DP_COM_CONF(flow));
+ } else
+ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GAMMA_EN, DP_COM_CONF(flow));
+
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+
+ mutex_unlock(&ipu->mutex_lock);
+
+ _ipu_put(ipu);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_disp_set_gamma_correction);
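+
+/*
+ * Note on the table sizes: the loops above consume constk[0..15] and
+ * slopek[0..15], so the caller must provide at least 16 entries in each
+ * array for the piecewise-linear gamma curve.
+ */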
+
+/*!
+ * This function sets the window position of the foreground or background plane.
+ *
+ * @param ipu ipu handler
+ * @param channel Input parameter for the logical channel ID.
+ *
+ * @param x_pos The X coordinate position to place window at.
+ * The position is relative to the top left corner.
+ *
+ * @param y_pos The Y coordinate position to place window at.
+ * The position is relative to the top left corner.
+ *
+ * @return Returns 0 on success or negative error code on fail
+ */
+int32_t _ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
+ int16_t x_pos, int16_t y_pos)
+{
+ u32 reg;
+ uint32_t flow = 0;
+ uint32_t dp_srm_shift;
+
+ if ((channel == MEM_FG_SYNC) || (channel == MEM_BG_SYNC)) {
+ flow = DP_SYNC;
+ dp_srm_shift = 3;
+ } else if (channel == MEM_FG_ASYNC0) {
+ flow = DP_ASYNC0;
+ dp_srm_shift = 5;
+ } else if (channel == MEM_FG_ASYNC1) {
+ flow = DP_ASYNC1;
+ dp_srm_shift = 7;
+ } else
+ return -EINVAL;
+
+ ipu_dp_write(ipu, (x_pos << 16) | y_pos, DP_FG_POS(flow));
+
+ if (ipu_is_channel_busy(ipu, channel)) {
+ /* controlled by FSU if channel enabled */
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) & (~(0x3 << dp_srm_shift));
+ reg |= (0x1 << dp_srm_shift);
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+ } else {
+ /* disable auto swap, controlled by MCU if channel disabled */
+ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) & (~(0x3 << dp_srm_shift));
+ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
+ }
+
+ return 0;
+}
+
+int32_t ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
+ int16_t x_pos, int16_t y_pos)
+{
+ int ret;
+
+ _ipu_get(ipu);
+ mutex_lock(&ipu->mutex_lock);
+ ret = _ipu_disp_set_window_pos(ipu, channel, x_pos, y_pos);
+ mutex_unlock(&ipu->mutex_lock);
+ _ipu_put(ipu);
+ return ret;
+}
+EXPORT_SYMBOL(ipu_disp_set_window_pos);
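+
+/*
+ * Example: place the foreground window 100 pixels right and 50 lines down
+ * from the top-left corner of the background plane:
+ *
+ *   ipu_disp_set_window_pos(ipu, MEM_FG_SYNC, 100, 50);
+ */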
+
+int32_t _ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
+ int16_t *x_pos, int16_t *y_pos)
+{
+ u32 reg;
+ uint32_t flow = 0;
+
+ if (channel == MEM_FG_SYNC)
+ flow = DP_SYNC;
+ else if (channel == MEM_FG_ASYNC0)
+ flow = DP_ASYNC0;
+ else if (channel == MEM_FG_ASYNC1)
+ flow = DP_ASYNC1;
+ else
+ return -EINVAL;
+
+ reg = ipu_dp_read(ipu, DP_FG_POS(flow));
+
+ *x_pos = (reg >> 16) & 0x7FF;
+ *y_pos = reg & 0x7FF;
+
+ return 0;
+}
+int32_t ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
+ int16_t *x_pos, int16_t *y_pos)
+{
+ int ret;
+
+ _ipu_get(ipu);
+ mutex_lock(&ipu->mutex_lock);
+ ret = _ipu_disp_get_window_pos(ipu, channel, x_pos, y_pos);
+ mutex_unlock(&ipu->mutex_lock);
+ _ipu_put(ipu);
+ return ret;
+}
+EXPORT_SYMBOL(ipu_disp_get_window_pos);
+
+void ipu_reset_disp_panel(struct ipu_soc *ipu)
+{
+ uint32_t tmp;
+
+ tmp = ipu_di_read(ipu, 1, DI_GENERAL);
+ ipu_di_write(ipu, 1, tmp | 0x08, DI_GENERAL);
+ msleep(10); /* tRES >= 100us */
+ tmp = ipu_di_read(ipu, 1, DI_GENERAL);
+ ipu_di_write(ipu, 1, tmp & ~0x08, DI_GENERAL);
+ msleep(60);
+
+ return;
+}
+EXPORT_SYMBOL(ipu_reset_disp_panel);
+
+void ipu_disp_init(struct ipu_soc *ipu)
+{
+ ipu->fg_csc_type = ipu->bg_csc_type = CSC_NONE;
+ ipu->color_key_4rgb = true;
+ _ipu_init_dc_mappings(ipu);
+ _ipu_dmfc_init(ipu, DMFC_NORMAL, 1);
+}
diff --git a/drivers/mxc/ipu3/ipu_ic.c b/drivers/mxc/ipu3/ipu_ic.c
new file mode 100644
index 000000000000..29519bd00d12
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_ic.c
@@ -0,0 +1,924 @@
+/*
+ * Copyright 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*
+ * @file ipu_ic.c
+ *
+ * @brief IPU IC functions
+ *
+ * @ingroup IPU
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ipu-v3.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include "ipu_param_mem.h"
+#include "ipu_regs.h"
+
+enum {
+ IC_TASK_VIEWFINDER,
+ IC_TASK_ENCODER,
+ IC_TASK_POST_PROCESSOR
+};
+
+static void _init_csc(struct ipu_soc *ipu, uint8_t ic_task, ipu_color_space_t in_format,
+ ipu_color_space_t out_format, int csc_index);
+
+static int _calc_resize_coeffs(struct ipu_soc *ipu,
+ uint32_t inSize, uint32_t outSize,
+ uint32_t *resizeCoeff,
+ uint32_t *downsizeCoeff);
+
+void _ipu_vdi_set_top_field_man(struct ipu_soc *ipu, bool top_field_0)
+{
+ uint32_t reg;
+
+ reg = ipu_vdi_read(ipu, VDI_C);
+ if (top_field_0)
+ reg &= ~VDI_C_TOP_FIELD_MAN_1;
+ else
+ reg |= VDI_C_TOP_FIELD_MAN_1;
+ ipu_vdi_write(ipu, reg, VDI_C);
+}
+
+void _ipu_vdi_set_motion(struct ipu_soc *ipu, ipu_motion_sel motion_sel)
+{
+ uint32_t reg;
+
+ reg = ipu_vdi_read(ipu, VDI_C);
+ reg &= ~(VDI_C_MOT_SEL_FULL | VDI_C_MOT_SEL_MED | VDI_C_MOT_SEL_LOW);
+ if (motion_sel == HIGH_MOTION)
+ reg |= VDI_C_MOT_SEL_FULL;
+ else if (motion_sel == MED_MOTION)
+ reg |= VDI_C_MOT_SEL_MED;
+ else
+ reg |= VDI_C_MOT_SEL_LOW;
+
+ ipu_vdi_write(ipu, reg, VDI_C);
+ dev_dbg(ipu->dev, "VDI_C = \t0x%08X\n", reg);
+}
+
+void ic_dump_register(struct ipu_soc *ipu)
+{
+ printk(KERN_DEBUG "IC_CONF = \t0x%08X\n", ipu_ic_read(ipu, IC_CONF));
+ printk(KERN_DEBUG "IC_PRP_ENC_RSC = \t0x%08X\n",
+ ipu_ic_read(ipu, IC_PRP_ENC_RSC));
+ printk(KERN_DEBUG "IC_PRP_VF_RSC = \t0x%08X\n",
+ ipu_ic_read(ipu, IC_PRP_VF_RSC));
+ printk(KERN_DEBUG "IC_PP_RSC = \t0x%08X\n", ipu_ic_read(ipu, IC_PP_RSC));
+ printk(KERN_DEBUG "IC_IDMAC_1 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_1));
+ printk(KERN_DEBUG "IC_IDMAC_2 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_2));
+ printk(KERN_DEBUG "IC_IDMAC_3 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_3));
+}
+
+void _ipu_ic_enable_task(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ uint32_t ic_conf;
+
+ ic_conf = ipu_ic_read(ipu, IC_CONF);
+ switch (channel) {
+ case CSI_PRP_VF_MEM:
+ case MEM_PRP_VF_MEM:
+ ic_conf |= IC_CONF_PRPVF_EN;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ ic_conf |= IC_CONF_PRPVF_EN;
+ break;
+ case MEM_VDI_MEM:
+ ic_conf |= IC_CONF_PRPVF_EN | IC_CONF_RWS_EN;
+ break;
+ case MEM_ROT_VF_MEM:
+ ic_conf |= IC_CONF_PRPVF_ROT_EN;
+ break;
+ case CSI_PRP_ENC_MEM:
+ case MEM_PRP_ENC_MEM:
+ ic_conf |= IC_CONF_PRPENC_EN;
+ break;
+ case MEM_ROT_ENC_MEM:
+ ic_conf |= IC_CONF_PRPENC_ROT_EN;
+ break;
+ case MEM_PP_MEM:
+ ic_conf |= IC_CONF_PP_EN;
+ break;
+ case MEM_ROT_PP_MEM:
+ ic_conf |= IC_CONF_PP_ROT_EN;
+ break;
+ default:
+ break;
+ }
+ ipu_ic_write(ipu, ic_conf, IC_CONF);
+}
+
+void _ipu_ic_disable_task(struct ipu_soc *ipu, ipu_channel_t channel)
+{
+ uint32_t ic_conf;
+
+ ic_conf = ipu_ic_read(ipu, IC_CONF);
+ switch (channel) {
+ case CSI_PRP_VF_MEM:
+ case MEM_PRP_VF_MEM:
+ ic_conf &= ~IC_CONF_PRPVF_EN;
+ break;
+ case MEM_VDI_PRP_VF_MEM:
+ ic_conf &= ~IC_CONF_PRPVF_EN;
+ break;
+ case MEM_VDI_MEM:
+ ic_conf &= ~(IC_CONF_PRPVF_EN | IC_CONF_RWS_EN);
+ break;
+ case MEM_ROT_VF_MEM:
+ ic_conf &= ~IC_CONF_PRPVF_ROT_EN;
+ break;
+ case CSI_PRP_ENC_MEM:
+ case MEM_PRP_ENC_MEM:
+ ic_conf &= ~IC_CONF_PRPENC_EN;
+ break;
+ case MEM_ROT_ENC_MEM:
+ ic_conf &= ~IC_CONF_PRPENC_ROT_EN;
+ break;
+ case MEM_PP_MEM:
+ ic_conf &= ~IC_CONF_PP_EN;
+ break;
+ case MEM_ROT_PP_MEM:
+ ic_conf &= ~IC_CONF_PP_ROT_EN;
+ break;
+ default:
+ break;
+ }
+ ipu_ic_write(ipu, ic_conf, IC_CONF);
+}
+
+void _ipu_vdi_init(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
+{
+ uint32_t reg;
+ uint32_t pixel_fmt;
+ uint32_t pix_per_burst;
+
+ reg = ((params->mem_prp_vf_mem.in_height-1) << 16) |
+ (params->mem_prp_vf_mem.in_width-1);
+ ipu_vdi_write(ipu, reg, VDI_FSIZE);
+
+ /* Full motion, only vertical filter is used;
+ * burst size is 4 accesses */
+ if (params->mem_prp_vf_mem.in_pixel_fmt ==
+ IPU_PIX_FMT_UYVY ||
+ params->mem_prp_vf_mem.in_pixel_fmt ==
+ IPU_PIX_FMT_YUYV) {
+ pixel_fmt = VDI_C_CH_422;
+ pix_per_burst = 32;
+ } else {
+ pixel_fmt = VDI_C_CH_420;
+ pix_per_burst = 64;
+ }
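+ /*
+ * pix_per_burst feeds the MEM_VDI_MEM burst-size fields below as
+ * (pix_per_burst >> 2) - 1: 32 pixels per burst encodes as 7 and
+ * 64 pixels per burst as 15.
+ */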
+
+ reg = ipu_vdi_read(ipu, VDI_C);
+ reg |= pixel_fmt;
+ switch (channel) {
+ case MEM_VDI_PRP_VF_MEM:
+ reg |= VDI_C_BURST_SIZE2_4;
+ break;
+ case MEM_VDI_PRP_VF_MEM_P:
+ reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_SET_1 | VDI_C_VWM1_CLR_2;
+ break;
+ case MEM_VDI_PRP_VF_MEM_N:
+ reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_SET_1 | VDI_C_VWM3_CLR_2;
+ break;
+
+ case MEM_VDI_MEM:
+ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
+ << VDI_C_BURST_SIZE2_OFFSET;
+ break;
+ case MEM_VDI_MEM_P:
+ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
+ << VDI_C_BURST_SIZE1_OFFSET;
+ reg |= VDI_C_VWM1_SET_2 | VDI_C_VWM1_CLR_2;
+ break;
+ case MEM_VDI_MEM_N:
+ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
+ << VDI_C_BURST_SIZE3_OFFSET;
+ reg |= VDI_C_VWM3_SET_2 | VDI_C_VWM3_CLR_2;
+ break;
+ default:
+ break;
+ }
+ ipu_vdi_write(ipu, reg, VDI_C);
+
+ if (params->mem_prp_vf_mem.field_fmt == IPU_DEINTERLACE_FIELD_TOP)
+ _ipu_vdi_set_top_field_man(ipu, true);
+ else if (params->mem_prp_vf_mem.field_fmt == IPU_DEINTERLACE_FIELD_BOTTOM)
+ _ipu_vdi_set_top_field_man(ipu, false);
+
+ _ipu_vdi_set_motion(ipu, params->mem_prp_vf_mem.motion_sel);
+
+ reg = ipu_ic_read(ipu, IC_CONF);
+ reg &= ~IC_CONF_RWS_EN;
+ ipu_ic_write(ipu, reg, IC_CONF);
+}
+
+void _ipu_vdi_uninit(struct ipu_soc *ipu)
+{
+ ipu_vdi_write(ipu, 0, VDI_FSIZE);
+ ipu_vdi_write(ipu, 0, VDI_C);
+}
+
+int _ipu_ic_init_prpvf(struct ipu_soc *ipu, ipu_channel_params_t *params,
+ bool src_is_csi)
+{
+ uint32_t reg, ic_conf;
+ uint32_t downsizeCoeff, resizeCoeff;
+ ipu_color_space_t in_fmt, out_fmt;
+ int ret = 0;
+
+ /* Setup vertical resizing */
+ if (!params->mem_prp_vf_mem.outv_resize_ratio) {
+ ret = _calc_resize_coeffs(ipu, params->mem_prp_vf_mem.in_height,
+ params->mem_prp_vf_mem.out_height,
+ &resizeCoeff, &downsizeCoeff);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to calculate prpvf height "
+ "scaling coefficients\n");
+ return ret;
+ }
+
+ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
+ } else
+ reg = (params->mem_prp_vf_mem.outv_resize_ratio) << 16;
+
+ /* Setup horizontal resizing */
+ if (!params->mem_prp_vf_mem.outh_resize_ratio) {
+ ret = _calc_resize_coeffs(ipu, params->mem_prp_vf_mem.in_width,
+ params->mem_prp_vf_mem.out_width,
+ &resizeCoeff, &downsizeCoeff);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to calculate prpvf width "
+ "scaling coefficients\n");
+ return ret;
+ }
+
+ reg |= (downsizeCoeff << 14) | resizeCoeff;
+ } else
+ reg |= params->mem_prp_vf_mem.outh_resize_ratio;
+
+ ipu_ic_write(ipu, reg, IC_PRP_VF_RSC);
+
+ ic_conf = ipu_ic_read(ipu, IC_CONF);
+
+ /* Setup color space conversion */
+ in_fmt = format_to_colorspace(params->mem_prp_vf_mem.in_pixel_fmt);
+ out_fmt = format_to_colorspace(params->mem_prp_vf_mem.out_pixel_fmt);
+ if (in_fmt == RGB) {
+ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
+ /* Enable RGB->YCBCR CSC1 */
+ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, out_fmt, 1);
+ ic_conf |= IC_CONF_PRPVF_CSC1;
+ }
+ }
+ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
+ if (out_fmt == RGB) {
+ /* Enable YCBCR->RGB CSC1 */
+ _init_csc(ipu, IC_TASK_VIEWFINDER, YCbCr, RGB, 1);
+ ic_conf |= IC_CONF_PRPVF_CSC1;
+ } else {
+ /* TODO: Support YUV<->YCbCr conversion? */
+ }
+ }
+
+ if (params->mem_prp_vf_mem.graphics_combine_en) {
+ ic_conf |= IC_CONF_PRPVF_CMB;
+
+ if (!(ic_conf & IC_CONF_PRPVF_CSC1)) {
+ /* need transparent CSC1 conversion */
+ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, RGB, 1);
+ ic_conf |= IC_CONF_PRPVF_CSC1; /* Enable RGB->RGB CSC */
+ }
+ in_fmt = format_to_colorspace(params->mem_prp_vf_mem.in_g_pixel_fmt);
+ out_fmt = format_to_colorspace(params->mem_prp_vf_mem.out_pixel_fmt);
+ if (in_fmt == RGB) {
+ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
+ /* Enable RGB->YCBCR CSC2 */
+ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, out_fmt, 2);
+ ic_conf |= IC_CONF_PRPVF_CSC2;
+ }
+ }
+ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
+ if (out_fmt == RGB) {
+ /* Enable YCBCR->RGB CSC2 */
+ _init_csc(ipu, IC_TASK_VIEWFINDER, YCbCr, RGB, 2);
+ ic_conf |= IC_CONF_PRPVF_CSC2;
+ } else {
+ /* TODO: Support YUV<->YCbCr conversion? */
+ }
+ }
+
+ if (params->mem_prp_vf_mem.global_alpha_en) {
+ ic_conf |= IC_CONF_IC_GLB_LOC_A;
+ reg = ipu_ic_read(ipu, IC_CMBP_1);
+ reg &= ~(0xff);
+ reg |= params->mem_prp_vf_mem.alpha;
+ ipu_ic_write(ipu, reg, IC_CMBP_1);
+ } else
+ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
+
+ if (params->mem_prp_vf_mem.key_color_en) {
+ ic_conf |= IC_CONF_KEY_COLOR_EN;
+ ipu_ic_write(ipu, params->mem_prp_vf_mem.key_color,
+ IC_CMBP_2);
+ } else
+ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
+ } else {
+ ic_conf &= ~IC_CONF_PRPVF_CMB;
+ }
+
+ if (src_is_csi)
+ ic_conf &= ~IC_CONF_RWS_EN;
+ else
+ ic_conf |= IC_CONF_RWS_EN;
+
+ ipu_ic_write(ipu, ic_conf, IC_CONF);
+
+ return ret;
+}
+
+void _ipu_ic_uninit_prpvf(struct ipu_soc *ipu)
+{
+ uint32_t reg;
+
+ reg = ipu_ic_read(ipu, IC_CONF);
+ reg &= ~(IC_CONF_PRPVF_EN | IC_CONF_PRPVF_CMB |
+ IC_CONF_PRPVF_CSC2 | IC_CONF_PRPVF_CSC1);
+ ipu_ic_write(ipu, reg, IC_CONF);
+}
+
+void _ipu_ic_init_rotate_vf(struct ipu_soc *ipu, ipu_channel_params_t *params)
+{
+}
+
+void _ipu_ic_uninit_rotate_vf(struct ipu_soc *ipu)
+{
+ uint32_t reg;
+ reg = ipu_ic_read(ipu, IC_CONF);
+ reg &= ~IC_CONF_PRPVF_ROT_EN;
+ ipu_ic_write(ipu, reg, IC_CONF);
+}
+
+int _ipu_ic_init_prpenc(struct ipu_soc *ipu, ipu_channel_params_t *params,
+ bool src_is_csi)
+{
+ uint32_t reg, ic_conf;
+ uint32_t downsizeCoeff, resizeCoeff;
+ ipu_color_space_t in_fmt, out_fmt;
+ int ret = 0;
+
+ /* Setup vertical resizing */
+ if (!params->mem_prp_enc_mem.outv_resize_ratio) {
+ ret = _calc_resize_coeffs(ipu,
+ params->mem_prp_enc_mem.in_height,
+ params->mem_prp_enc_mem.out_height,
+ &resizeCoeff, &downsizeCoeff);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to calculate prpenc height "
+ "scaling coefficients\n");
+ return ret;
+ }
+
+ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
+ } else
+ reg = (params->mem_prp_enc_mem.outv_resize_ratio) << 16;
+
+ /* Setup horizontal resizing */
+ if (!params->mem_prp_enc_mem.outh_resize_ratio) {
+ ret = _calc_resize_coeffs(ipu, params->mem_prp_enc_mem.in_width,
+ params->mem_prp_enc_mem.out_width,
+ &resizeCoeff, &downsizeCoeff);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to calculate prpenc width "
+ "scaling coefficients\n");
+ return ret;
+ }
+
+ reg |= (downsizeCoeff << 14) | resizeCoeff;
+ } else
+ reg |= params->mem_prp_enc_mem.outh_resize_ratio;
+
+ ipu_ic_write(ipu, reg, IC_PRP_ENC_RSC);
+
+ ic_conf = ipu_ic_read(ipu, IC_CONF);
+
+ /* Setup color space conversion */
+ in_fmt = format_to_colorspace(params->mem_prp_enc_mem.in_pixel_fmt);
+ out_fmt = format_to_colorspace(params->mem_prp_enc_mem.out_pixel_fmt);
+ if (in_fmt == RGB) {
+ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
+ /* Enable RGB->YCBCR CSC1 */
+ _init_csc(ipu, IC_TASK_ENCODER, RGB, out_fmt, 1);
+ ic_conf |= IC_CONF_PRPENC_CSC1;
+ }
+ }
+ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
+ if (out_fmt == RGB) {
+ /* Enable YCBCR->RGB CSC1 */
+ _init_csc(ipu, IC_TASK_ENCODER, YCbCr, RGB, 1);
+ ic_conf |= IC_CONF_PRPENC_CSC1;
+ } else {
+ /* TODO: Support YUV<->YCbCr conversion? */
+ }
+ }
+
+ if (src_is_csi)
+ ic_conf &= ~IC_CONF_RWS_EN;
+ else
+ ic_conf |= IC_CONF_RWS_EN;
+
+ ipu_ic_write(ipu, ic_conf, IC_CONF);
+
+ return ret;
+}
+
+void _ipu_ic_uninit_prpenc(struct ipu_soc *ipu)
+{
+ uint32_t reg;
+
+ reg = ipu_ic_read(ipu, IC_CONF);
+ reg &= ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1);
+ ipu_ic_write(ipu, reg, IC_CONF);
+}
+
+void _ipu_ic_init_rotate_enc(struct ipu_soc *ipu, ipu_channel_params_t *params)
+{
+}
+
+void _ipu_ic_uninit_rotate_enc(struct ipu_soc *ipu)
+{
+ uint32_t reg;
+
+ reg = ipu_ic_read(ipu, IC_CONF);
+ reg &= ~(IC_CONF_PRPENC_ROT_EN);
+ ipu_ic_write(ipu, reg, IC_CONF);
+}
+
+int _ipu_ic_init_pp(struct ipu_soc *ipu, ipu_channel_params_t *params)
+{
+ uint32_t reg, ic_conf;
+ uint32_t downsizeCoeff, resizeCoeff;
+ ipu_color_space_t in_fmt, out_fmt;
+ int ret = 0;
+
+ /* Setup vertical resizing */
+ if (!params->mem_pp_mem.outv_resize_ratio) {
+ ret = _calc_resize_coeffs(ipu, params->mem_pp_mem.in_height,
+ params->mem_pp_mem.out_height,
+ &resizeCoeff, &downsizeCoeff);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to calculate pp height "
+ "scaling coefficients\n");
+ return ret;
+ }
+
+ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
+ } else {
+ reg = (params->mem_pp_mem.outv_resize_ratio) << 16;
+ }
+
+ /* Setup horizontal resizing */
+ if (!params->mem_pp_mem.outh_resize_ratio) {
+ ret = _calc_resize_coeffs(ipu, params->mem_pp_mem.in_width,
+ params->mem_pp_mem.out_width,
+ &resizeCoeff, &downsizeCoeff);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to calculate pp width "
+ "scaling coefficients\n");
+ return ret;
+ }
+
+ reg |= (downsizeCoeff << 14) | resizeCoeff;
+ } else {
+ reg |= params->mem_pp_mem.outh_resize_ratio;
+ }
+
+ ipu_ic_write(ipu, reg, IC_PP_RSC);
+
+ ic_conf = ipu_ic_read(ipu, IC_CONF);
+
+ /* Setup color space conversion */
+ in_fmt = format_to_colorspace(params->mem_pp_mem.in_pixel_fmt);
+ out_fmt = format_to_colorspace(params->mem_pp_mem.out_pixel_fmt);
+ if (in_fmt == RGB) {
+ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
+ /* Enable RGB->YCBCR CSC1 */
+ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, out_fmt, 1);
+ ic_conf |= IC_CONF_PP_CSC1;
+ }
+ }
+ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
+ if (out_fmt == RGB) {
+ /* Enable YCBCR->RGB CSC1 */
+ _init_csc(ipu, IC_TASK_POST_PROCESSOR, YCbCr, RGB, 1);
+ ic_conf |= IC_CONF_PP_CSC1;
+ } else {
+ /* TODO: Support YUV<->YCbCr conversion? */
+ }
+ }
+
+ if (params->mem_pp_mem.graphics_combine_en) {
+ ic_conf |= IC_CONF_PP_CMB;
+
+ if (!(ic_conf & IC_CONF_PP_CSC1)) {
+ /* need transparent CSC1 conversion */
+ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, RGB, 1);
+ ic_conf |= IC_CONF_PP_CSC1; /* Enable RGB->RGB CSC */
+ }
+
+ in_fmt = format_to_colorspace(params->mem_pp_mem.in_g_pixel_fmt);
+ out_fmt = format_to_colorspace(params->mem_pp_mem.out_pixel_fmt);
+ if (in_fmt == RGB) {
+ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
+ /* Enable RGB->YCBCR CSC2 */
+ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, out_fmt, 2);
+ ic_conf |= IC_CONF_PP_CSC2;
+ }
+ }
+ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
+ if (out_fmt == RGB) {
+ /* Enable YCBCR->RGB CSC2 */
+ _init_csc(ipu, IC_TASK_POST_PROCESSOR, YCbCr, RGB, 2);
+ ic_conf |= IC_CONF_PP_CSC2;
+ } else {
+ /* TODO: Support YUV<->YCbCr conversion? */
+ }
+ }
+
+ if (params->mem_pp_mem.global_alpha_en) {
+ ic_conf |= IC_CONF_IC_GLB_LOC_A;
+ reg = ipu_ic_read(ipu, IC_CMBP_1);
+ reg &= ~(0xff00);
+ reg |= (params->mem_pp_mem.alpha << 8);
+ ipu_ic_write(ipu, reg, IC_CMBP_1);
+ } else
+ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
+
+ if (params->mem_pp_mem.key_color_en) {
+ ic_conf |= IC_CONF_KEY_COLOR_EN;
+ ipu_ic_write(ipu, params->mem_pp_mem.key_color,
+ IC_CMBP_2);
+ } else
+ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
+ } else {
+ ic_conf &= ~IC_CONF_PP_CMB;
+ }
+
+ ipu_ic_write(ipu, ic_conf, IC_CONF);
+
+ return ret;
+}
+
+void _ipu_ic_uninit_pp(struct ipu_soc *ipu)
+{
+ uint32_t reg;
+
+ reg = ipu_ic_read(ipu, IC_CONF);
+ reg &= ~(IC_CONF_PP_EN | IC_CONF_PP_CSC1 | IC_CONF_PP_CSC2 |
+ IC_CONF_PP_CMB);
+ ipu_ic_write(ipu, reg, IC_CONF);
+}
+
+void _ipu_ic_init_rotate_pp(struct ipu_soc *ipu, ipu_channel_params_t *params)
+{
+}
+
+void _ipu_ic_uninit_rotate_pp(struct ipu_soc *ipu)
+{
+ uint32_t reg;
+ reg = ipu_ic_read(ipu, IC_CONF);
+ reg &= ~IC_CONF_PP_ROT_EN;
+ ipu_ic_write(ipu, reg, IC_CONF);
+}
+
+int _ipu_ic_idma_init(struct ipu_soc *ipu, int dma_chan,
+ uint16_t width, uint16_t height,
+ int burst_size, ipu_rotate_mode_t rot)
+{
+ u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
+ u32 temp_rot = bitrev8(rot) >> 5;
+ bool need_hor_flip = false;
+
+ if ((burst_size != 8) && (burst_size != 16)) {
+ dev_dbg(ipu->dev, "Illegal burst length for IC\n");
+ return -EINVAL;
+ }
+
+ width--;
+ height--;
+
+ if (temp_rot & 0x2) /* Need horizontal flip */
+ need_hor_flip = true;
+
+ ic_idmac_1 = ipu_ic_read(ipu, IC_IDMAC_1);
+ ic_idmac_2 = ipu_ic_read(ipu, IC_IDMAC_2);
+ ic_idmac_3 = ipu_ic_read(ipu, IC_IDMAC_3);
+ if (dma_chan == 22) { /* PP output - CB2 */
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
+ } else if (dma_chan == 11) { /* PP Input - CB5 */
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
+ } else if (dma_chan == 47) { /* PP Rot input */
+ ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
+ }
+
+ if (dma_chan == 12) { /* PRP Input - CB6 */
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
+ }
+
+ if (dma_chan == 20) { /* PRP ENC output - CB0 */
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
+
+ } else if (dma_chan == 45) { /* PRP ENC Rot input */
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
+ }
+
+ if (dma_chan == 21) { /* PRP VF output - CB1 */
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
+
+ } else if (dma_chan == 46) { /* PRP VF Rot input */
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
+ }
+
+ if (dma_chan == 14) { /* PRP VF graphics combining input - CB3 */
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
+ } else if (dma_chan == 15) { /* PP graphics combining input - CB4 */
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
+ } else if (dma_chan == 5) { /* VDIC OUTPUT - CB7 */
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
+ }
+
+ ipu_ic_write(ipu, ic_idmac_1, IC_IDMAC_1);
+ ipu_ic_write(ipu, ic_idmac_2, IC_IDMAC_2);
+ ipu_ic_write(ipu, ic_idmac_3, IC_IDMAC_3);
+ return 0;
+}
+
+static void _init_csc(struct ipu_soc *ipu, uint8_t ic_task, ipu_color_space_t in_format,
+ ipu_color_space_t out_format, int csc_index)
+{
+ /*
+ * Y = 0.257 * R + 0.504 * G + 0.098 * B + 16;
+ * U = -0.148 * R - 0.291 * G + 0.439 * B + 128;
+ * V = 0.439 * R - 0.368 * G - 0.071 * B + 128;
+ */
+ static const uint32_t rgb2ycbcr_coeff[4][3] = {
+ {0x0042, 0x0081, 0x0019},
+ {0x01DA, 0x01B6, 0x0070},
+ {0x0070, 0x01A2, 0x01EE},
+ {0x0040, 0x0200, 0x0200}, /* A0, A1, A2 */
+ };
+
+ /* transparent RGB->RGB matrix for combining */
+ static const uint32_t rgb2rgb_coeff[4][3] = {
+ {0x0080, 0x0000, 0x0000},
+ {0x0000, 0x0080, 0x0000},
+ {0x0000, 0x0000, 0x0080},
+ {0x0000, 0x0000, 0x0000}, /* A0, A1, A2 */
+ };
+
+ /*
+ * R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
+ * G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
+ * B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128));
+ */
+ static const uint32_t ycbcr2rgb_coeff[4][3] = {
+ {149, 0, 204},
+ {149, 462, 408},
+ {149, 255, 0},
+ {8192 - 446, 266, 8192 - 554}, /* A0, A1, A2 */
+ };
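+ /*
+ * Rough decoding of the tables above (illustrative, not from the
+ * original sources): each 9-bit coefficient appears to be the real
+ * coefficient scaled by 256 for scale = 1 (rgb2ycbcr: 0.257 * 256 ~= 66
+ * = 0x042, -0.148 * 256 ~= -38 = 0x1DA in two's complement) and by 128
+ * for scale = 2 (ycbcr2rgb: 1.164 * 128 ~= 149, -0.813 * 128 ~= -104 =
+ * 408; 2.017 * 128 ~= 258 saturates at 255). The last row of each
+ * table holds the A0/A1/A2 offsets.
+ */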
+
+ uint32_t param;
+ uint32_t *base = NULL;
+
+ if (ic_task == IC_TASK_ENCODER) {
+ base = (uint32_t *)ipu->tpmem_base + 0x2008 / 4;
+ } else if (ic_task == IC_TASK_VIEWFINDER) {
+ if (csc_index == 1)
+ base = (uint32_t *)ipu->tpmem_base + 0x4028 / 4;
+ else
+ base = (uint32_t *)ipu->tpmem_base + 0x4040 / 4;
+ } else if (ic_task == IC_TASK_POST_PROCESSOR) {
+ if (csc_index == 1)
+ base = (uint32_t *)ipu->tpmem_base + 0x6060 / 4;
+ else
+ base = (uint32_t *)ipu->tpmem_base + 0x6078 / 4;
+ } else {
+ BUG();
+ }
+
+ if ((in_format == YCbCr) && (out_format == RGB)) {
+ /* Init CSC (YCbCr->RGB) */
+ param = (ycbcr2rgb_coeff[3][0] << 27) |
+ (ycbcr2rgb_coeff[0][0] << 18) |
+ (ycbcr2rgb_coeff[1][1] << 9) | ycbcr2rgb_coeff[2][2];
+ writel(param, base++);
+ /* scale = 2, sat = 0 */
+ param = (ycbcr2rgb_coeff[3][0] >> 5) | (2L << (40 - 32));
+ writel(param, base++);
+
+ param = (ycbcr2rgb_coeff[3][1] << 27) |
+ (ycbcr2rgb_coeff[0][1] << 18) |
+ (ycbcr2rgb_coeff[1][0] << 9) | ycbcr2rgb_coeff[2][0];
+ writel(param, base++);
+ param = (ycbcr2rgb_coeff[3][1] >> 5);
+ writel(param, base++);
+
+ param = (ycbcr2rgb_coeff[3][2] << 27) |
+ (ycbcr2rgb_coeff[0][2] << 18) |
+ (ycbcr2rgb_coeff[1][2] << 9) | ycbcr2rgb_coeff[2][1];
+ writel(param, base++);
+ param = (ycbcr2rgb_coeff[3][2] >> 5);
+ writel(param, base++);
+ } else if ((in_format == RGB) && (out_format == YCbCr)) {
+ /* Init CSC (RGB->YCbCr) */
+ param = (rgb2ycbcr_coeff[3][0] << 27) |
+ (rgb2ycbcr_coeff[0][0] << 18) |
+ (rgb2ycbcr_coeff[1][1] << 9) | rgb2ycbcr_coeff[2][2];
+ writel(param, base++);
+ /* scale = 1, sat = 0 */
+ param = (rgb2ycbcr_coeff[3][0] >> 5) | (1UL << 8);
+ writel(param, base++);
+
+ param = (rgb2ycbcr_coeff[3][1] << 27) |
+ (rgb2ycbcr_coeff[0][1] << 18) |
+ (rgb2ycbcr_coeff[1][0] << 9) | rgb2ycbcr_coeff[2][0];
+ writel(param, base++);
+ param = (rgb2ycbcr_coeff[3][1] >> 5);
+ writel(param, base++);
+
+ param = (rgb2ycbcr_coeff[3][2] << 27) |
+ (rgb2ycbcr_coeff[0][2] << 18) |
+ (rgb2ycbcr_coeff[1][2] << 9) | rgb2ycbcr_coeff[2][1];
+ writel(param, base++);
+ param = (rgb2ycbcr_coeff[3][2] >> 5);
+ writel(param, base++);
+ } else if ((in_format == RGB) && (out_format == RGB)) {
+ /* Init CSC */
+ param =
+ (rgb2rgb_coeff[3][0] << 27) | (rgb2rgb_coeff[0][0] << 18) |
+ (rgb2rgb_coeff[1][1] << 9) | rgb2rgb_coeff[2][2];
+ writel(param, base++);
+ /* scale = 2, sat = 0 */
+ param = (rgb2rgb_coeff[3][0] >> 5) | (2UL << 8);
+ writel(param, base++);
+
+ param =
+ (rgb2rgb_coeff[3][1] << 27) | (rgb2rgb_coeff[0][1] << 18) |
+ (rgb2rgb_coeff[1][0] << 9) | rgb2rgb_coeff[2][0];
+ writel(param, base++);
+ param = (rgb2rgb_coeff[3][1] >> 5);
+ writel(param, base++);
+
+ param =
+ (rgb2rgb_coeff[3][2] << 27) | (rgb2rgb_coeff[0][2] << 18) |
+ (rgb2rgb_coeff[1][2] << 9) | rgb2rgb_coeff[2][1];
+ writel(param, base++);
+ param = (rgb2rgb_coeff[3][2] >> 5);
+ writel(param, base++);
+ } else {
+ dev_err(ipu->dev, "Unsupported color space conversion\n");
+ }
+}
+
+static int _calc_resize_coeffs(struct ipu_soc *ipu,
+ uint32_t inSize, uint32_t outSize,
+ uint32_t *resizeCoeff,
+ uint32_t *downsizeCoeff)
+{
+ uint32_t tempSize;
+ uint32_t tempDownsize;
+
+ if (inSize > 4096) {
+ dev_err(ipu->dev, "IC input size(%d) cannot exceed 4096\n",
+ inSize);
+ return -EINVAL;
+ }
+
+ if (outSize > 1024) {
+ dev_err(ipu->dev, "IC output size(%d) cannot exceed 1024\n",
+ outSize);
+ return -EINVAL;
+ }
+
+ if ((outSize << 3) < inSize) {
+ dev_err(ipu->dev, "IC cannot downsize more than 8:1\n");
+ return -EINVAL;
+ }
+
+ /* Compute downsizing coefficient */
+ /* Output of downsizing unit cannot be more than 1024 */
+ tempDownsize = 0;
+ tempSize = inSize;
+ while (((tempSize > 1024) || (tempSize >= outSize * 2)) &&
+ (tempDownsize < 2)) {
+ tempSize >>= 1;
+ tempDownsize++;
+ }
+ *downsizeCoeff = tempDownsize;
+
+ /*
+ * Compute the resizing coefficient using the following equation:
+ * resizeCoeff = M * (SI - 1) / (SO - 1)
+ * where M = 2^13, SI = input size, SO = output size
+ */
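+ /*
+ * Worked example with illustrative numbers: inSize = 1920 and
+ * outSize = 720 leave downsizeCoeff = 1 (tempSize = 960) from the
+ * loop above, and resizeCoeff = 8192 * 959 / 719 = 10926, roughly
+ * 1.334 in the 1.13 fixed-point format checked for overflow below.
+ */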
+ *resizeCoeff = (8192L * (tempSize - 1)) / (outSize - 1);
+ if (*resizeCoeff >= 16384L) {
+ dev_err(ipu->dev, "Overflow on IC resize coefficient.\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(ipu->dev, "resizing from %u -> %u pixels, "
+ "downsize=%u, resize=%u.%lu (reg=%u)\n", inSize, outSize,
+ *downsizeCoeff, (*resizeCoeff >= 8192L) ? 1 : 0,
+ ((*resizeCoeff & 0x1FFF) * 10000L) / 8192L, *resizeCoeff);
+
+ return 0;
+}
+
+void _ipu_vdi_toggle_top_field_man(struct ipu_soc *ipu)
+{
+ uint32_t reg;
+ uint32_t mask_reg;
+
+ reg = ipu_vdi_read(ipu, VDI_C);
+ mask_reg = reg & VDI_C_TOP_FIELD_MAN_1;
+ if (mask_reg == VDI_C_TOP_FIELD_MAN_1)
+ reg &= ~VDI_C_TOP_FIELD_MAN_1;
+ else
+ reg |= VDI_C_TOP_FIELD_MAN_1;
+
+ ipu_vdi_write(ipu, reg, VDI_C);
+}
diff --git a/drivers/mxc/ipu3/ipu_param_mem.h b/drivers/mxc/ipu3/ipu_param_mem.h
new file mode 100644
index 000000000000..266d4d20beea
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_param_mem.h
@@ -0,0 +1,996 @@
+/*
+ * Copyright 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef __INCLUDE_IPU_PARAM_MEM_H__
+#define __INCLUDE_IPU_PARAM_MEM_H__
+
+#include <linux/bitrev.h>
+#include <linux/types.h>
+
+#include "ipu_prv.h"
+
+extern u32 *ipu_cpmem_base;
+
+struct ipu_ch_param_word {
+ uint32_t data[5];
+ uint32_t res[3];
+};
+
+struct ipu_ch_param {
+ struct ipu_ch_param_word word[2];
+};
+
+#define ipu_ch_param_addr(ipu, ch) (((struct ipu_ch_param *)ipu->cpmem_base) + (ch))
+
+#define _param_word(base, w) \
+ (((struct ipu_ch_param *)(base))->word[(w)].data)
+
+#define ipu_ch_param_set_field(base, w, bit, size, v) { \
+ int i = (bit) / 32; \
+ int off = (bit) % 32; \
+ _param_word(base, w)[i] |= (v) << off; \
+ if (((bit)+(size)-1)/32 > i) { \
+ _param_word(base, w)[i + 1] |= (v) >> (off ? (32 - off) : 0); \
+ } \
+}
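+/*
+ * Illustration (not part of the original header): a field that crosses a
+ * 32-bit word boundary is split by the macros above and below. For the
+ * frame width field at word 0, bit 125, size 13: i = 3, off = 29, so the
+ * low 3 bits of the value land in data[3] bits 29..31 and the remaining
+ * 10 bits land in data[4] bits 0..9.
+ */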
+
+#define ipu_ch_param_set_field_io(base, w, bit, size, v) { \
+ int i = (bit) / 32; \
+ int off = (bit) % 32; \
+ unsigned reg_offset; \
+ u32 temp; \
+ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
+ reg_offset += i; \
+ temp = readl((u32 *)base + reg_offset); \
+ temp |= (v) << off; \
+ writel(temp, (u32 *)base + reg_offset); \
+ if (((bit)+(size)-1)/32 > i) { \
+ reg_offset++; \
+ temp = readl((u32 *)base + reg_offset); \
+ temp |= (v) >> (off ? (32 - off) : 0); \
+ writel(temp, (u32 *)base + reg_offset); \
+ } \
+}
+
+#define ipu_ch_param_mod_field(base, w, bit, size, v) { \
+ int i = (bit) / 32; \
+ int off = (bit) % 32; \
+ u32 mask = (1UL << size) - 1; \
+ u32 temp = _param_word(base, w)[i]; \
+ temp &= ~(mask << off); \
+ _param_word(base, w)[i] = temp | (v) << off; \
+ if (((bit)+(size)-1)/32 > i) { \
+ temp = _param_word(base, w)[i + 1]; \
+ temp &= ~(mask >> (32 - off)); \
+ _param_word(base, w)[i + 1] = \
+ temp | ((v) >> (off ? (32 - off) : 0)); \
+ } \
+}
+
+#define ipu_ch_param_mod_field_io(base, w, bit, size, v) { \
+ int i = (bit) / 32; \
+ int off = (bit) % 32; \
+ u32 mask = (1UL << size) - 1; \
+ unsigned reg_offset; \
+ u32 temp; \
+ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
+ reg_offset += i; \
+ temp = readl((u32 *)base + reg_offset); \
+ temp &= ~(mask << off); \
+ temp |= (v) << off; \
+ writel(temp, (u32 *)base + reg_offset); \
+ if (((bit)+(size)-1)/32 > i) { \
+ reg_offset++; \
+ temp = readl((u32 *)base + reg_offset); \
+ temp &= ~(mask >> (32 - off)); \
+ temp |= ((v) >> (off ? (32 - off) : 0)); \
+ writel(temp, (u32 *)base + reg_offset); \
+ } \
+}
+
+#define ipu_ch_param_read_field(base, w, bit, size) ({ \
+ u32 temp2; \
+ int i = (bit) / 32; \
+ int off = (bit) % 32; \
+ u32 mask = (1UL << size) - 1; \
+ u32 temp1 = _param_word(base, w)[i]; \
+ temp1 = mask & (temp1 >> off); \
+ if (((bit)+(size)-1)/32 > i) { \
+ temp2 = _param_word(base, w)[i + 1]; \
+ temp2 &= mask >> (off ? (32 - off) : 0); \
+ temp1 |= temp2 << (off ? (32 - off) : 0); \
+ } \
+ temp1; \
+})
+
+#define ipu_ch_param_read_field_io(base, w, bit, size) ({ \
+ u32 temp1, temp2; \
+ int i = (bit) / 32; \
+ int off = (bit) % 32; \
+ u32 mask = (1UL << size) - 1; \
+ unsigned reg_offset; \
+ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
+ reg_offset += i; \
+ temp1 = readl((u32 *)base + reg_offset); \
+ temp1 = mask & (temp1 >> off); \
+ if (((bit)+(size)-1)/32 > i) { \
+ reg_offset++; \
+ temp2 = readl((u32 *)base + reg_offset); \
+ temp2 &= mask >> (off ? (32 - off) : 0); \
+ temp1 |= temp2 << (off ? (32 - off) : 0); \
+ } \
+ temp1; \
+})
+
+static inline int __ipu_ch_get_third_buf_cpmem_num(int ch)
+{
+ switch (ch) {
+ case 8:
+ return 64;
+ case 9:
+ return 65;
+ case 10:
+ return 66;
+ case 13:
+ return 67;
+ case 21:
+ return 68;
+ case 23:
+ return 69;
+ case 27:
+ return 70;
+ case 28:
+ return 71;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void _ipu_ch_params_set_packing(struct ipu_ch_param *p,
+ int red_width, int red_offset,
+ int green_width, int green_offset,
+ int blue_width, int blue_offset,
+ int alpha_width, int alpha_offset)
+{
+ /* Setup red width and offset */
+ ipu_ch_param_set_field(p, 1, 116, 3, red_width - 1);
+ ipu_ch_param_set_field(p, 1, 128, 5, red_offset);
+ /* Setup green width and offset */
+ ipu_ch_param_set_field(p, 1, 119, 3, green_width - 1);
+ ipu_ch_param_set_field(p, 1, 133, 5, green_offset);
+ /* Setup blue width and offset */
+ ipu_ch_param_set_field(p, 1, 122, 3, blue_width - 1);
+ ipu_ch_param_set_field(p, 1, 138, 5, blue_offset);
+ /* Setup alpha width and offset */
+ ipu_ch_param_set_field(p, 1, 125, 3, alpha_width - 1);
+ ipu_ch_param_set_field(p, 1, 143, 5, alpha_offset);
+}
+
+static inline void _ipu_ch_param_dump(struct ipu_soc *ipu, int ch)
+{
+ struct ipu_ch_param *p = ipu_ch_param_addr(ipu, ch);
+ dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", ch,
+ p->word[0].data[0], p->word[0].data[1], p->word[0].data[2],
+ p->word[0].data[3], p->word[0].data[4]);
+ dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", ch,
+ p->word[1].data[0], p->word[1].data[1], p->word[1].data[2],
+ p->word[1].data[3], p->word[1].data[4]);
+ dev_dbg(ipu->dev, "PFS 0x%x, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 85, 4));
+ dev_dbg(ipu->dev, "BPP 0x%x, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 107, 3));
+ dev_dbg(ipu->dev, "NPB 0x%x\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7));
+
+ dev_dbg(ipu->dev, "FW %d, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 125, 13));
+ dev_dbg(ipu->dev, "FH %d, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 138, 12));
+ dev_dbg(ipu->dev, "EBA0 0x%x\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 0, 29) << 3);
+ dev_dbg(ipu->dev, "EBA1 0x%x\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 29, 29) << 3);
+ dev_dbg(ipu->dev, "Stride %d\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14));
+ dev_dbg(ipu->dev, "scan_order %d\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 113, 1));
+ dev_dbg(ipu->dev, "uv_stride %d\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 128, 14));
+ dev_dbg(ipu->dev, "u_offset 0x%x\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22) << 3);
+ dev_dbg(ipu->dev, "v_offset 0x%x\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22) << 3);
+
+ dev_dbg(ipu->dev, "Width0 %d+1, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 116, 3));
+ dev_dbg(ipu->dev, "Width1 %d+1, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 119, 3));
+ dev_dbg(ipu->dev, "Width2 %d+1, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 122, 3));
+ dev_dbg(ipu->dev, "Width3 %d+1, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 125, 3));
+ dev_dbg(ipu->dev, "Offset0 %d, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 128, 5));
+ dev_dbg(ipu->dev, "Offset1 %d, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 133, 5));
+ dev_dbg(ipu->dev, "Offset2 %d, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 138, 5));
+ dev_dbg(ipu->dev, "Offset3 %d\n",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 143, 5));
+}
+
+static inline void fill_cpmem(struct ipu_soc *ipu, int ch, struct ipu_ch_param *params)
+{
+ int i, w;
+ void *addr = ipu_ch_param_addr(ipu, ch);
+
+ /* 2 words, 5 valid data */
+ for (w = 0; w < 2; w++) {
+ for (i = 0; i < 5; i++) {
+ writel(params->word[w].data[i], addr);
+ addr += 4;
+ }
+ addr += 12;
+ }
+}
+
+static inline void _ipu_ch_param_init(struct ipu_soc *ipu, int ch,
+ uint32_t pixel_fmt, uint32_t width,
+ uint32_t height, uint32_t stride,
+ uint32_t u, uint32_t v,
+ uint32_t uv_stride, dma_addr_t addr0,
+ dma_addr_t addr1, dma_addr_t addr2)
+{
+ uint32_t u_offset = 0;
+ uint32_t v_offset = 0;
+ uint32_t bs = 0;
+ int32_t sub_ch = 0;
+ struct ipu_ch_param params;
+
+ memset(&params, 0, sizeof(params));
+
+ ipu_ch_param_set_field(&params, 0, 125, 13, width - 1);
+
+ if (((ch == 8) || (ch == 9) || (ch == 10)) && !ipu->vdoa_en) {
+ ipu_ch_param_set_field(&params, 0, 138, 12, (height / 2) - 1);
+ ipu_ch_param_set_field(&params, 1, 102, 14, (stride * 2) - 1);
+ } else {
+ /* note: for vdoa+vdi- ch8/9/10, always use band mode */
+ ipu_ch_param_set_field(&params, 0, 138, 12, height - 1);
+ ipu_ch_param_set_field(&params, 1, 102, 14, stride - 1);
+ }
+
+ /* EBA is 8-byte aligned */
+ ipu_ch_param_set_field(&params, 1, 0, 29, addr0 >> 3);
+ ipu_ch_param_set_field(&params, 1, 29, 29, addr1 >> 3);
+ if (addr0%8)
+ dev_warn(ipu->dev,
+ "IDMAC%d's EBA0 is not 8-byte aligned\n", ch);
+ if (addr1%8)
+ dev_warn(ipu->dev,
+ "IDMAC%d's EBA1 is not 8-byte aligned\n", ch);
+
+ switch (pixel_fmt) {
+ case IPU_PIX_FMT_GENERIC:
+ /*Represents 8-bit Generic data */
+ ipu_ch_param_set_field(&params, 0, 107, 3, 5); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 6); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 63); /* burst size */
+
+ break;
+ case IPU_PIX_FMT_GENERIC_16:
+ /* Represents 16-bit generic data */
+ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 6); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+
+ break;
+ case IPU_PIX_FMT_GENERIC_32:
+ /*Represents 32-bit Generic data */
+ break;
+ case IPU_PIX_FMT_RGB565:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 5, 0, 6, 5, 5, 11, 8, 16);
+ break;
+ case IPU_PIX_FMT_BGRA4444:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 4, 4, 4, 8, 4, 12, 4, 0);
+ break;
+ case IPU_PIX_FMT_BGRA5551:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 5, 1, 5, 6, 5, 11, 1, 0);
+ break;
+ case IPU_PIX_FMT_BGR24:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 8, 0, 8, 8, 8, 16, 8, 24);
+ break;
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_YUV444:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 8, 16, 8, 8, 8, 0, 8, 24);
+ break;
+ case IPU_PIX_FMT_VYU444:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 8, 8, 8, 0, 8, 16, 8, 24);
+ break;
+ case IPU_PIX_FMT_AYUV:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 8, 8, 8, 16, 8, 24, 8, 0);
+ break;
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_BGR32:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 8, 8, 8, 16, 8, 24, 8, 0);
+ break;
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_RGB32:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 8, 24, 8, 16, 8, 8, 8, 0);
+ break;
+ case IPU_PIX_FMT_ABGR32:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
+
+ _ipu_ch_params_set_packing(&params, 8, 0, 8, 8, 8, 16, 8, 24);
+ break;
+ case IPU_PIX_FMT_UYVY:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 0xA); /* pix format */
+ if ((ch == 8) || (ch == 9) || (ch == 10)) {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
+ } else {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+ }
+ break;
+ case IPU_PIX_FMT_YUYV:
+ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 0x8); /* pix format */
+ if ((ch == 8) || (ch == 9) || (ch == 10)) {
+ if (ipu->vdoa_en) {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31);
+ } else {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15);
+ }
+ } else {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+ }
+ break;
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YUV420P:
+ ipu_ch_param_set_field(&params, 1, 85, 4, 2); /* pix format */
+
+ if (uv_stride < stride / 2)
+ uv_stride = stride / 2;
+
+ u_offset = stride * height;
+ v_offset = u_offset + (uv_stride * height / 2);
+ if ((ch == 8) || (ch == 9) || (ch == 10)) {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
+ uv_stride = uv_stride*2;
+ } else {
+ if (_ipu_is_smfc_chan(ch) &&
+ ipu->smfc_idmac_12bit_3planar_bs_fixup)
+ bs = 15;
+ else
+ bs = 31;
+ ipu_ch_param_set_field(&params, 1, 78, 7, bs); /* burst size */
+ }
+ break;
+ case IPU_PIX_FMT_YVU420P:
+ ipu_ch_param_set_field(&params, 1, 85, 4, 2); /* pix format */
+
+ if (uv_stride < stride / 2)
+ uv_stride = stride / 2;
+
+ v_offset = stride * height;
+ u_offset = v_offset + (uv_stride * height / 2);
+ if ((ch == 8) || (ch == 9) || (ch == 10)) {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
+ uv_stride = uv_stride*2;
+ } else {
+ if (_ipu_is_smfc_chan(ch) &&
+ ipu->smfc_idmac_12bit_3planar_bs_fixup)
+ bs = 15;
+ else
+ bs = 31;
+ ipu_ch_param_set_field(&params, 1, 78, 7, bs); /* burst size */
+ }
+ break;
+ case IPU_PIX_FMT_YVU422P:
+ /* BPP & pixel format */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+
+ if (uv_stride < stride / 2)
+ uv_stride = stride / 2;
+
+ v_offset = (v == 0) ? stride * height : v;
+ u_offset = (u == 0) ? v_offset + v_offset / 2 : u;
+ break;
+ case IPU_PIX_FMT_YUV422P:
+ /* BPP & pixel format */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+
+ if (uv_stride < stride / 2)
+ uv_stride = stride / 2;
+
+ u_offset = (u == 0) ? stride * height : u;
+ v_offset = (v == 0) ? u_offset + u_offset / 2 : v;
+ break;
+ case IPU_PIX_FMT_YUV444P:
+ /* BPP & pixel format */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 0); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+ uv_stride = stride;
+ u_offset = (u == 0) ? stride * height : u;
+ v_offset = (v == 0) ? u_offset * 2 : v;
+ break;
+ case IPU_PIX_FMT_NV16:
+ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+ uv_stride = stride;
+ u_offset = (u == 0) ? stride * height : u;
+ break;
+ case IPU_PIX_FMT_NV12:
+ /* BPP & pixel format */
+ ipu_ch_param_set_field(&params, 1, 85, 4, 4); /* pix format */
+ uv_stride = stride;
+ u_offset = (u == 0) ? stride * height : u;
+ if ((ch == 8) || (ch == 9) || (ch == 10)) {
+ if (ipu->vdoa_en) {
+ /* one field buffer, memory width 64bits */
+ ipu_ch_param_set_field(&params, 1, 78, 7, 63);
+ } else {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 15);
+ /* top/bottom field in one buffer*/
+ uv_stride = uv_stride*2;
+ }
+ } else {
+ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
+ }
+ break;
+ default:
+ dev_err(ipu->dev, "mxc ipu: unimplemented pixel format\n");
+ break;
+ }
+
+ if (uv_stride)
+ ipu_ch_param_set_field(&params, 1, 128, 14, uv_stride - 1);
+
+ /* Get the uv offset from user when need cropping */
+ if (u || v) {
+ u_offset = u;
+ v_offset = v;
+ }
+
+ /* UBO and VBO are 22-bit and 8-byte aligned */
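+ /* a 22-bit offset in 8-byte units allows values up to roughly 32 MiB */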
+ if (u_offset/8 > 0x3fffff)
+ dev_warn(ipu->dev,
+ "IDMAC%d's U offset exceeds IPU limitation\n", ch);
+ if (v_offset/8 > 0x3fffff)
+ dev_warn(ipu->dev,
+ "IDMAC%d's V offset exceeds IPU limitation\n", ch);
+ if (u_offset%8)
+ dev_warn(ipu->dev,
+ "IDMAC%d's U offset is not 8-byte aligned\n", ch);
+ if (v_offset%8)
+ dev_warn(ipu->dev,
+ "IDMAC%d's V offset is not 8-byte aligned\n", ch);
+
+ ipu_ch_param_set_field(&params, 0, 46, 22, u_offset / 8);
+ ipu_ch_param_set_field(&params, 0, 68, 22, v_offset / 8);
+
+ dev_dbg(ipu->dev, "initializing idma ch %d @ %p\n", ch, ipu_ch_param_addr(ipu, ch));
+ fill_cpmem(ipu, ch, &params);
+ if (addr2) {
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+
+ ipu_ch_param_set_field(&params, 1, 0, 29, addr2 >> 3);
+ ipu_ch_param_set_field(&params, 1, 29, 29, 0);
+ if (addr2%8)
+ dev_warn(ipu->dev,
+ "IDMAC%d's sub-CPMEM entry%d EBA0 is not "
+ "8-byte aligned\n", ch, sub_ch);
+
+ dev_dbg(ipu->dev, "initializing idma ch %d @ %p sub cpmem\n", ch,
+ ipu_ch_param_addr(ipu, sub_ch));
+ fill_cpmem(ipu, sub_ch, &params);
+ }
+};
+
+static inline void _ipu_ch_param_set_burst_size(struct ipu_soc *ipu,
+ uint32_t ch,
+ uint16_t burst_pixels)
+{
+ int32_t sub_ch = 0;
+
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7,
+ burst_pixels - 1);
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 78, 7,
+ burst_pixels - 1);
+};
+
+static inline int _ipu_ch_param_get_burst_size(struct ipu_soc *ipu, uint32_t ch)
+{
+ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7) + 1;
+};
+
+static inline int _ipu_ch_param_get_bpp(struct ipu_soc *ipu, uint32_t ch)
+{
+ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 107, 3);
+};
+
+static inline void _ipu_ch_param_set_buffer(struct ipu_soc *ipu, uint32_t ch,
+ int bufNum, dma_addr_t phyaddr)
+{
+ if (bufNum == 2) {
+ ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (ch <= 0)
+ return;
+ bufNum = 0;
+ }
+
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 29 * bufNum, 29,
+ phyaddr / 8);
+};
+
+static inline void _ipu_ch_param_set_rotation(struct ipu_soc *ipu, uint32_t ch,
+ ipu_rotate_mode_t rot)
+{
+ u32 temp_rot = bitrev8(rot) >> 5;
+ int32_t sub_ch = 0;
+
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 119, 3, temp_rot);
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 119, 3, temp_rot);
+};
+
+static inline void _ipu_ch_param_set_block_mode(struct ipu_soc *ipu, uint32_t ch)
+{
+ int32_t sub_ch = 0;
+
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 117, 2, 1);
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 117, 2, 1);
+};
+
+static inline void _ipu_ch_param_set_alpha_use_separate_channel(struct ipu_soc *ipu,
+ uint32_t ch,
+ bool option)
+{
+ int32_t sub_ch = 0;
+
+ if (option) {
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 89, 1, 1);
+ } else {
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 89, 1, 0);
+ }
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+
+ if (option) {
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 89, 1, 1);
+ } else {
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 89, 1, 0);
+ }
+};
+
+static inline void _ipu_ch_param_set_alpha_condition_read(struct ipu_soc *ipu, uint32_t ch)
+{
+ int32_t sub_ch = 0;
+
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 149, 1, 1);
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 149, 1, 1);
+};
+
+static inline void _ipu_ch_param_set_alpha_buffer_memory(struct ipu_soc *ipu, uint32_t ch)
+{
+ int alp_mem_idx;
+ int32_t sub_ch = 0;
+
+ switch (ch) {
+ case 14: /* PRP graphic */
+ alp_mem_idx = 0;
+ break;
+ case 15: /* PP graphic */
+ alp_mem_idx = 1;
+ break;
+ case 23: /* DP BG SYNC graphic */
+ alp_mem_idx = 4;
+ break;
+ case 27: /* DP FG SYNC graphic */
+ alp_mem_idx = 2;
+ break;
+ default:
+ dev_err(ipu->dev, "unsupported correlative channel of local "
+ "alpha channel\n");
+ return;
+ }
+
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 90, 3, alp_mem_idx);
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 90, 3, alp_mem_idx);
+};
+
+static inline void _ipu_ch_param_set_interlaced_scan(struct ipu_soc *ipu, uint32_t ch)
+{
+ u32 stride;
+ int32_t sub_ch = 0;
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+
+ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch), 0, 113, 1, 1);
+ if (sub_ch > 0)
+ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 113, 1, 1);
+ stride = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14) + 1;
+ /* ILO is 20-bit and 8-byte aligned */
+ if (stride/8 > 0xfffff)
+ dev_warn(ipu->dev,
+ "IDMAC%d's ILO exceeds IPU limitation\n", ch);
+ if (stride%8)
+ dev_warn(ipu->dev,
+ "IDMAC%d's ILO is not 8-byte aligned\n", ch);
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 58, 20, stride / 8);
+ if (sub_ch > 0)
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 58, 20,
+ stride / 8);
+ stride *= 2;
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14, stride - 1);
+ if (sub_ch > 0)
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 102, 14,
+ stride - 1);
+};
+
+static inline void _ipu_ch_param_set_axi_id(struct ipu_soc *ipu, uint32_t ch, uint32_t id)
+{
+ int32_t sub_ch = 0;
+
+ id %= 4;
+
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 93, 2, id);
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 93, 2, id);
+};
+
+static inline int _ipu_ch_param_get_axi_id(struct ipu_soc *ipu, uint32_t ch)
+{
+ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 93, 2);
+}
+
+static inline int __ipu_ch_offset_calc(uint32_t pixel_fmt,
+ uint32_t width,
+ uint32_t height,
+ uint32_t stride,
+ uint32_t u,
+ uint32_t v,
+ uint32_t uv_stride,
+ uint32_t vertical_offset,
+ uint32_t horizontal_offset,
+ uint32_t *u_offset,
+ uint32_t *v_offset)
+{
+ uint32_t u_fix = 0;
+ uint32_t v_fix = 0;
+
+ switch (pixel_fmt) {
+ case IPU_PIX_FMT_GENERIC:
+ case IPU_PIX_FMT_GENERIC_16:
+ case IPU_PIX_FMT_GENERIC_32:
+ case IPU_PIX_FMT_RGB565:
+ case IPU_PIX_FMT_BGR24:
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_YUV444:
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_RGB32:
+ case IPU_PIX_FMT_ABGR32:
+ case IPU_PIX_FMT_UYVY:
+ case IPU_PIX_FMT_YUYV:
+ case IPU_PIX_FMT_GPU32_SB_ST:
+ case IPU_PIX_FMT_GPU32_SB_SRT:
+ case IPU_PIX_FMT_GPU32_ST:
+ case IPU_PIX_FMT_GPU32_SRT:
+ case IPU_PIX_FMT_GPU16_SB_ST:
+ case IPU_PIX_FMT_GPU16_SB_SRT:
+ case IPU_PIX_FMT_GPU16_ST:
+ case IPU_PIX_FMT_GPU16_SRT:
+ *u_offset = 0;
+ *v_offset = 0;
+ break;
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YUV420P:
+ if (uv_stride < stride / 2)
+ uv_stride = stride / 2;
+
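+ /*
+ * Note (added for clarity): the first two terms below add up to
+ * stride * height - (stride * vertical_offset + horizontal_offset),
+ * so u_offset is the Y plane size minus the luma crop offset plus
+ * the chroma crop offset, which suggests a UBO measured from a
+ * buffer address that already includes the luma crop.
+ */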
+ *u_offset = stride * (height - vertical_offset - 1) +
+ (stride - horizontal_offset) +
+ (uv_stride * vertical_offset / 2) +
+ horizontal_offset / 2;
+ *v_offset = *u_offset + (uv_stride * height / 2);
+ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
+ (horizontal_offset / 2) -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *u_offset;
+ v_fix = v ? (v + (uv_stride * vertical_offset / 2) +
+ (horizontal_offset / 2) -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *v_offset;
+ break;
+ case IPU_PIX_FMT_YVU420P:
+ if (uv_stride < stride / 2)
+ uv_stride = stride / 2;
+
+ *v_offset = stride * (height - vertical_offset - 1) +
+ (stride - horizontal_offset) +
+ (uv_stride * vertical_offset / 2) +
+ horizontal_offset / 2;
+ *u_offset = *v_offset + (uv_stride * height / 2);
+ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
+ (horizontal_offset / 2) -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *u_offset;
+ v_fix = v ? (v + (uv_stride * vertical_offset / 2) +
+ (horizontal_offset / 2) -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *v_offset;
+ break;
+ case IPU_PIX_FMT_YVU422P:
+ if (uv_stride < stride / 2)
+ uv_stride = stride / 2;
+
+ *v_offset = stride * (height - vertical_offset - 1) +
+ (stride - horizontal_offset) +
+ (uv_stride * vertical_offset) +
+ horizontal_offset / 2;
+ *u_offset = *v_offset + uv_stride * height;
+ u_fix = u ? (u + (uv_stride * vertical_offset) +
+ horizontal_offset / 2 -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *u_offset;
+ v_fix = v ? (v + (uv_stride * vertical_offset) +
+ horizontal_offset / 2 -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *v_offset;
+ break;
+ case IPU_PIX_FMT_YUV422P:
+ if (uv_stride < stride / 2)
+ uv_stride = stride / 2;
+
+ *u_offset = stride * (height - vertical_offset - 1) +
+ (stride - horizontal_offset) +
+ (uv_stride * vertical_offset) +
+ horizontal_offset / 2;
+ *v_offset = *u_offset + uv_stride * height;
+ u_fix = u ? (u + (uv_stride * vertical_offset) +
+ horizontal_offset / 2 -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *u_offset;
+ v_fix = v ? (v + (uv_stride * vertical_offset) +
+ horizontal_offset / 2 -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *v_offset;
+ break;
+ case IPU_PIX_FMT_YUV444P:
+ uv_stride = stride;
+ *u_offset = stride * (height - vertical_offset - 1) +
+ (stride - horizontal_offset) +
+ (uv_stride * vertical_offset) +
+ horizontal_offset;
+ *v_offset = *u_offset + uv_stride * height;
+ u_fix = u ? (u + (uv_stride * vertical_offset) +
+ horizontal_offset -
+ (stride * vertical_offset) -
+ (horizontal_offset)) :
+ *u_offset;
+ v_fix = v ? (v + (uv_stride * vertical_offset) +
+ horizontal_offset -
+ (stride * vertical_offset) -
+ (horizontal_offset)) :
+ *v_offset;
+ break;
+ case IPU_PIX_FMT_NV12:
+ case IPU_PIX_FMT_NV16:
+ case PRE_PIX_FMT_NV21:
+ case PRE_PIX_FMT_NV61:
+ uv_stride = stride;
+ *u_offset = stride * (height - vertical_offset - 1) +
+ (stride - horizontal_offset) +
+ (uv_stride * vertical_offset / 2) +
+ horizontal_offset;
+ *v_offset = 0;
+ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
+ horizontal_offset -
+ (stride * vertical_offset) - (horizontal_offset)) :
+ *u_offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (u_fix > *u_offset)
+ *u_offset = u_fix;
+
+ if (v_fix > *v_offset)
+ *v_offset = v_fix;
+
+ return 0;
+}
+
+/* IDMAC U/V offset changing support */
+/* U and V input is not affected, */
+/* the update is done by new calculation according to */
+/* vertical_offset and horizontal_offset */
+static inline void _ipu_ch_offset_update(struct ipu_soc *ipu,
+ int ch,
+ uint32_t pixel_fmt,
+ uint32_t width,
+ uint32_t height,
+ uint32_t stride,
+ uint32_t u,
+ uint32_t v,
+ uint32_t uv_stride,
+ uint32_t vertical_offset,
+ uint32_t horizontal_offset)
+{
+ uint32_t u_offset = 0;
+ uint32_t v_offset = 0;
+ uint32_t old_offset = 0;
+ int32_t sub_ch = 0;
+ int ret;
+
+ ret = __ipu_ch_offset_calc(pixel_fmt, width, height, stride,
+ u, v, uv_stride,
+ vertical_offset, horizontal_offset,
+ &u_offset, &v_offset);
+ if (ret) {
+ dev_err(ipu->dev, "mxc ipu: unimplemented pixel format\n");
+ return;
+ }
+
+ /* UBO and VBO are 22-bit and 8-byte aligned */
+ if (u_offset/8 > 0x3fffff)
+ dev_warn(ipu->dev,
+ "IDMAC%d's U offset exceeds IPU limitation\n", ch);
+ if (v_offset/8 > 0x3fffff)
+ dev_warn(ipu->dev,
+ "IDMAC%d's V offset exceeds IPU limitation\n", ch);
+ if (u_offset%8)
+ dev_warn(ipu->dev,
+ "IDMAC%d's U offset is not 8-byte aligned\n", ch);
+ if (v_offset%8)
+ dev_warn(ipu->dev,
+ "IDMAC%d's V offset is not 8-byte aligned\n", ch);
+
+ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22);
+ if (old_offset != u_offset / 8)
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22, u_offset / 8);
+ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22);
+ if (old_offset != v_offset / 8)
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22, v_offset / 8);
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 46, 22);
+ if (old_offset != u_offset / 8)
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 46, 22, u_offset / 8);
+ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 68, 22);
+ if (old_offset != v_offset / 8)
+ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 68, 22, v_offset / 8);
+};
+
+static inline void _ipu_ch_params_set_alpha_width(struct ipu_soc *ipu, uint32_t ch, int alpha_width)
+{
+ int32_t sub_ch = 0;
+
+ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch), 1, 125, 3, alpha_width - 1);
+
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 125, 3, alpha_width - 1);
+};
+
+static inline void _ipu_ch_param_set_bandmode(struct ipu_soc *ipu,
+ uint32_t ch, uint32_t band_height)
+{
+ int32_t sub_ch = 0;
+
+ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch),
+ 0, 114, 3, band_height - 1);
+ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
+ if (sub_ch <= 0)
+ return;
+ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch),
+ 0, 114, 3, band_height - 1);
+
+ dev_dbg(ipu->dev, "BNDM 0x%x, ",
+ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 114, 3));
+}
+
+/*
+ * The IPUv3 IDMAC has a bug to read 32bpp pixels from a graphics plane
+ * whose alpha component is at the most significant 8 bits. The bug only
+ * impacts on cases in which the relevant separate alpha channel is enabled.
+ *
+ * Return true on bad alpha component position, otherwise, return false.
+ */
+static inline bool _ipu_ch_param_bad_alpha_pos(uint32_t pixel_fmt)
+{
+ switch (pixel_fmt) {
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_RGB32:
+ return true;
+ }
+
+ return false;
+}
+#endif
diff --git a/drivers/mxc/ipu3/ipu_pixel_clk.c b/drivers/mxc/ipu3/ipu_pixel_clk.c
new file mode 100644
index 000000000000..4efb6b7d909a
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_pixel_clk.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2013-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file ipu_pixel_clk.c
+ *
+ * @brief IPU pixel clock implementation
+ *
+ * @ingroup IPU
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ipu-v3.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "ipu_prv.h"
+#include "ipu_regs.h"
+
+ /*
+ * Muxed clock implementation
+ */
+struct clk_di_mux {
+ struct clk_hw hw;
+ u8 ipu_id;
+ u8 di_id;
+ u8 flags;
+ u8 index;
+};
+#define to_clk_di_mux(_hw) container_of(_hw, struct clk_di_mux, hw)
+
+static int _ipu_pixel_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_di_mux *mux = to_clk_di_mux(hw);
+ struct ipu_soc *ipu = ipu_get_soc(mux->ipu_id);
+ u32 di_gen;
+
+ di_gen = ipu_di_read(ipu, mux->di_id, DI_GENERAL);
+ if (index == 0)
+ /* ipu1_clk or ipu2_clk internal clk */
+ di_gen &= ~DI_GEN_DI_CLK_EXT;
+ else
+ di_gen |= DI_GEN_DI_CLK_EXT;
+
+ ipu_di_write(ipu, mux->di_id, di_gen, DI_GENERAL);
+ mux->index = index;
+ pr_debug("ipu_pixel_clk: di_clk_ext:0x%x, di_gen reg:0x%x.\n",
+ !(di_gen & DI_GEN_DI_CLK_EXT), di_gen);
+ return 0;
+}
+
+static u8 _ipu_pixel_clk_get_parent(struct clk_hw *hw)
+{
+ struct clk_di_mux *mux = to_clk_di_mux(hw);
+
+ return mux->index;
+}
+
+const struct clk_ops clk_mux_di_ops = {
+ .get_parent = _ipu_pixel_clk_get_parent,
+ .set_parent = _ipu_pixel_clk_set_parent,
+};
+
+struct clk *clk_register_mux_pix_clk(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents, unsigned long flags,
+ u8 ipu_id, u8 di_id, u8 clk_mux_flags)
+{
+ struct clk_di_mux *mux;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ mux = kzalloc(sizeof(struct clk_di_mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_mux_di_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ mux->ipu_id = ipu_id;
+ mux->di_id = di_id;
+ mux->flags = clk_mux_flags | CLK_SET_RATE_PARENT;
+ mux->hw.init = &init;
+
+ clk = clk_register(dev, &mux->hw);
+ if (IS_ERR(clk))
+ kfree(mux);
+
+ return clk;
+}
+
+/*
+ * Divider clock implementation
+ */
+struct clk_di_div {
+ struct clk_hw hw;
+ u8 ipu_id;
+ u8 di_id;
+ u8 flags;
+};
+#define to_clk_di_div(_hw) container_of(_hw, struct clk_di_div, hw)
+
+static unsigned long _ipu_pixel_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_di_div *di_div = to_clk_di_div(hw);
+ struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
+ u32 div;
+ u64 final_rate = (unsigned long long)parent_rate * 16;
+
+ _ipu_get(ipu);
+ div = ipu_di_read(ipu, di_div->di_id, DI_BS_CLKGEN0);
+ _ipu_put(ipu);
+ pr_debug("ipu_di%d read BS_CLKGEN0 div:%d, final_rate:%lld, prate:%ld\n",
+ di_div->di_id, div, final_rate, parent_rate);
+
+ if (div == 0)
+ return 0;
+ do_div(final_rate, div);
+
+ return (unsigned long)final_rate;
+}
+
+static long _ipu_pixel_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_clk_rate)
+{
+ u64 div, final_rate;
+ u32 remainder;
+ u64 parent_rate = (unsigned long long)(*parent_clk_rate) * 16;
+
+ /*
+ * Calculate the divider.
+ * The fractional part is 4 bits, so the parent rate is
+ * multiplied by 2^4 before dividing.
+ */
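+ /*
+ * Worked example with illustrative numbers: a 264 MHz parent and a
+ * requested 66 MHz pixel clock give div = 264 * 16 / 66 = 64 = 0x40,
+ * i.e. 4.0 with 4 fractional bits, so the resulting rate is
+ * 264 MHz / 4 = 66 MHz.
+ */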
+ div = parent_rate;
+ remainder = do_div(div, rate);
+ /* Round the divider value */
+ if (remainder > (rate/2))
+ div++;
+ if (div < 0x10) /* Min DI disp clock divider is 1 */
+ div = 0x10;
+ if (div & ~0xFEF)
+ div &= 0xFF8;
+ else {
+ /* Round up divider if it gets us closer to desired pix clk */
+ if ((div & 0xC) == 0xC) {
+ div += 0x10;
+ div &= ~0xF;
+ }
+ }
+ final_rate = parent_rate;
+ do_div(final_rate, div);
+
+ return final_rate;
+}
+
+static int _ipu_pixel_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_clk_rate)
+{
+ struct clk_di_div *di_div = to_clk_di_div(hw);
+ struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
+ u64 div, parent_rate;
+ u32 remainder;
+
+ parent_rate = (unsigned long long)parent_clk_rate * 16;
+ div = parent_rate;
+ remainder = do_div(div, rate);
+ /* Round the divider value */
+ if (remainder > (rate/2))
+ div++;
+
+ /* Round up divider if it gets us closer to desired pix clk */
+ if ((div & 0xC) == 0xC) {
+ div += 0x10;
+ div &= ~0xF;
+ }
+ if (div > 0x1000)
+ pr_err("Overflow, di:%d, DI_BS_CLKGEN0 div:0x%x\n",
+ di_div->di_id, (u32)div);
+ _ipu_get(ipu);
+ ipu_di_write(ipu, di_div->di_id, (u32)div, DI_BS_CLKGEN0);
+
+ /* Setup pixel clock timing */
+ /* FIXME: needs to be more flexible */
+ /* Down time is half of period */
+ ipu_di_write(ipu, di_div->di_id, ((u32)div / 16) << 16, DI_BS_CLKGEN1);
+ _ipu_put(ipu);
+
+ return 0;
+}
+
+static struct clk_ops clk_div_ops = {
+ .recalc_rate = _ipu_pixel_clk_div_recalc_rate,
+ .round_rate = _ipu_pixel_clk_div_round_rate,
+ .set_rate = _ipu_pixel_clk_div_set_rate,
+};
+
+struct clk *clk_register_div_pix_clk(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ u8 ipu_id, u8 di_id, u8 clk_div_flags)
+{
+ struct clk_di_div *di_div;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ di_div = kzalloc(sizeof(struct clk_di_div), GFP_KERNEL);
+ if (!di_div)
+ return ERR_PTR(-ENOMEM);
+
+ /* struct clk_di_div assignments */
+ di_div->ipu_id = ipu_id;
+ di_div->di_id = di_id;
+ di_div->flags = clk_div_flags;
+
+ init.name = name;
+ init.ops = &clk_div_ops;
+ init.flags = flags | CLK_SET_RATE_PARENT;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ di_div->hw.init = &init;
+
+ clk = clk_register(dev, &di_div->hw);
+ if (IS_ERR(clk))
+ kfree(di_div);
+
+ return clk;
+}
+
+/*
+ * Gated clock implementation
+ */
+struct clk_di_gate {
+ struct clk_hw hw;
+ u8 ipu_id;
+ u8 di_id;
+ u8 flags;
+};
+#define to_clk_di_gate(_hw) container_of(_hw, struct clk_di_gate, hw)
+
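+ /*
+ * The pixel clock is gated by releasing or holding the DI counter
+ * (DI0/DI1_COUNTER_RELEASE in IPU_DISP_GEN), as done below.
+ */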
+static int _ipu_pixel_clk_enable(struct clk_hw *hw)
+{
+ struct clk_di_gate *gate = to_clk_di_gate(hw);
+ struct ipu_soc *ipu = ipu_get_soc(gate->ipu_id);
+ u32 disp_gen;
+
+ disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
+ disp_gen |= gate->di_id ? DI1_COUNTER_RELEASE : DI0_COUNTER_RELEASE;
+ ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
+
+ return 0;
+}
+
+static void _ipu_pixel_clk_disable(struct clk_hw *hw)
+{
+ struct clk_di_gate *gate = to_clk_di_gate(hw);
+ struct ipu_soc *ipu = ipu_get_soc(gate->ipu_id);
+ u32 disp_gen;
+
+ disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
+ disp_gen &= gate->di_id ? ~DI1_COUNTER_RELEASE : ~DI0_COUNTER_RELEASE;
+ ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
+}
+
+static struct clk_ops clk_gate_di_ops = {
+ .enable = _ipu_pixel_clk_enable,
+ .disable = _ipu_pixel_clk_disable,
+};
+
+struct clk *clk_register_gate_pix_clk(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ u8 ipu_id, u8 di_id, u8 clk_gate_flags)
+{
+ struct clk_di_gate *gate;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate = kzalloc(sizeof(struct clk_di_gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->ipu_id = ipu_id;
+ gate->di_id = di_id;
+ gate->flags = clk_gate_flags;
+
+ init.name = name;
+ init.ops = &clk_gate_di_ops;
+ init.flags = flags | CLK_SET_RATE_PARENT;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ gate->hw.init = &init;
+
+ clk = clk_register(dev, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
diff --git a/drivers/mxc/ipu3/ipu_prv.h b/drivers/mxc/ipu3/ipu_prv.h
new file mode 100644
index 000000000000..0a77b580d6a1
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_prv.h
@@ -0,0 +1,368 @@
+/*
+ * Copyright 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef __INCLUDE_IPU_PRV_H__
+#define __INCLUDE_IPU_PRV_H__
+
+#include <linux/clkdev.h>
+#include <linux/device.h>
+#include <linux/fsl_devices.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#define MXC_IPU_MAX_NUM 2
+#define MXC_DI_NUM_PER_IPU 2
+
+/* Globals */
+extern int dmfc_type_setup;
+
+#define IDMA_CHAN_INVALID 0xFF
+#define HIGH_RESOLUTION_WIDTH 1024
+
+enum ipuv3_type {
+ IPUv3D, /* i.MX37 */
+ IPUv3EX, /* i.MX51 */
+ IPUv3M, /* i.MX53 */
+ IPUv3H, /* i.MX6Q/SDL */
+};
+
+#define IPU_MAX_VDI_IN_WIDTH(type) ({ (type) >= IPUv3M ? 968 : 720; })
+
+struct ipu_irq_node {
+ irqreturn_t(*handler) (int, void *); /*!< the ISR */
+ const char *name; /*!< device associated with the interrupt */
+ void *dev_id; /*!< some unique information for the ISR */
+ __u32 flags; /*!< not used */
+};
+
+enum csc_type_t {
+ RGB2YUV = 0,
+ YUV2RGB,
+ RGB2RGB,
+ YUV2YUV,
+ CSC_NONE,
+ CSC_NUM
+};
+
+struct ipu_soc {
+ unsigned int id;
+ unsigned int devtype;
+ bool online;
+
+ /*clk*/
+ struct clk *ipu_clk;
+ struct clk *di_clk[2];
+ struct clk *di_clk_sel[2];
+ struct clk *pixel_clk[2];
+ bool pixel_clk_en[2];
+ struct clk *pixel_clk_sel[2];
+ struct clk *csi_clk[2];
+ struct clk *prg_clk;
+
+ /*irq*/
+ int irq_sync;
+ int irq_err;
+ struct ipu_irq_node irq_list[IPU_IRQ_COUNT];
+
+ /*reg*/
+ void __iomem *cm_reg;
+ void __iomem *idmac_reg;
+ void __iomem *dp_reg;
+ void __iomem *ic_reg;
+ void __iomem *dc_reg;
+ void __iomem *dc_tmpl_reg;
+ void __iomem *dmfc_reg;
+ void __iomem *di_reg[2];
+ void __iomem *smfc_reg;
+ void __iomem *csi_reg[2];
+ void __iomem *cpmem_base;
+ void __iomem *tpmem_base;
+ void __iomem *vdi_reg;
+
+ struct device *dev;
+
+ ipu_channel_t csi_channel[2];
+ ipu_channel_t using_ic_dirct_ch;
+ unsigned char dc_di_assignment[10];
+ bool sec_chan_en[24];
+ bool thrd_chan_en[24];
+ bool chan_is_interlaced[52];
+ uint32_t channel_init_mask;
+ uint32_t channel_enable_mask;
+
+ /*use count*/
+ int dc_use_count;
+ int dp_use_count;
+ int dmfc_use_count;
+ int smfc_use_count;
+ int ic_use_count;
+ int rot_use_count;
+ int vdi_use_count;
+ int di_use_count[2];
+ int csi_use_count[2];
+
+ struct mutex mutex_lock;
+ spinlock_t int_reg_spin_lock;
+ spinlock_t rdy_reg_spin_lock;
+
+ int dmfc_size_28;
+ int dmfc_size_29;
+ int dmfc_size_24;
+ int dmfc_size_27;
+ int dmfc_size_23;
+
+ enum csc_type_t fg_csc_type;
+ enum csc_type_t bg_csc_type;
+ bool color_key_4rgb;
+ bool dc_swap;
+ struct completion dc_comp;
+ struct completion csi_comp;
+
+ struct rot_mem {
+ void *vaddr;
+ dma_addr_t paddr;
+ int size;
+ } rot_dma[2];
+
+ int vdoa_en;
+ struct task_struct *thread[2];
+
+ /*
+ * Bypass reset to avoid the display channel being
+ * stopped by probe, since it may already have been
+ * started by the bootloader.
+ */
+ bool bypass_reset;
+
+ unsigned int ch0123_axi;
+ unsigned int ch23_axi;
+ unsigned int ch27_axi;
+ unsigned int ch28_axi;
+ unsigned int normal_axi;
+
+ bool smfc_idmac_12bit_3planar_bs_fixup; /* workaround little stripes */
+};
+
+struct ipu_channel {
+ u8 video_in_dma;
+ u8 alpha_in_dma;
+ u8 graph_in_dma;
+ u8 out_dma;
+};
+
+enum ipu_dmfc_type {
+ DMFC_NORMAL = 0,
+ DMFC_HIGH_RESOLUTION_DC,
+ DMFC_HIGH_RESOLUTION_DP,
+ DMFC_HIGH_RESOLUTION_ONLY_DP,
+};
+
+static inline int _ipu_is_smfc_chan(uint32_t dma_chan)
+{
+ return ((dma_chan >= 0) && (dma_chan <= 3));
+}
+
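+/*
+ * Register accessors: each helper reads or writes one of the ioremapped
+ * IPU sub-block register banks (CM, IDMAC, DC, DMFC, DP, DI, CSI, SMFC,
+ * VDI, IC) held in struct ipu_soc.
+ */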
+static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->cm_reg + offset);
+}
+
+static inline void ipu_cm_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->cm_reg + offset);
+}
+
+static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->idmac_reg + offset);
+}
+
+static inline void ipu_idmac_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->idmac_reg + offset);
+}
+
+static inline u32 ipu_dc_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->dc_reg + offset);
+}
+
+static inline void ipu_dc_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->dc_reg + offset);
+}
+
+static inline u32 ipu_dc_tmpl_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->dc_tmpl_reg + offset);
+}
+
+static inline void ipu_dc_tmpl_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->dc_tmpl_reg + offset);
+}
+
+static inline u32 ipu_dmfc_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->dmfc_reg + offset);
+}
+
+static inline void ipu_dmfc_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->dmfc_reg + offset);
+}
+
+static inline u32 ipu_dp_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->dp_reg + offset);
+}
+
+static inline void ipu_dp_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->dp_reg + offset);
+}
+
+static inline u32 ipu_di_read(struct ipu_soc *ipu, int di, unsigned offset)
+{
+ return readl(ipu->di_reg[di] + offset);
+}
+
+static inline void ipu_di_write(struct ipu_soc *ipu, int di,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->di_reg[di] + offset);
+}
+
+static inline u32 ipu_csi_read(struct ipu_soc *ipu, int csi, unsigned offset)
+{
+ return readl(ipu->csi_reg[csi] + offset);
+}
+
+static inline void ipu_csi_write(struct ipu_soc *ipu, int csi,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->csi_reg[csi] + offset);
+}
+
+static inline u32 ipu_smfc_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->smfc_reg + offset);
+}
+
+static inline void ipu_smfc_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->smfc_reg + offset);
+}
+
+static inline u32 ipu_vdi_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->vdi_reg + offset);
+}
+
+static inline void ipu_vdi_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->vdi_reg + offset);
+}
+
+static inline u32 ipu_ic_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->ic_reg + offset);
+}
+
+static inline void ipu_ic_write(struct ipu_soc *ipu,
+ u32 value, unsigned offset)
+{
+ writel(value, ipu->ic_reg + offset);
+}
+
+int register_ipu_device(struct ipu_soc *ipu, int id);
+void unregister_ipu_device(struct ipu_soc *ipu, int id);
+ipu_color_space_t format_to_colorspace(uint32_t fmt);
+bool ipu_pixel_format_has_alpha(uint32_t fmt);
+
+void ipu_dump_registers(struct ipu_soc *ipu);
+
+uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel);
+
+void ipu_disp_init(struct ipu_soc *ipu);
+void _ipu_init_dc_mappings(struct ipu_soc *ipu);
+int _ipu_dp_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t in_pixel_fmt,
+ uint32_t out_pixel_fmt);
+void _ipu_dp_uninit(struct ipu_soc *ipu, ipu_channel_t channel);
+void _ipu_dc_init(struct ipu_soc *ipu, int dc_chan, int di, bool interlaced, uint32_t pixel_fmt);
+void _ipu_dc_uninit(struct ipu_soc *ipu, int dc_chan);
+void _ipu_dp_dc_enable(struct ipu_soc *ipu, ipu_channel_t channel);
+void _ipu_dp_dc_disable(struct ipu_soc *ipu, ipu_channel_t channel, bool swap);
+void _ipu_dmfc_init(struct ipu_soc *ipu, int dmfc_type, int first);
+void _ipu_dmfc_set_wait4eot(struct ipu_soc *ipu, int dma_chan, int width);
+void _ipu_dmfc_set_burst_size(struct ipu_soc *ipu, int dma_chan, int burst_size);
+int _ipu_disp_chan_is_interlaced(struct ipu_soc *ipu, ipu_channel_t channel);
+
+void _ipu_ic_enable_task(struct ipu_soc *ipu, ipu_channel_t channel);
+void _ipu_ic_disable_task(struct ipu_soc *ipu, ipu_channel_t channel);
+int _ipu_ic_init_prpvf(struct ipu_soc *ipu, ipu_channel_params_t *params,
+ bool src_is_csi);
+void _ipu_vdi_init(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params);
+void _ipu_vdi_uninit(struct ipu_soc *ipu);
+void _ipu_ic_uninit_prpvf(struct ipu_soc *ipu);
+void _ipu_ic_init_rotate_vf(struct ipu_soc *ipu, ipu_channel_params_t *params);
+void _ipu_ic_uninit_rotate_vf(struct ipu_soc *ipu);
+void _ipu_ic_init_csi(struct ipu_soc *ipu, ipu_channel_params_t *params);
+void _ipu_ic_uninit_csi(struct ipu_soc *ipu);
+int _ipu_ic_init_prpenc(struct ipu_soc *ipu, ipu_channel_params_t *params,
+ bool src_is_csi);
+void _ipu_ic_uninit_prpenc(struct ipu_soc *ipu);
+void _ipu_ic_init_rotate_enc(struct ipu_soc *ipu, ipu_channel_params_t *params);
+void _ipu_ic_uninit_rotate_enc(struct ipu_soc *ipu);
+int _ipu_ic_init_pp(struct ipu_soc *ipu, ipu_channel_params_t *params);
+void _ipu_ic_uninit_pp(struct ipu_soc *ipu);
+void _ipu_ic_init_rotate_pp(struct ipu_soc *ipu, ipu_channel_params_t *params);
+void _ipu_ic_uninit_rotate_pp(struct ipu_soc *ipu);
+int _ipu_ic_idma_init(struct ipu_soc *ipu, int dma_chan, uint16_t width, uint16_t height,
+ int burst_size, ipu_rotate_mode_t rot);
+void _ipu_vdi_toggle_top_field_man(struct ipu_soc *ipu);
+int _ipu_csi_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t csi);
+int _ipu_csi_set_mipi_di(struct ipu_soc *ipu, uint32_t num, uint32_t di_val, uint32_t csi);
+void ipu_csi_set_test_generator(struct ipu_soc *ipu, bool active, uint32_t r_value,
+ uint32_t g_value, uint32_t b_value,
+ uint32_t pix_clk, uint32_t csi);
+void _ipu_csi_ccir_err_detection_enable(struct ipu_soc *ipu, uint32_t csi);
+void _ipu_csi_ccir_err_detection_disable(struct ipu_soc *ipu, uint32_t csi);
+void _ipu_csi_wait4eof(struct ipu_soc *ipu, ipu_channel_t channel);
+void _ipu_smfc_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t mipi_id, uint32_t csi);
+void _ipu_smfc_set_burst_size(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t bs);
+void _ipu_dp_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3]);
+int32_t _ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
+ int16_t x_pos, int16_t y_pos);
+int32_t _ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
+ int16_t *x_pos, int16_t *y_pos);
+void _ipu_get(struct ipu_soc *ipu);
+void _ipu_put(struct ipu_soc *ipu);
+
+struct clk *clk_register_mux_pix_clk(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents, unsigned long flags,
+ u8 ipu_id, u8 di_id, u8 clk_mux_flags);
+struct clk *clk_register_div_pix_clk(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ u8 ipu_id, u8 di_id, u8 clk_div_flags);
+struct clk *clk_register_gate_pix_clk(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ u8 ipu_id, u8 di_id, u8 clk_gate_flags);
+#endif /* __INCLUDE_IPU_PRV_H__ */
diff --git a/drivers/mxc/ipu3/ipu_regs.h b/drivers/mxc/ipu3/ipu_regs.h
new file mode 100644
index 000000000000..79051cf5f229
--- /dev/null
+++ b/drivers/mxc/ipu3/ipu_regs.h
@@ -0,0 +1,702 @@
+/*
+ * Copyright (C) 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*
+ * @file ipu_regs.h
+ *
+ * @brief IPU Register definitions
+ *
+ * @ingroup IPU
+ */
+#ifndef __IPU_REGS_INCLUDED__
+#define __IPU_REGS_INCLUDED__
+
+#include "ipu_prv.h"
+
+#define IPU_MCU_T_DEFAULT 8
+
+/* Register addresses */
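+/*
+ * Offsets below are relative to the corresponding sub-block base
+ * (cm_reg, idmac_reg, di_reg[], ...) ioremapped into struct ipu_soc;
+ * see ipu_prv.h.
+ */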
+/* IPU Common registers */
+#define IPU_CM_REG(offset) (offset)
+
+#define IPU_CONF IPU_CM_REG(0)
+#define IPU_SRM_PRI1 IPU_CM_REG(0x00A0)
+#define IPU_SRM_PRI2 IPU_CM_REG(0x00A4)
+#define IPU_FS_PROC_FLOW1 IPU_CM_REG(0x00A8)
+#define IPU_FS_PROC_FLOW2 IPU_CM_REG(0x00AC)
+#define IPU_FS_PROC_FLOW3 IPU_CM_REG(0x00B0)
+#define IPU_FS_DISP_FLOW1 IPU_CM_REG(0x00B4)
+#define IPU_FS_DISP_FLOW2 IPU_CM_REG(0x00B8)
+#define IPU_SKIP IPU_CM_REG(0x00BC)
+#define IPU_DISP_ALT_CONF IPU_CM_REG(0x00C0)
+#define IPU_DISP_GEN IPU_CM_REG(0x00C4)
+#define IPU_DISP_ALT1 IPU_CM_REG(0x00C8)
+#define IPU_DISP_ALT2 IPU_CM_REG(0x00CC)
+#define IPU_DISP_ALT3 IPU_CM_REG(0x00D0)
+#define IPU_DISP_ALT4 IPU_CM_REG(0x00D4)
+#define IPU_SNOOP IPU_CM_REG(0x00D8)
+#define IPU_MEM_RST IPU_CM_REG(0x00DC)
+#define IPU_PM IPU_CM_REG(0x00E0)
+#define IPU_GPR IPU_CM_REG(0x00E4)
+#define IPU_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
+#define IPU_ALT_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
+/*
+ * IPUv3D doesn't support triple buffer, so point
+ * IPU_CHA_TRB_MODE_SEL, IPU_CHA_TRIPLE_CUR_BUF and
+ * IPU_CHA_BUF2_RDY to readonly
+ * IPU_ALT_CUR_BUF0 for IPUv3D.
+ */
+#define IPU_CHA_TRB_MODE_SEL(type, ch) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0178 + 4 * ((ch) / 32)) : \
+ (0x012C); })
+#define IPU_CHA_TRIPLE_CUR_BUF(type, ch) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0258 + \
+ 4 * (((ch) * 2) / 32)) : \
+ (0x012C); })
+#define IPU_CHA_BUF2_RDY(type, ch) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0288 + 4 * ((ch) / 32)) : \
+ (0x012C); })
+#define IPU_CHA_CUR_BUF(type, ch) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x023C + 4 * ((ch) / 32)) : \
+ (0x0124 + 4 * ((ch) / 32)); })
+#define IPU_ALT_CUR_BUF0(type) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0244) : \
+ (0x012C); })
+#define IPU_ALT_CUR_BUF1(type) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0248) : \
+ (0x0130); })
+#define IPU_SRM_STAT(type) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x024C) : \
+ (0x0134); })
+#define IPU_PROC_TASK_STAT(type) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0250) : \
+ (0x0138); })
+#define IPU_DISP_TASK_STAT(type) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0254) : \
+ (0x013C); })
+#define IPU_CHA_BUF0_RDY(type, ch) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0268 + 4 * ((ch) / 32)) : \
+ (0x0140 + 4 * ((ch) / 32)); })
+#define IPU_CHA_BUF1_RDY(type, ch) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0270 + 4 * ((ch) / 32)) : \
+ (0x0148 + 4 * ((ch) / 32)); })
+#define IPU_ALT_CHA_BUF0_RDY(type, ch) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0278 + 4 * ((ch) / 32)) : \
+ (0x0158 + 4 * ((ch) / 32)); })
+#define IPU_ALT_CHA_BUF1_RDY(type, ch) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0280 + 4 * ((ch) / 32)) : \
+ (0x0160 + 4 * ((ch) / 32)); })
+
+#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * ((n) - 1))
+#define IPU_INT_STAT(type, n) IPU_CM_REG({(type) >= IPUv3EX ? \
+ (0x0200 + 4 * ((n) - 1)) : \
+ (0x00E8 + 4 * ((n) - 1)); })
+
+#define IPUIRQ_2_STATREG(type, irq) IPU_CM_REG(IPU_INT_STAT((type), 1) + 4 \
+ * ((irq) / 32))
+#define IPUIRQ_2_CTRLREG(irq) IPU_CM_REG(IPU_INT_CTRL(1) + 4 * ((irq) / 32))
+#define IPUIRQ_2_MASK(irq) (1UL << ((irq) & 0x1F))
+
+/* IPU VDI registers */
+#define IPU_VDI_REG(offset) (offset)
+
+#define VDI_FSIZE IPU_VDI_REG(0)
+#define VDI_C IPU_VDI_REG(0x0004)
+
+/* IPU CSI Registers */
+#define IPU_CSI_REG(offset) (offset)
+
+#define CSI_SENS_CONF IPU_CSI_REG(0)
+#define CSI_SENS_FRM_SIZE IPU_CSI_REG(0x0004)
+#define CSI_ACT_FRM_SIZE IPU_CSI_REG(0x0008)
+#define CSI_OUT_FRM_CTRL IPU_CSI_REG(0x000C)
+#define CSI_TST_CTRL IPU_CSI_REG(0x0010)
+#define CSI_CCIR_CODE_1 IPU_CSI_REG(0x0014)
+#define CSI_CCIR_CODE_2 IPU_CSI_REG(0x0018)
+#define CSI_CCIR_CODE_3 IPU_CSI_REG(0x001C)
+#define CSI_MIPI_DI IPU_CSI_REG(0x0020)
+#define CSI_SKIP IPU_CSI_REG(0x0024)
+#define CSI_CPD_CTRL IPU_CSI_REG(0x0028)
+#define CSI_CPD_RC(n) IPU_CSI_REG(0x002C + 4 * (n))
+#define CSI_CPD_RS(n) IPU_CSI_REG(0x004C + 4 * (n))
+#define CSI_CPD_GRC(n) IPU_CSI_REG(0x005C + 4 * (n))
+#define CSI_CPD_GRS(n) IPU_CSI_REG(0x007C + 4 * (n))
+#define CSI_CPD_GBC(n) IPU_CSI_REG(0x008C + 4 * (n))
+#define CSI_CPD_GBS(n) IPU_CSI_REG(0x00AC + 4 * (n))
+#define CSI_CPD_BC(n) IPU_CSI_REG(0x00BC + 4 * (n))
+#define CSI_CPD_BS(n) IPU_CSI_REG(0x00DC + 4 * (n))
+#define CSI_CPD_OFFSET1 IPU_CSI_REG(0x00EC)
+#define CSI_CPD_OFFSET2 IPU_CSI_REG(0x00F0)
+
+/* IPU SMFC Registers */
+#define IPU_SMFC_REG(offset) (offset)
+
+#define SMFC_MAP IPU_SMFC_REG(0)
+#define SMFC_WMC IPU_SMFC_REG(0x0004)
+#define SMFC_BS IPU_SMFC_REG(0x0008)
+
+/* IPU IC Registers */
+#define IPU_IC_REG(offset) (offset)
+
+#define IC_CONF IPU_IC_REG(0)
+#define IC_PRP_ENC_RSC IPU_IC_REG(0x0004)
+#define IC_PRP_VF_RSC IPU_IC_REG(0x0008)
+#define IC_PP_RSC IPU_IC_REG(0x000C)
+#define IC_CMBP_1 IPU_IC_REG(0x0010)
+#define IC_CMBP_2 IPU_IC_REG(0x0014)
+#define IC_IDMAC_1 IPU_IC_REG(0x0018)
+#define IC_IDMAC_2 IPU_IC_REG(0x001C)
+#define IC_IDMAC_3 IPU_IC_REG(0x0020)
+#define IC_IDMAC_4 IPU_IC_REG(0x0024)
+
+/* IPU IDMAC Registers */
+#define IPU_IDMAC_REG(offset) (offset)
+
+#define IDMAC_CONF IPU_IDMAC_REG(0x0000)
+#define IDMAC_CHA_EN(ch) IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
+#define IDMAC_SEP_ALPHA IPU_IDMAC_REG(0x000C)
+#define IDMAC_ALT_SEP_ALPHA IPU_IDMAC_REG(0x0010)
+#define IDMAC_CHA_PRI(ch) IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
+#define IDMAC_WM_EN(ch) IPU_IDMAC_REG(0x001C + 4 * ((ch) / 32))
+#define IDMAC_CH_LOCK_EN_1(type) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x0024) : 0; })
+#define IDMAC_CH_LOCK_EN_2(type) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x0028) : \
+ (0x0024); })
+#define IDMAC_SUB_ADDR_0(type) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x002C) : \
+ (0x0028); })
+#define IDMAC_SUB_ADDR_1(type) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x0030) : \
+ (0x002C); })
+#define IDMAC_SUB_ADDR_2(type) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x0034) : \
+ (0x0030); })
+/*
+ * IPUv3D doesn't support IDMAC_SUB_ADDR_3 and IDMAC_SUB_ADDR_4,
+ * so point them to readonly IDMAC_CHA_BUSY1 for IPUv3D.
+ */
+#define IDMAC_SUB_ADDR_3(type) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x0038) : \
+ (0x0040); })
+#define IDMAC_SUB_ADDR_4(type) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x003C) : \
+ (0x0040); })
+#define IDMAC_BAND_EN(type, ch) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x0040 + 4 * ((ch) / 32)) : \
+ (0x0034 + 4 * ((ch) / 32)); })
+#define IDMAC_CHA_BUSY(type, ch) IPU_IDMAC_REG({(type) >= IPUv3EX ? \
+ (0x0100 + 4 * ((ch) / 32)) : \
+ (0x0040 + 4 * ((ch) / 32)); })
+
+/* IPU DI Registers */
+#define IPU_DI_REG(offset) (offset)
+
+#define DI_GENERAL IPU_DI_REG(0)
+#define DI_BS_CLKGEN0 IPU_DI_REG(0x0004)
+#define DI_BS_CLKGEN1 IPU_DI_REG(0x0008)
+#define DI_SW_GEN0(gen) IPU_DI_REG(0x000C + 4 * ((gen) - 1))
+#define DI_SW_GEN1(gen) IPU_DI_REG(0x0030 + 4 * ((gen) - 1))
+#define DI_STP_REP(gen) IPU_DI_REG(0x0148 + 4 * (((gen) - 1) / 2))
+#define DI_SYNC_AS_GEN IPU_DI_REG(0x0054)
+#define DI_DW_GEN(gen) IPU_DI_REG(0x0058 + 4 * (gen))
+#define DI_DW_SET(gen, set) IPU_DI_REG(0x0088 + 4 * ((gen) + 0xC * (set)))
+#define DI_SER_CONF IPU_DI_REG(0x015C)
+#define DI_SSC IPU_DI_REG(0x0160)
+#define DI_POL IPU_DI_REG(0x0164)
+#define DI_AW0 IPU_DI_REG(0x0168)
+#define DI_AW1 IPU_DI_REG(0x016C)
+#define DI_SCR_CONF IPU_DI_REG(0x0170)
+#define DI_STAT IPU_DI_REG(0x0174)
+
+/* IPU DMFC Registers */
+#define IPU_DMFC_REG(offset) (offset)
+
+#define DMFC_RD_CHAN IPU_DMFC_REG(0)
+#define DMFC_WR_CHAN IPU_DMFC_REG(0x0004)
+#define DMFC_WR_CHAN_DEF IPU_DMFC_REG(0x0008)
+#define DMFC_DP_CHAN IPU_DMFC_REG(0x000C)
+#define DMFC_DP_CHAN_DEF IPU_DMFC_REG(0x0010)
+#define DMFC_GENERAL1 IPU_DMFC_REG(0x0014)
+#define DMFC_GENERAL2 IPU_DMFC_REG(0x0018)
+#define DMFC_IC_CTRL IPU_DMFC_REG(0x001C)
+#define DMFC_STAT IPU_DMFC_REG(0x0020)
+
+/* IPU DC Registers */
+#define IPU_DC_REG(offset) (offset)
+
+#define DC_MAP_CONF_PTR(n) IPU_DC_REG(0x0108 + ((n) & ~0x1) * 2)
+#define DC_MAP_CONF_VAL(n) IPU_DC_REG(0x0144 + ((n) & ~0x1) * 2)
+
+#define _RL_CH_2_OFFSET(ch) (((ch) == 0) ? 8 : ( \
+ ((ch) == 1) ? 0x24 : ( \
+ ((ch) == 2) ? 0x40 : ( \
+ ((ch) == 5) ? 0x64 : ( \
+ ((ch) == 6) ? 0x80 : ( \
+ ((ch) == 8) ? 0x9C : ( \
+ ((ch) == 9) ? 0xBC : (-1))))))))
+#define DC_RL_CH(ch, evt) IPU_DC_REG(_RL_CH_2_OFFSET(ch) + \
+ ((evt) & ~0x1) * 2)
+
+#define DC_EVT_NF 0
+#define DC_EVT_NL 1
+#define DC_EVT_EOF 2
+#define DC_EVT_NFIELD 3
+#define DC_EVT_EOL 4
+#define DC_EVT_EOFIELD 5
+#define DC_EVT_NEW_ADDR 6
+#define DC_EVT_NEW_CHAN 7
+#define DC_EVT_NEW_DATA 8
+
+#define DC_EVT_NEW_ADDR_W_0 0
+#define DC_EVT_NEW_ADDR_W_1 1
+#define DC_EVT_NEW_CHAN_W_0 2
+#define DC_EVT_NEW_CHAN_W_1 3
+#define DC_EVT_NEW_DATA_W_0 4
+#define DC_EVT_NEW_DATA_W_1 5
+#define DC_EVT_NEW_ADDR_R_0 6
+#define DC_EVT_NEW_ADDR_R_1 7
+#define DC_EVT_NEW_CHAN_R_0 8
+#define DC_EVT_NEW_CHAN_R_1 9
+#define DC_EVT_NEW_DATA_R_0 10
+#define DC_EVT_NEW_DATA_R_1 11
+#define DC_EVEN_UGDE0 12
+#define DC_ODD_UGDE0 13
+#define DC_EVEN_UGDE1 14
+#define DC_ODD_UGDE1 15
+#define DC_EVEN_UGDE2 16
+#define DC_ODD_UGDE2 17
+#define DC_EVEN_UGDE3 18
+#define DC_ODD_UGDE3 19
+
+#define dc_ch_offset(ch) \
+({ \
+ const u8 _offset[] = { \
+ 0, 0x1C, 0x38, 0x54, 0x58, 0x5C, 0x78, 0, 0x94, 0xB4}; \
+ _offset[ch]; \
+})
+#define DC_WR_CH_CONF(ch) IPU_DC_REG(dc_ch_offset(ch))
+#define DC_WR_CH_ADDR(ch) IPU_DC_REG(dc_ch_offset(ch) + 4)
+
+#define DC_WR_CH_CONF_1 IPU_DC_REG(0x001C)
+#define DC_WR_CH_ADDR_1 IPU_DC_REG(0x0020)
+#define DC_WR_CH_CONF_5 IPU_DC_REG(0x005C)
+#define DC_WR_CH_ADDR_5 IPU_DC_REG(0x0060)
+#define DC_GEN IPU_DC_REG(0x00D4)
+#define DC_DISP_CONF1(disp) IPU_DC_REG(0x00D8 + 4 * (disp))
+#define DC_DISP_CONF2(disp) IPU_DC_REG(0x00E8 + 4 * (disp))
+#define DC_STAT IPU_DC_REG(0x01C8)
+#define DC_UGDE_0(evt) IPU_DC_REG(0x0174 + 16 * (evt))
+#define DC_UGDE_1(evt) IPU_DC_REG(0x0178 + 16 * (evt))
+#define DC_UGDE_2(evt) IPU_DC_REG(0x017C + 16 * (evt))
+#define DC_UGDE_3(evt) IPU_DC_REG(0x0180 + 16 * (evt))
+
+/* IPU DP Registers */
+#define IPU_DP_REG(offset) (offset)
+
+#define DP_SYNC 0
+#define DP_ASYNC0 0x60
+#define DP_ASYNC1 0xBC
+#define DP_COM_CONF(flow) IPU_DP_REG(flow)
+#define DP_GRAPH_WIND_CTRL(flow) IPU_DP_REG(0x0004 + (flow))
+#define DP_FG_POS(flow) IPU_DP_REG(0x0008 + (flow))
+#define DP_GAMMA_C(flow, i) IPU_DP_REG(0x0014 + (flow) + 4 * (i))
+#define DP_GAMMA_S(flow, i) IPU_DP_REG(0x0034 + (flow) + 4 * (i))
+#define DP_CSC_A_0(flow) IPU_DP_REG(0x0044 + (flow))
+#define DP_CSC_A_1(flow) IPU_DP_REG(0x0048 + (flow))
+#define DP_CSC_A_2(flow) IPU_DP_REG(0x004C + (flow))
+#define DP_CSC_A_3(flow) IPU_DP_REG(0x0050 + (flow))
+#define DP_CSC_0(flow) IPU_DP_REG(0x0054 + (flow))
+#define DP_CSC_1(flow) IPU_DP_REG(0x0058 + (flow))
+
+enum {
+ IPU_CONF_CSI0_EN = 0x00000001,
+ IPU_CONF_CSI1_EN = 0x00000002,
+ IPU_CONF_IC_EN = 0x00000004,
+ IPU_CONF_ROT_EN = 0x00000008,
+ IPU_CONF_ISP_EN = 0x00000010,
+ IPU_CONF_DP_EN = 0x00000020,
+ IPU_CONF_DI0_EN = 0x00000040,
+ IPU_CONF_DI1_EN = 0x00000080,
+ IPU_CONF_DMFC_EN = 0x00000400,
+ IPU_CONF_SMFC_EN = 0x00000100,
+ IPU_CONF_DC_EN = 0x00000200,
+ IPU_CONF_VDI_EN = 0x00001000,
+ IPU_CONF_IDMAC_DIS = 0x00400000,
+ IPU_CONF_IC_DMFC_SEL = 0x02000000,
+ IPU_CONF_IC_DMFC_SYNC = 0x04000000,
+ IPU_CONF_VDI_DMFC_SYNC = 0x08000000,
+ IPU_CONF_CSI0_DATA_SOURCE = 0x10000000,
+ IPU_CONF_CSI0_DATA_SOURCE_OFFSET = 28,
+ IPU_CONF_CSI1_DATA_SOURCE = 0x20000000,
+ IPU_CONF_IC_INPUT = 0x40000000,
+ IPU_CONF_CSI_SEL = 0x80000000,
+
+ DI0_COUNTER_RELEASE = 0x01000000,
+ DI1_COUNTER_RELEASE = 0x02000000,
+
+ FS_PRPVF_ROT_SRC_SEL_MASK = 0x00000F00,
+ FS_PRPVF_ROT_SRC_SEL_OFFSET = 8,
+ FS_PRPENC_ROT_SRC_SEL_MASK = 0x0000000F,
+ FS_PRPENC_ROT_SRC_SEL_OFFSET = 0,
+ FS_PP_ROT_SRC_SEL_MASK = 0x000F0000,
+ FS_PP_ROT_SRC_SEL_OFFSET = 16,
+ FS_PP_SRC_SEL_MASK = 0x0000F000,
+ FS_PP_SRC_SEL_VDOA = 0x00008000,
+ FS_PP_SRC_SEL_OFFSET = 12,
+ FS_PRP_SRC_SEL_MASK = 0x0F000000,
+ FS_PRP_SRC_SEL_OFFSET = 24,
+ FS_VF_IN_VALID = 0x80000000,
+ FS_ENC_IN_VALID = 0x40000000,
+ FS_VDI_SRC_SEL_MASK = 0x30000000,
+ FS_VDI_SRC_SEL_VDOA = 0x20000000,
+ FS_VDOA_DEST_SEL_MASK = 0x00030000,
+ FS_VDOA_DEST_SEL_VDI = 0x00020000,
+ FS_VDOA_DEST_SEL_IC = 0x00010000,
+ FS_VDI_SRC_SEL_OFFSET = 28,
+
+
+ FS_PRPENC_DEST_SEL_MASK = 0x0000000F,
+ FS_PRPENC_DEST_SEL_OFFSET = 0,
+ FS_PRPVF_DEST_SEL_MASK = 0x000000F0,
+ FS_PRPVF_DEST_SEL_OFFSET = 4,
+ FS_PRPVF_ROT_DEST_SEL_MASK = 0x00000F00,
+ FS_PRPVF_ROT_DEST_SEL_OFFSET = 8,
+ FS_PP_DEST_SEL_MASK = 0x0000F000,
+ FS_PP_DEST_SEL_OFFSET = 12,
+ FS_PP_ROT_DEST_SEL_MASK = 0x000F0000,
+ FS_PP_ROT_DEST_SEL_OFFSET = 16,
+ FS_PRPENC_ROT_DEST_SEL_MASK = 0x00F00000,
+ FS_PRPENC_ROT_DEST_SEL_OFFSET = 20,
+
+ FS_SMFC0_DEST_SEL_MASK = 0x0000000F,
+ FS_SMFC0_DEST_SEL_OFFSET = 0,
+ FS_SMFC1_DEST_SEL_MASK = 0x00000070,
+ FS_SMFC1_DEST_SEL_OFFSET = 4,
+ FS_SMFC2_DEST_SEL_MASK = 0x00000780,
+ FS_SMFC2_DEST_SEL_OFFSET = 7,
+ FS_SMFC3_DEST_SEL_MASK = 0x00003800,
+ FS_SMFC3_DEST_SEL_OFFSET = 11,
+
+ FS_DC1_SRC_SEL_MASK = 0x00F00000,
+ FS_DC1_SRC_SEL_OFFSET = 20,
+ FS_DC2_SRC_SEL_MASK = 0x000F0000,
+ FS_DC2_SRC_SEL_OFFSET = 16,
+ FS_DP_SYNC0_SRC_SEL_MASK = 0x0000000F,
+ FS_DP_SYNC0_SRC_SEL_OFFSET = 0,
+ FS_DP_SYNC1_SRC_SEL_MASK = 0x000000F0,
+ FS_DP_SYNC1_SRC_SEL_OFFSET = 4,
+ FS_DP_ASYNC0_SRC_SEL_MASK = 0x00000F00,
+ FS_DP_ASYNC0_SRC_SEL_OFFSET = 8,
+ FS_DP_ASYNC1_SRC_SEL_MASK = 0x0000F000,
+ FS_DP_ASYNC1_SRC_SEL_OFFSET = 12,
+
+ FS_AUTO_REF_PER_MASK = 0,
+ FS_AUTO_REF_PER_OFFSET = 16,
+
+ TSTAT_VF_MASK = 0x0000000C,
+ TSTAT_VF_OFFSET = 2,
+ TSTAT_VF_ROT_MASK = 0x00000300,
+ TSTAT_VF_ROT_OFFSET = 8,
+ TSTAT_ENC_MASK = 0x00000003,
+ TSTAT_ENC_OFFSET = 0,
+ TSTAT_ENC_ROT_MASK = 0x000000C0,
+ TSTAT_ENC_ROT_OFFSET = 6,
+ TSTAT_PP_MASK = 0x00000030,
+ TSTAT_PP_OFFSET = 4,
+ TSTAT_PP_ROT_MASK = 0x00000C00,
+ TSTAT_PP_ROT_OFFSET = 10,
+
+ TASK_STAT_IDLE = 0,
+ TASK_STAT_ACTIVE = 1,
+ TASK_STAT_WAIT4READY = 2,
+
+ /* IDMAC register bits */
+ IDMAC_CONF_USED_BUFS_EN_R = 0x02000000,
+ IDMAC_CONF_USED_BUFS_MAX_R_MASK = 0x01E00000,
+ IDMAC_CONF_USED_BUFS_MAX_R_OFFSET = 21,
+ IDMAC_CONF_USED_BUFS_EN_W = 0x00100000,
+ IDMAC_CONF_USED_BUFS_MAX_W_MASK = 0x000E0000,
+ IDMAC_CONF_USED_BUFS_MAX_W_OFFSET = 17,
+
+ /* Image Converter Register bits */
+ IC_CONF_PRPENC_EN = 0x00000001,
+ IC_CONF_PRPENC_CSC1 = 0x00000002,
+ IC_CONF_PRPENC_ROT_EN = 0x00000004,
+ IC_CONF_PRPVF_EN = 0x00000100,
+ IC_CONF_PRPVF_CSC1 = 0x00000200,
+ IC_CONF_PRPVF_CSC2 = 0x00000400,
+ IC_CONF_PRPVF_CMB = 0x00000800,
+ IC_CONF_PRPVF_ROT_EN = 0x00001000,
+ IC_CONF_PP_EN = 0x00010000,
+ IC_CONF_PP_CSC1 = 0x00020000,
+ IC_CONF_PP_CSC2 = 0x00040000,
+ IC_CONF_PP_CMB = 0x00080000,
+ IC_CONF_PP_ROT_EN = 0x00100000,
+ IC_CONF_IC_GLB_LOC_A = 0x10000000,
+ IC_CONF_KEY_COLOR_EN = 0x20000000,
+ IC_CONF_RWS_EN = 0x40000000,
+ IC_CONF_CSI_MEM_WR_EN = 0x80000000,
+
+ IC_RSZ_MAX_RESIZE_RATIO = 0x00004000,
+
+ IC_IDMAC_1_CB0_BURST_16 = 0x00000001,
+ IC_IDMAC_1_CB1_BURST_16 = 0x00000002,
+ IC_IDMAC_1_CB2_BURST_16 = 0x00000004,
+ IC_IDMAC_1_CB3_BURST_16 = 0x00000008,
+ IC_IDMAC_1_CB4_BURST_16 = 0x00000010,
+ IC_IDMAC_1_CB5_BURST_16 = 0x00000020,
+ IC_IDMAC_1_CB6_BURST_16 = 0x00000040,
+ IC_IDMAC_1_CB7_BURST_16 = 0x00000080,
+ IC_IDMAC_1_PRPENC_ROT_MASK = 0x00003800,
+ IC_IDMAC_1_PRPENC_ROT_OFFSET = 11,
+ IC_IDMAC_1_PRPVF_ROT_MASK = 0x0001C000,
+ IC_IDMAC_1_PRPVF_ROT_OFFSET = 14,
+ IC_IDMAC_1_PP_ROT_MASK = 0x000E0000,
+ IC_IDMAC_1_PP_ROT_OFFSET = 17,
+ IC_IDMAC_1_PP_FLIP_RS = 0x00400000,
+ IC_IDMAC_1_PRPVF_FLIP_RS = 0x00200000,
+ IC_IDMAC_1_PRPENC_FLIP_RS = 0x00100000,
+
+ IC_IDMAC_2_PRPENC_HEIGHT_MASK = 0x000003FF,
+ IC_IDMAC_2_PRPENC_HEIGHT_OFFSET = 0,
+ IC_IDMAC_2_PRPVF_HEIGHT_MASK = 0x000FFC00,
+ IC_IDMAC_2_PRPVF_HEIGHT_OFFSET = 10,
+ IC_IDMAC_2_PP_HEIGHT_MASK = 0x3FF00000,
+ IC_IDMAC_2_PP_HEIGHT_OFFSET = 20,
+
+ IC_IDMAC_3_PRPENC_WIDTH_MASK = 0x000003FF,
+ IC_IDMAC_3_PRPENC_WIDTH_OFFSET = 0,
+ IC_IDMAC_3_PRPVF_WIDTH_MASK = 0x000FFC00,
+ IC_IDMAC_3_PRPVF_WIDTH_OFFSET = 10,
+ IC_IDMAC_3_PP_WIDTH_MASK = 0x3FF00000,
+ IC_IDMAC_3_PP_WIDTH_OFFSET = 20,
+
+ CSI_SENS_CONF_DATA_FMT_SHIFT = 8,
+ CSI_SENS_CONF_DATA_FMT_MASK = 0x00000700,
+ CSI_SENS_CONF_DATA_FMT_RGB_YUV444 = 0L,
+ CSI_SENS_CONF_DATA_FMT_YUV422_YUYV = 1L,
+ CSI_SENS_CONF_DATA_FMT_YUV422_UYVY = 2L,
+ CSI_SENS_CONF_DATA_FMT_BAYER = 3L,
+ CSI_SENS_CONF_DATA_FMT_RGB565 = 4L,
+ CSI_SENS_CONF_DATA_FMT_RGB555 = 5L,
+ CSI_SENS_CONF_DATA_FMT_RGB444 = 6L,
+ CSI_SENS_CONF_DATA_FMT_JPEG = 7L,
+
+ CSI_SENS_CONF_VSYNC_POL_SHIFT = 0,
+ CSI_SENS_CONF_HSYNC_POL_SHIFT = 1,
+ CSI_SENS_CONF_DATA_POL_SHIFT = 2,
+ CSI_SENS_CONF_PIX_CLK_POL_SHIFT = 3,
+ CSI_SENS_CONF_SENS_PRTCL_MASK = 0x00000070L,
+ CSI_SENS_CONF_SENS_PRTCL_SHIFT = 4,
+ CSI_SENS_CONF_PACK_TIGHT_SHIFT = 7,
+ CSI_SENS_CONF_DATA_WIDTH_SHIFT = 11,
+ CSI_SENS_CONF_EXT_VSYNC_SHIFT = 15,
+ CSI_SENS_CONF_DIVRATIO_SHIFT = 16,
+
+ CSI_SENS_CONF_DIVRATIO_MASK = 0x00FF0000L,
+ CSI_SENS_CONF_DATA_DEST_SHIFT = 24,
+ CSI_SENS_CONF_DATA_DEST_MASK = 0x07000000L,
+ CSI_SENS_CONF_JPEG8_EN_SHIFT = 27,
+ CSI_SENS_CONF_JPEG_EN_SHIFT = 28,
+ CSI_SENS_CONF_FORCE_EOF_SHIFT = 29,
+ CSI_SENS_CONF_DATA_EN_POL_SHIFT = 31,
+
+ CSI_DATA_DEST_ISP = 1L,
+ CSI_DATA_DEST_IC = 2L,
+ CSI_DATA_DEST_IDMAC = 4L,
+
+ CSI_CCIR_ERR_DET_EN = 0x01000000L,
+ CSI_HORI_DOWNSIZE_EN = 0x80000000L,
+ CSI_VERT_DOWNSIZE_EN = 0x40000000L,
+ CSI_TEST_GEN_MODE_EN = 0x01000000L,
+
+ CSI_HSC_MASK = 0x1FFF0000,
+ CSI_HSC_SHIFT = 16,
+ CSI_VSC_MASK = 0x00000FFF,
+ CSI_VSC_SHIFT = 0,
+
+ CSI_TEST_GEN_R_MASK = 0x000000FFL,
+ CSI_TEST_GEN_R_SHIFT = 0,
+ CSI_TEST_GEN_G_MASK = 0x0000FF00L,
+ CSI_TEST_GEN_G_SHIFT = 8,
+ CSI_TEST_GEN_B_MASK = 0x00FF0000L,
+ CSI_TEST_GEN_B_SHIFT = 16,
+
+ CSI_MIPI_DI0_MASK = 0x000000FFL,
+ CSI_MIPI_DI0_SHIFT = 0,
+ CSI_MIPI_DI1_MASK = 0x0000FF00L,
+ CSI_MIPI_DI1_SHIFT = 8,
+ CSI_MIPI_DI2_MASK = 0x00FF0000L,
+ CSI_MIPI_DI2_SHIFT = 16,
+ CSI_MIPI_DI3_MASK = 0xFF000000L,
+ CSI_MIPI_DI3_SHIFT = 24,
+
+ CSI_MAX_RATIO_SKIP_ISP_MASK = 0x00070000L,
+ CSI_MAX_RATIO_SKIP_ISP_SHIFT = 16,
+ CSI_SKIP_ISP_MASK = 0x00F80000L,
+ CSI_SKIP_ISP_SHIFT = 19,
+ CSI_MAX_RATIO_SKIP_SMFC_MASK = 0x00000007L,
+ CSI_MAX_RATIO_SKIP_SMFC_SHIFT = 0,
+ CSI_SKIP_SMFC_MASK = 0x000000F8L,
+ CSI_SKIP_SMFC_SHIFT = 3,
+ CSI_ID_2_SKIP_MASK = 0x00000300L,
+ CSI_ID_2_SKIP_SHIFT = 8,
+
+ CSI_COLOR_FIRST_ROW_MASK = 0x00000002L,
+ CSI_COLOR_FIRST_COMP_MASK = 0x00000001L,
+
+ SMFC_MAP_CH0_MASK = 0x00000007L,
+ SMFC_MAP_CH0_SHIFT = 0,
+ SMFC_MAP_CH1_MASK = 0x00000038L,
+ SMFC_MAP_CH1_SHIFT = 3,
+ SMFC_MAP_CH2_MASK = 0x000001C0L,
+ SMFC_MAP_CH2_SHIFT = 6,
+ SMFC_MAP_CH3_MASK = 0x00000E00L,
+ SMFC_MAP_CH3_SHIFT = 9,
+
+ SMFC_WM0_SET_MASK = 0x00000007L,
+ SMFC_WM0_SET_SHIFT = 0,
+ SMFC_WM1_SET_MASK = 0x000001C0L,
+ SMFC_WM1_SET_SHIFT = 6,
+ SMFC_WM2_SET_MASK = 0x00070000L,
+ SMFC_WM2_SET_SHIFT = 16,
+ SMFC_WM3_SET_MASK = 0x01C00000L,
+ SMFC_WM3_SET_SHIFT = 22,
+
+ SMFC_WM0_CLR_MASK = 0x00000038L,
+ SMFC_WM0_CLR_SHIFT = 3,
+ SMFC_WM1_CLR_MASK = 0x00000E00L,
+ SMFC_WM1_CLR_SHIFT = 9,
+ SMFC_WM2_CLR_MASK = 0x00380000L,
+ SMFC_WM2_CLR_SHIFT = 19,
+ SMFC_WM3_CLR_MASK = 0x0E000000L,
+ SMFC_WM3_CLR_SHIFT = 25,
+
+ SMFC_BS0_MASK = 0x0000000FL,
+ SMFC_BS0_SHIFT = 0,
+ SMFC_BS1_MASK = 0x000000F0L,
+ SMFC_BS1_SHIFT = 4,
+ SMFC_BS2_MASK = 0x00000F00L,
+ SMFC_BS2_SHIFT = 8,
+ SMFC_BS3_MASK = 0x0000F000L,
+ SMFC_BS3_SHIFT = 12,
+
+ PF_CONF_TYPE_MASK = 0x00000007,
+ PF_CONF_TYPE_SHIFT = 0,
+ PF_CONF_PAUSE_EN = 0x00000010,
+ PF_CONF_RESET = 0x00008000,
+ PF_CONF_PAUSE_ROW_MASK = 0x00FF0000,
+ PF_CONF_PAUSE_ROW_SHIFT = 16,
+
+ DI_DW_GEN_ACCESS_SIZE_OFFSET = 24,
+ DI_DW_GEN_COMPONENT_SIZE_OFFSET = 16,
+
+ DI_GEN_DI_CLK_EXT = 0x100000,
+ DI_GEN_POLARITY_DISP_CLK = 0x00020000,
+ DI_GEN_POLARITY_1 = 0x00000001,
+ DI_GEN_POLARITY_2 = 0x00000002,
+ DI_GEN_POLARITY_3 = 0x00000004,
+ DI_GEN_POLARITY_4 = 0x00000008,
+ DI_GEN_POLARITY_5 = 0x00000010,
+ DI_GEN_POLARITY_6 = 0x00000020,
+ DI_GEN_POLARITY_7 = 0x00000040,
+ DI_GEN_POLARITY_8 = 0x00000080,
+
+ DI_POL_DRDY_DATA_POLARITY = 0x00000080,
+ DI_POL_DRDY_POLARITY_15 = 0x00000010,
+
+ DI_VSYNC_SEL_OFFSET = 13,
+
+ DC_WR_CH_CONF_FIELD_MODE = 0x00000200,
+ DC_WR_CH_CONF_PROG_TYPE_OFFSET = 5,
+ DC_WR_CH_CONF_PROG_TYPE_MASK = 0x000000E0,
+ DC_WR_CH_CONF_PROG_DI_ID = 0x00000004,
+ DC_WR_CH_CONF_PROG_DISP_ID_OFFSET = 3,
+ DC_WR_CH_CONF_PROG_DISP_ID_MASK = 0x00000018,
+
+ DC_UGDE_0_ODD_EN = 0x02000000,
+ DC_UGDE_0_ID_CODED_MASK = 0x00000007,
+ DC_UGDE_0_ID_CODED_OFFSET = 0,
+ DC_UGDE_0_EV_PRIORITY_MASK = 0x00000078,
+ DC_UGDE_0_EV_PRIORITY_OFFSET = 3,
+
+ DP_COM_CONF_FG_EN = 0x00000001,
+ DP_COM_CONF_GWSEL = 0x00000002,
+ DP_COM_CONF_GWAM = 0x00000004,
+ DP_COM_CONF_GWCKE = 0x00000008,
+ DP_COM_CONF_CSC_DEF_MASK = 0x00000300,
+ DP_COM_CONF_CSC_DEF_OFFSET = 8,
+ DP_COM_CONF_CSC_DEF_FG = 0x00000300,
+ DP_COM_CONF_CSC_DEF_BG = 0x00000200,
+ DP_COM_CONF_CSC_DEF_BOTH = 0x00000100,
+ DP_COM_CONF_GAMMA_EN = 0x00001000,
+ DP_COM_CONF_GAMMA_YUV_EN = 0x00002000,
+
+ DI_SER_CONF_LLA_SER_ACCESS = 0x00000020,
+ DI_SER_CONF_SERIAL_CLK_POL = 0x00000010,
+ DI_SER_CONF_SERIAL_DATA_POL = 0x00000008,
+ DI_SER_CONF_SERIAL_RS_POL = 0x00000004,
+ DI_SER_CONF_SERIAL_CS_POL = 0x00000002,
+ DI_SER_CONF_WAIT4SERIAL = 0x00000001,
+
+ VDI_C_CH_420 = 0x00000000,
+ VDI_C_CH_422 = 0x00000002,
+ VDI_C_MOT_SEL_FULL = 0x00000008,
+ VDI_C_MOT_SEL_LOW = 0x00000004,
+ VDI_C_MOT_SEL_MED = 0x00000000,
+ VDI_C_BURST_SIZE1_4 = 0x00000030,
+ VDI_C_BURST_SIZE2_4 = 0x00000300,
+ VDI_C_BURST_SIZE3_4 = 0x00003000,
+ VDI_C_BURST_SIZE_MASK = 0xF,
+ VDI_C_BURST_SIZE1_OFFSET = 4,
+ VDI_C_BURST_SIZE2_OFFSET = 8,
+ VDI_C_BURST_SIZE3_OFFSET = 12,
+ VDI_C_VWM1_SET_1 = 0x00000000,
+ VDI_C_VWM1_SET_2 = 0x00010000,
+ VDI_C_VWM1_CLR_2 = 0x00080000,
+ VDI_C_VWM3_SET_1 = 0x00000000,
+ VDI_C_VWM3_SET_2 = 0x00400000,
+ VDI_C_VWM3_CLR_2 = 0x02000000,
+ VDI_C_TOP_FIELD_MAN_1 = 0x40000000,
+ VDI_C_TOP_FIELD_AUTO_1 = 0x80000000,
+};
+
+enum di_pins {
+ DI_PIN11 = 0,
+ DI_PIN12 = 1,
+ DI_PIN13 = 2,
+ DI_PIN14 = 3,
+ DI_PIN15 = 4,
+ DI_PIN16 = 5,
+ DI_PIN17 = 6,
+ DI_PIN_CS = 7,
+
+ DI_PIN_SER_CLK = 0,
+ DI_PIN_SER_RS = 1,
+};
+
+enum di_sync_wave {
+ DI_SYNC_NONE = -1,
+ DI_SYNC_CLK = 0,
+ DI_SYNC_INT_HSYNC = 1,
+ DI_SYNC_HSYNC = 2,
+ DI_SYNC_VSYNC = 3,
+ DI_SYNC_DE = 5,
+};
+
+/* DC template opcodes */
+#define WROD(lf) (0x18 | (lf << 1))
+#define WRG (0x01)
+
+#endif
diff --git a/drivers/mxc/ipu3/pre-regs.h b/drivers/mxc/ipu3/pre-regs.h
new file mode 100644
index 000000000000..06a9b029ce3e
--- /dev/null
+++ b/drivers/mxc/ipu3/pre-regs.h
@@ -0,0 +1,480 @@
+/*
+ * Freescale PRE Register Definitions
+ *
+ * Copyright 2014-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __ARCH_ARM___PRE_H
+#define __ARCH_ARM___PRE_H
+
+
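+/*
+ * Naming convention used throughout this header:
+ *   HW_*   register offset (with _SET/_CLR/_TOG write-1-to-set/clear/toggle aliases)
+ *   BP_*   bit position (shift) of a field
+ *   BM_*   bit mask of a field
+ *   BF_*   build a field value (shift and mask)
+ *   BV_*   named value for a field
+ */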
+#define HW_PRE_CTRL (0x00000000)
+#define HW_PRE_CTRL_SET (0x00000004)
+#define HW_PRE_CTRL_CLR (0x00000008)
+#define HW_PRE_CTRL_TOG (0x0000000c)
+
+#define BM_PRE_CTRL_SFTRST 0x80000000
+#define BF_PRE_CTRL_SFTRST(v) \
+ (((v) << 31) & BM_PRE_CTRL_SFTRST)
+#define BM_PRE_CTRL_CLKGATE 0x40000000
+#define BF_PRE_CTRL_CLKGATE(v) \
+ (((v) << 30) & BM_PRE_CTRL_CLKGATE)
+#define BM_PRE_CTRL_TPR_RESET_SEL 0x20000000
+#define BF_PRE_CTRL_TPR_RESET_SEL(v) \
+ (((v) << 29) & BM_PRE_CTRL_TPR_RESET_SEL)
+#define BM_PRE_CTRL_EN_REPEAT 0x10000000
+#define BF_PRE_CTRL_EN_REPEAT(v) \
+ (((v) << 28) & BM_PRE_CTRL_EN_REPEAT)
+#define BP_PRE_CTRL_RSVD2 16
+#define BM_PRE_CTRL_RSVD2 0x0FFF0000
+#define BF_PRE_CTRL_RSVD2(v) \
+ (((v) << 16) & BM_PRE_CTRL_RSVD2)
+#define BP_PRE_CTRL_RSVD1 12
+#define BM_PRE_CTRL_RSVD1 0x0000F000
+#define BF_PRE_CTRL_RSVD1(v) \
+ (((v) << 12) & BM_PRE_CTRL_RSVD1)
+#define BM_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN 0x00000800
+#define BF_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN(v) \
+ (((v) << 11) & BM_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN)
+#define BV_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN__0 0x0
+#define BV_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN__1 0x1
+#define BP_PRE_CTRL_HANDSHAKE_LINE_NUM 9
+#define BM_PRE_CTRL_HANDSHAKE_LINE_NUM 0x00000600
+#define BF_PRE_CTRL_HANDSHAKE_LINE_NUM(v) \
+ (((v) << 9) & BM_PRE_CTRL_HANDSHAKE_LINE_NUM)
+#define BV_PRE_CTRL_HANDSHAKE_LINE_NUM__0 0x0
+#define BV_PRE_CTRL_HANDSHAKE_LINE_NUM__1 0x1
+#define BV_PRE_CTRL_HANDSHAKE_LINE_NUM__2 0x2
+#define BM_PRE_CTRL_HANDSHAKE_EN 0x00000100
+#define BF_PRE_CTRL_HANDSHAKE_EN(v) \
+ (((v) << 8) & BM_PRE_CTRL_HANDSHAKE_EN)
+#define BV_PRE_CTRL_HANDSHAKE_EN__0 0x0
+#define BV_PRE_CTRL_HANDSHAKE_EN__1 0x1
+#define BM_PRE_CTRL_INTERLACED_FIELD 0x00000080
+#define BF_PRE_CTRL_INTERLACED_FIELD(v) \
+ (((v) << 7) & BM_PRE_CTRL_INTERLACED_FIELD)
+#define BM_PRE_CTRL_SO 0x00000040
+#define BF_PRE_CTRL_SO(v) \
+ (((v) << 6) & BM_PRE_CTRL_SO)
+#define BM_PRE_CTRL_VFLIP 0x00000020
+#define BF_PRE_CTRL_VFLIP(v) \
+ (((v) << 5) & BM_PRE_CTRL_VFLIP)
+#define BM_PRE_CTRL_SDW_UPDATE 0x00000010
+#define BF_PRE_CTRL_SDW_UPDATE(v) \
+ (((v) << 4) & BM_PRE_CTRL_SDW_UPDATE)
+#define BM_PRE_CTRL_RSVD0 0x00000008
+#define BF_PRE_CTRL_RSVD0(v) \
+ (((v) << 3) & BM_PRE_CTRL_RSVD0)
+#define BM_PRE_CTRL_BLOCK_16 0x00000004
+#define BF_PRE_CTRL_BLOCK_16(v) \
+ (((v) << 2) & BM_PRE_CTRL_BLOCK_16)
+#define BV_PRE_CTRL_BLOCK_16__32x4 0x0
+#define BV_PRE_CTRL_BLOCK_16__16x4 0x1
+#define BM_PRE_CTRL_BLOCK_EN 0x00000002
+#define BF_PRE_CTRL_BLOCK_EN(v) \
+ (((v) << 1) & BM_PRE_CTRL_BLOCK_EN)
+#define BV_PRE_CTRL_BLOCK_EN__0 0x0
+#define BV_PRE_CTRL_BLOCK_EN__1 0x1
+#define BM_PRE_CTRL_ENABLE 0x00000001
+#define BF_PRE_CTRL_ENABLE(v) \
+ (((v) << 0) & BM_PRE_CTRL_ENABLE)
+
+#define HW_PRE_IRQ_MASK (0x00000010)
+#define HW_PRE_IRQ_MASK_SET (0x00000014)
+#define HW_PRE_IRQ_MASK_CLR (0x00000018)
+#define HW_PRE_IRQ_MASK_TOG (0x0000001c)
+
+#define BP_PRE_IRQ_MASK_RSVD1 4
+#define BM_PRE_IRQ_MASK_RSVD1 0xFFFFFFF0
+#define BF_PRE_IRQ_MASK_RSVD1(v) \
+ (((v) << 4) & BM_PRE_IRQ_MASK_RSVD1)
+#define BM_PRE_IRQ_MASK_TPR_RD_NUM_BYTES_OVFL_IRQ_EN 0x00000008
+#define BF_PRE_IRQ_MASK_TPR_RD_NUM_BYTES_OVFL_IRQ_EN(v) \
+ (((v) << 3) & BM_PRE_IRQ_MASK_TPR_RD_NUM_BYTES_OVFL_IRQ_EN)
+#define BM_PRE_IRQ_MASK_HANDSHAKE_ABORT_IRQ_EN 0x00000004
+#define BF_PRE_IRQ_MASK_HANDSHAKE_ABORT_IRQ_EN(v) \
+ (((v) << 2) & BM_PRE_IRQ_MASK_HANDSHAKE_ABORT_IRQ_EN)
+#define BM_PRE_IRQ_MASK_STORE_IRQ_EN 0x00000002
+#define BF_PRE_IRQ_MASK_STORE_IRQ_EN(v) \
+ (((v) << 1) & BM_PRE_IRQ_MASK_STORE_IRQ_EN)
+#define BM_PRE_IRQ_MASK_PREFETCH_IRQ_EN 0x00000001
+#define BF_PRE_IRQ_MASK_PREFETCH_IRQ_EN(v) \
+ (((v) << 0) & BM_PRE_IRQ_MASK_PREFETCH_IRQ_EN)
+
+#define HW_PRE_IRQ (0x00000020)
+#define HW_PRE_IRQ_SET (0x00000024)
+#define HW_PRE_IRQ_CLR (0x00000028)
+#define HW_PRE_IRQ_TOG (0x0000002c)
+
+#define BP_PRE_IRQ_RSVD1 14
+#define BM_PRE_IRQ_RSVD1 0xFFFFC000
+#define BF_PRE_IRQ_RSVD1(v) \
+ (((v) << 14) & BM_PRE_IRQ_RSVD1)
+#define BP_PRE_IRQ_AXI_ERROR_ID 10
+#define BM_PRE_IRQ_AXI_ERROR_ID 0x00003C00
+#define BF_PRE_IRQ_AXI_ERROR_ID(v) \
+ (((v) << 10) & BM_PRE_IRQ_AXI_ERROR_ID)
+#define BM_PRE_IRQ_AXI_READ_ERROR 0x00000200
+#define BF_PRE_IRQ_AXI_READ_ERROR(v) \
+ (((v) << 9) & BM_PRE_IRQ_AXI_READ_ERROR)
+#define BM_PRE_IRQ_AXI_WRITE_ERROR 0x00000100
+#define BF_PRE_IRQ_AXI_WRITE_ERROR(v) \
+ (((v) << 8) & BM_PRE_IRQ_AXI_WRITE_ERROR)
+#define BP_PRE_IRQ_RSVD0 5
+#define BM_PRE_IRQ_RSVD0 0x000000E0
+#define BF_PRE_IRQ_RSVD0(v) \
+ (((v) << 5) & BM_PRE_IRQ_RSVD0)
+#define BM_PRE_IRQ_HANDSHAKE_ERROR_IRQ 0x00000010
+#define BF_PRE_IRQ_HANDSHAKE_ERROR_IRQ(v) \
+ (((v) << 4) & BM_PRE_IRQ_HANDSHAKE_ERROR_IRQ)
+#define BM_PRE_IRQ_TPR_RD_NUM_BYTES_OVFL_IRQ 0x00000008
+#define BF_PRE_IRQ_TPR_RD_NUM_BYTES_OVFL_IRQ(v) \
+ (((v) << 3) & BM_PRE_IRQ_TPR_RD_NUM_BYTES_OVFL_IRQ)
+#define BM_PRE_IRQ_HANDSHAKE_ABORT_IRQ 0x00000004
+#define BF_PRE_IRQ_HANDSHAKE_ABORT_IRQ(v) \
+ (((v) << 2) & BM_PRE_IRQ_HANDSHAKE_ABORT_IRQ)
+#define BM_PRE_IRQ_STORE_IRQ 0x00000002
+#define BF_PRE_IRQ_STORE_IRQ(v) \
+ (((v) << 1) & BM_PRE_IRQ_STORE_IRQ)
+#define BM_PRE_IRQ_PREFETCH_IRQ 0x00000001
+#define BF_PRE_IRQ_PREFETCH_IRQ(v) \
+ (((v) << 0) & BM_PRE_IRQ_PREFETCH_IRQ)
+
+#define HW_PRE_CUR_BUF (0x00000030)
+
+#define BP_PRE_CUR_BUF_ADDR 0
+#define BM_PRE_CUR_BUF_ADDR 0xFFFFFFFF
+#define BF_PRE_CUR_BUF_ADDR(v) (v)
+
+#define HW_PRE_NEXT_BUF (0x00000040)
+
+#define BP_PRE_NEXT_BUF_ADDR 0
+#define BM_PRE_NEXT_BUF_ADDR 0xFFFFFFFF
+#define BF_PRE_NEXT_BUF_ADDR(v) (v)
+
+#define HW_PRE_U_BUF_OFFSET (0x00000050)
+
+#define BP_PRE_U_BUF_OFFSET_RSVD0 25
+#define BM_PRE_U_BUF_OFFSET_RSVD0 0xFE000000
+#define BF_PRE_U_BUF_OFFSET_RSVD0(v) \
+ (((v) << 25) & BM_PRE_U_BUF_OFFSET_RSVD0)
+#define BP_PRE_U_BUF_OFFSET_UBO 0
+#define BM_PRE_U_BUF_OFFSET_UBO 0x01FFFFFF
+#define BF_PRE_U_BUF_OFFSET_UBO(v) \
+ (((v) << 0) & BM_PRE_U_BUF_OFFSET_UBO)
+
+#define HW_PRE_V_BUF_OFFSET (0x00000060)
+
+#define BP_PRE_V_BUF_OFFSET_RSVD0 25
+#define BM_PRE_V_BUF_OFFSET_RSVD0 0xFE000000
+#define BF_PRE_V_BUF_OFFSET_RSVD0(v) \
+ (((v) << 25) & BM_PRE_V_BUF_OFFSET_RSVD0)
+#define BP_PRE_V_BUF_OFFSET_VBO 0
+#define BM_PRE_V_BUF_OFFSET_VBO 0x01FFFFFF
+#define BF_PRE_V_BUF_OFFSET_VBO(v) \
+ (((v) << 0) & BM_PRE_V_BUF_OFFSET_VBO)
+
+#define HW_PRE_TPR_CTRL (0x00000070)
+#define HW_PRE_TPR_CTRL_SET (0x00000074)
+#define HW_PRE_TPR_CTRL_CLR (0x00000078)
+#define HW_PRE_TPR_CTRL_TOG (0x0000007c)
+
+#define BP_PRE_TPR_CTRL_RSVD 8
+#define BM_PRE_TPR_CTRL_RSVD 0xFFFFFF00
+#define BF_PRE_TPR_CTRL_RSVD(v) \
+ (((v) << 8) & BM_PRE_TPR_CTRL_RSVD)
+#define BP_PRE_TPR_CTRL_TILE_FORMAT 0
+#define BM_PRE_TPR_CTRL_TILE_FORMAT 0x000000FF
+#define BF_PRE_TPR_CTRL_TILE_FORMAT(v) \
+ (((v) << 0) & BM_PRE_TPR_CTRL_TILE_FORMAT)
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__BYPASS 0x00
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU32_SB_ST 0x10
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU32_SB_SRT 0x50
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU32_ST 0x20
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU32_SRT 0x60
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU32_MST 0xA0
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU32_MSRT 0xE0
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU16_SB_ST 0x11
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU16_SB_SRT 0x51
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU16_ST 0x21
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU16_SRT 0x61
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU16_MST 0xA1
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__GPU16_MSRT 0xE1
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__VPU8_PRO 0x22
+#define BV_PRE_TPR_CTRL_TILE_FORMAT__VPU8_SB_INT 0x13
+
+#define HW_PRE_PREFETCH_ENGINE_CTRL (0x00000080)
+#define HW_PRE_PREFETCH_ENGINE_CTRL_SET (0x00000084)
+#define HW_PRE_PREFETCH_ENGINE_CTRL_CLR (0x00000088)
+#define HW_PRE_PREFETCH_ENGINE_CTRL_TOG (0x0000008c)
+
+#define BP_PRE_PREFETCH_ENGINE_CTRL_RSVD1 16
+#define BM_PRE_PREFETCH_ENGINE_CTRL_RSVD1 0xFFFF0000
+#define BF_PRE_PREFETCH_ENGINE_CTRL_RSVD1(v) \
+ (((v) << 16) & BM_PRE_PREFETCH_ENGINE_CTRL_RSVD1)
+#define BM_PRE_PREFETCH_ENGINE_CTRL_TPR_COOR_OFFSET_EN 0x00008000
+#define BF_PRE_PREFETCH_ENGINE_CTRL_TPR_COOR_OFFSET_EN(v) \
+ (((v) << 15) & BM_PRE_PREFETCH_ENGINE_CTRL_TPR_COOR_OFFSET_EN)
+#define BM_PRE_PREFETCH_ENGINE_CTRL_PARTIAL_UV_SWAP 0x00004000
+#define BF_PRE_PREFETCH_ENGINE_CTRL_PARTIAL_UV_SWAP(v) \
+ (((v) << 14) & BM_PRE_PREFETCH_ENGINE_CTRL_PARTIAL_UV_SWAP)
+#define BM_PRE_PREFETCH_ENGINE_CTRL_CROP_EN 0x00002000
+#define BF_PRE_PREFETCH_ENGINE_CTRL_CROP_EN(v) \
+ (((v) << 13) & BM_PRE_PREFETCH_ENGINE_CTRL_CROP_EN)
+#define BV_PRE_PREFETCH_ENGINE_CTRL_CROP_EN__0 0x0
+#define BV_PRE_PREFETCH_ENGINE_CTRL_CROP_EN__1 0x1
+#define BM_PRE_PREFETCH_ENGINE_CTRL_FIELD_INVERSE 0x00001000
+#define BF_PRE_PREFETCH_ENGINE_CTRL_FIELD_INVERSE(v) \
+ (((v) << 12) & BM_PRE_PREFETCH_ENGINE_CTRL_FIELD_INVERSE)
+#define BV_PRE_PREFETCH_ENGINE_CTRL_FIELD_INVERSE__0 0x0
+#define BV_PRE_PREFETCH_ENGINE_CTRL_FIELD_INVERSE__1 0x1
+#define BM_PRE_PREFETCH_ENGINE_CTRL_SHIFT_BYPASS 0x00000800
+#define BF_PRE_PREFETCH_ENGINE_CTRL_SHIFT_BYPASS(v) \
+ (((v) << 11) & BM_PRE_PREFETCH_ENGINE_CTRL_SHIFT_BYPASS)
+#define BV_PRE_PREFETCH_ENGINE_CTRL_SHIFT_BYPASS__0 0x0
+#define BV_PRE_PREFETCH_ENGINE_CTRL_SHIFT_BYPASS__1 0x1
+#define BP_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT 8
+#define BM_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT 0x00000700
+#define BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT(v) \
+ (((v) << 8) & BM_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT)
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT__0 0x0
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT__1 0x1
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT__2 0x2
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT__3 0x3
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT__4 0x4
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT__5 0x5
+#define BP_PRE_PREFETCH_ENGINE_CTRL_RSVD0 6
+#define BM_PRE_PREFETCH_ENGINE_CTRL_RSVD0 0x000000C0
+#define BF_PRE_PREFETCH_ENGINE_CTRL_RSVD0(v) \
+ (((v) << 6) & BM_PRE_PREFETCH_ENGINE_CTRL_RSVD0)
+#define BP_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP 4
+#define BM_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP 0x00000030
+#define BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP(v) \
+ (((v) << 4) & BM_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP)
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP__0 0x0
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP__1 0x1
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP__2 0x2
+#define BV_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP__3 0x3
+#define BP_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES 1
+#define BM_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES 0x0000000E
+#define BF_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES(v) \
+ (((v) << 1) & BM_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES)
+#define BV_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES__8_bytes 0x0
+#define BV_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES__16_bytes 0x1
+#define BV_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES__32_bytes 0x2
+#define BV_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES__64_bytes 0x3
+#define BV_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES__128_bytes 0x4
+#define BM_PRE_PREFETCH_ENGINE_CTRL_PREFETCH_EN 0x00000001
+#define BF_PRE_PREFETCH_ENGINE_CTRL_PREFETCH_EN(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_CTRL_PREFETCH_EN)
+#define BV_PRE_PREFETCH_ENGINE_CTRL_PREFETCH_EN__0 0x0
+#define BV_PRE_PREFETCH_ENGINE_CTRL_PREFETCH_EN__1 0x1
+
+#define HW_PRE_PREFETCH_ENGINE_STATUS (0x00000090)
+
+#define BP_PRE_PREFETCH_ENGINE_STATUS_PREFETCH_BLOCK_Y 16
+#define BM_PRE_PREFETCH_ENGINE_STATUS_PREFETCH_BLOCK_Y 0x3FFF0000
+#define BF_PRE_PREFETCH_ENGINE_STATUS_PREFETCH_BLOCK_Y(v) \
+ (((v) << 16) & BM_PRE_PREFETCH_ENGINE_STATUS_PREFETCH_BLOCK_Y)
+#define BP_PRE_PREFETCH_ENGINE_STATUS_PREFETCH_BLOCK_X 0
+#define BM_PRE_PREFETCH_ENGINE_STATUS_PREFETCH_BLOCK_X 0x0000FFFF
+#define BF_PRE_PREFETCH_ENGINE_STATUS_PREFETCH_BLOCK_X(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_STATUS_PREFETCH_BLOCK_X)
+
+#define HW_PRE_PREFETCH_ENGINE_INPUT_SIZE (0x000000a0)
+
+#define BP_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_HEIGHT 16
+#define BM_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_HEIGHT 0xFFFF0000
+#define BF_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_HEIGHT(v) \
+ (((v) << 16) & BM_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_HEIGHT)
+#define BP_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_WIDTH 0
+#define BM_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_WIDTH 0x0000FFFF
+#define BF_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_WIDTH(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_WIDTH)
+
+#define HW_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC (0x000000b0)
+
+#define BP_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_Y 16
+#define BM_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_Y 0xFFFF0000
+#define BF_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_Y(v) \
+ (((v) << 16) & BM_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_Y)
+#define BP_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_X 0
+#define BM_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_X 0x0000FFFF
+#define BF_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_X(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_X)
+
+#define HW_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC (0x000000c0)
+
+#define BP_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC_OUTPUT_SIZE_LRC_Y 16
+#define BM_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC_OUTPUT_SIZE_LRC_Y 0xFFFF0000
+#define BF_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC_OUTPUT_SIZE_LRC_Y(v) \
+ (((v) << 16) & BM_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC_OUTPUT_SIZE_LRC_Y)
+#define BP_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC_OUTPUT_SIZE_LRC_X 0
+#define BM_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC_OUTPUT_SIZE_LRC_X 0x0000FFFF
+#define BF_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC_OUTPUT_SIZE_LRC_X(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_LRC_OUTPUT_SIZE_LRC_X)
+
+#define HW_PRE_PREFETCH_ENGINE_PITCH (0x000000d0)
+
+#define BP_PRE_PREFETCH_ENGINE_PITCH_INPUT_UV_PITCH 16
+#define BM_PRE_PREFETCH_ENGINE_PITCH_INPUT_UV_PITCH 0xFFFF0000
+#define BF_PRE_PREFETCH_ENGINE_PITCH_INPUT_UV_PITCH(v) \
+ (((v) << 16) & BM_PRE_PREFETCH_ENGINE_PITCH_INPUT_UV_PITCH)
+#define BP_PRE_PREFETCH_ENGINE_PITCH_INPUT_Y_PITCH 0
+#define BM_PRE_PREFETCH_ENGINE_PITCH_INPUT_Y_PITCH 0x0000FFFF
+#define BF_PRE_PREFETCH_ENGINE_PITCH_INPUT_Y_PITCH(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_PITCH_INPUT_Y_PITCH)
+
+#define HW_PRE_PREFETCH_ENGINE_SHIFT_OFFSET (0x000000e0)
+#define HW_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_SET (0x000000e4)
+#define HW_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_CLR (0x000000e8)
+#define HW_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_TOG (0x000000ec)
+
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD0 29
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD0 0xE0000000
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD0(v) \
+ (((v) << 29) & BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD0)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET3 24
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET3 0x1F000000
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET3(v) \
+ (((v) << 24) & BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET3)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD1 21
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD1 0x00E00000
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD1(v) \
+ (((v) << 21) & BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD1)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET2 16
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET2 0x001F0000
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET2(v) \
+ (((v) << 16) & BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET2)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD2 13
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD2 0x0000E000
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD2(v) \
+ (((v) << 13) & BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD2)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET1 8
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET1 0x00001F00
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET1(v) \
+ (((v) << 8) & BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET1)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD3 5
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD3 0x000000E0
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD3(v) \
+ (((v) << 5) & BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_RSVD3)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET0 0
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET0 0x0000001F
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET0(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_SHIFT_OFFSET_OFFSET0)
+
+#define HW_PRE_PREFETCH_ENGINE_SHIFT_WIDTH (0x000000f0)
+#define HW_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_SET (0x000000f4)
+#define HW_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_CLR (0x000000f8)
+#define HW_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_TOG (0x000000fc)
+
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_RSVD0 16
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_RSVD0 0xFFFF0000
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_RSVD0(v) \
+ (((v) << 16) & BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_RSVD0)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH3 12
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH3 0x0000F000
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH3(v) \
+ (((v) << 12) & BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH3)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH2 8
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH2 0x00000F00
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH2(v) \
+ (((v) << 8) & BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH2)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH1 4
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH1 0x000000F0
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH1(v) \
+ (((v) << 4) & BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH1)
+#define BP_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH0 0
+#define BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH0 0x0000000F
+#define BF_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH0(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_SHIFT_WIDTH_WIDTH0)
+
+#define HW_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET (0x00000100)
+
+#define BP_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_RSVD0 23
+#define BM_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_RSVD0 0xFF800000
+#define BF_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_RSVD0(v) \
+ (((v) << 23) & BM_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_RSVD0)
+#define BP_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_INTERLACE_OFFSET 0
+#define BM_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_INTERLACE_OFFSET 0x007FFFFF
+#define BF_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_INTERLACE_OFFSET(v) \
+ (((v) << 0) & BM_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_INTERLACE_OFFSET)
+
+#define HW_PRE_STORE_ENGINE_CTRL (0x00000110)
+#define HW_PRE_STORE_ENGINE_CTRL_SET (0x00000114)
+#define HW_PRE_STORE_ENGINE_CTRL_CLR (0x00000118)
+#define HW_PRE_STORE_ENGINE_CTRL_TOG (0x0000011c)
+
+#define BP_PRE_STORE_ENGINE_CTRL_RSVD0 6
+#define BM_PRE_STORE_ENGINE_CTRL_RSVD0 0xFFFFFFC0
+#define BF_PRE_STORE_ENGINE_CTRL_RSVD0(v) \
+ (((v) << 6) & BM_PRE_STORE_ENGINE_CTRL_RSVD0)
+#define BP_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP 4
+#define BM_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP 0x00000030
+#define BF_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP(v) \
+ (((v) << 4) & BM_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP)
+#define BV_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP__8_bits 0x0
+#define BV_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP__16_bits 0x1
+#define BV_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP__32_bits 0x2
+#define BV_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP__64_bits 0x3
+#define BP_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES 1
+#define BM_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES 0x0000000E
+#define BF_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES(v) \
+ (((v) << 1) & BM_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES)
+#define BV_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES__8_bytes 0x0
+#define BV_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES__16_bytes 0x1
+#define BV_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES__32_bytes 0x2
+#define BV_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES__64_bytes 0x3
+#define BV_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES__128_bytes 0x4
+#define BM_PRE_STORE_ENGINE_CTRL_STORE_EN 0x00000001
+#define BF_PRE_STORE_ENGINE_CTRL_STORE_EN(v) \
+ (((v) << 0) & BM_PRE_STORE_ENGINE_CTRL_STORE_EN)
+#define BV_PRE_STORE_ENGINE_CTRL_STORE_EN__0 0x0
+#define BV_PRE_STORE_ENGINE_CTRL_STORE_EN__1 0x1
+
+#define HW_PRE_STORE_ENGINE_STATUS (0x00000120)
+
+#define BP_PRE_STORE_ENGINE_STATUS_STORE_BLOCK_Y 16
+#define BM_PRE_STORE_ENGINE_STATUS_STORE_BLOCK_Y 0x3FFF0000
+#define BF_PRE_STORE_ENGINE_STATUS_STORE_BLOCK_Y(v) \
+ (((v) << 16) & BM_PRE_STORE_ENGINE_STATUS_STORE_BLOCK_Y)
+#define BP_PRE_STORE_ENGINE_STATUS_STORE_BLOCK_X 0
+#define BM_PRE_STORE_ENGINE_STATUS_STORE_BLOCK_X 0x0000FFFF
+#define BF_PRE_STORE_ENGINE_STATUS_STORE_BLOCK_X(v) \
+ (((v) << 0) & BM_PRE_STORE_ENGINE_STATUS_STORE_BLOCK_X)
+
+#define HW_PRE_STORE_ENGINE_SIZE (0x00000130)
+
+#define BP_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_HEIGHT 16
+#define BM_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_HEIGHT 0xFFFF0000
+#define BF_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_HEIGHT(v) \
+ (((v) << 16) & BM_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_HEIGHT)
+#define BP_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_WIDTH 0
+#define BM_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_WIDTH 0x0000FFFF
+#define BF_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_WIDTH(v) \
+ (((v) << 0) & BM_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_WIDTH)
+
+#define HW_PRE_STORE_ENGINE_PITCH (0x00000140)
+
+#define BP_PRE_STORE_ENGINE_PITCH_RSVD0 16
+#define BM_PRE_STORE_ENGINE_PITCH_RSVD0 0xFFFF0000
+#define BF_PRE_STORE_ENGINE_PITCH_RSVD0(v) \
+ (((v) << 16) & BM_PRE_STORE_ENGINE_PITCH_RSVD0)
+#define BP_PRE_STORE_ENGINE_PITCH_OUT_PITCH 0
+#define BM_PRE_STORE_ENGINE_PITCH_OUT_PITCH 0x0000FFFF
+#define BF_PRE_STORE_ENGINE_PITCH_OUT_PITCH(v) \
+ (((v) << 0) & BM_PRE_STORE_ENGINE_PITCH_OUT_PITCH)
+
+#define HW_PRE_STORE_ENGINE_ADDR (0x00000150)
+
+#define BP_PRE_STORE_ENGINE_ADDR_OUT_BASE_ADDR 0
+#define BM_PRE_STORE_ENGINE_ADDR_OUT_BASE_ADDR 0xFFFFFFFF
+#define BF_PRE_STORE_ENGINE_ADDR_OUT_BASE_ADDR(v) (v)
+#endif /* __ARCH_ARM___PRE_H */
diff --git a/drivers/mxc/ipu3/pre.c b/drivers/mxc/ipu3/pre.c
new file mode 100644
index 000000000000..88783febdd08
--- /dev/null
+++ b/drivers/mxc/ipu3/pre.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/clk.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipu-v3.h>
+#include <linux/ipu-v3-pre.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pre-regs.h"
+
+struct ipu_pre_data {
+ unsigned int id;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+
+ struct mutex mutex; /* for in_use */
+ spinlock_t lock; /* for register access */
+
+ struct list_head list;
+
+ struct gen_pool *iram_pool;
+ unsigned long double_buffer_size;
+ unsigned long double_buffer_base;
+ unsigned long double_buffer_paddr;
+
+ bool in_use;
+ bool enabled;
+};
+
+static LIST_HEAD(pre_list);
+static DEFINE_MUTEX(pre_list_lock);
+
+static inline void pre_write(struct ipu_pre_data *pre,
+ u32 value, unsigned int offset)
+{
+ writel(value, pre->base + offset);
+}
+
+static inline u32 pre_read(struct ipu_pre_data *pre, unsigned offset)
+{
+ return readl(pre->base + offset);
+}
+
+static struct ipu_pre_data *get_pre(unsigned int id)
+{
+ struct ipu_pre_data *pre;
+
+ mutex_lock(&pre_list_lock);
+ list_for_each_entry(pre, &pre_list, list) {
+ if (pre->id == id) {
+ mutex_unlock(&pre_list_lock);
+ return pre;
+ }
+ }
+ mutex_unlock(&pre_list_lock);
+
+ return NULL;
+}
+
+int ipu_pre_alloc(int ipu_id, ipu_channel_t channel)
+{
+ struct ipu_pre_data *pre;
+ int i, fixed;
+
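+	/*
+	 * MEM_BG_SYNC uses a fixed PRE (PRE0 for IPU0, PRE3 for IPU1);
+	 * other channels take any free one from PRE1/PRE2.
+	 */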
+ if (channel == MEM_BG_SYNC) {
+ fixed = ipu_id ? 3 : 0;
+ pre = get_pre(fixed);
+ if (pre) {
+ mutex_lock(&pre->mutex);
+ if (!pre->in_use) {
+ pre->in_use = true;
+ mutex_unlock(&pre->mutex);
+ return pre->id;
+ }
+ mutex_unlock(&pre->mutex);
+ }
+ return pre ? -EBUSY : -ENOENT;
+ }
+
+ for (i = 1; i < 3; i++) {
+ pre = get_pre(i);
+ if (!pre)
+ continue;
+ mutex_lock(&pre->mutex);
+ if (!pre->in_use) {
+ pre->in_use = true;
+ mutex_unlock(&pre->mutex);
+ return pre->id;
+ }
+ mutex_unlock(&pre->mutex);
+ }
+
+ return pre ? -EBUSY : -ENOENT;
+}
+EXPORT_SYMBOL(ipu_pre_alloc);
+
+void ipu_pre_free(unsigned int *id)
+{
+ struct ipu_pre_data *pre;
+
+ pre = get_pre(*id);
+ if (!pre)
+ return;
+
+ mutex_lock(&pre->mutex);
+ pre->in_use = false;
+ mutex_unlock(&pre->mutex);
+
+ *id = -1;
+}
+EXPORT_SYMBOL(ipu_pre_free);
+
+unsigned long ipu_pre_alloc_double_buffer(unsigned int id, unsigned int size)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+
+ if (!pre)
+ return -ENOENT;
+
+ if (!size)
+ return -EINVAL;
+
+ pre->double_buffer_base = gen_pool_alloc(pre->iram_pool, size);
+ if (!pre->double_buffer_base) {
+		dev_err(pre->dev, "double buffer allocation failed\n");
+ return -ENOMEM;
+ }
+ pre->double_buffer_size = size;
+
+ pre->double_buffer_paddr = gen_pool_virt_to_phys(pre->iram_pool,
+ pre->double_buffer_base);
+
+ return pre->double_buffer_paddr;
+}
+EXPORT_SYMBOL(ipu_pre_alloc_double_buffer);
+
+void ipu_pre_free_double_buffer(unsigned int id)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+
+ if (!pre)
+ return;
+
+ if (pre->double_buffer_base) {
+ gen_pool_free(pre->iram_pool,
+ pre->double_buffer_base,
+ pre->double_buffer_size);
+ pre->double_buffer_base = 0;
+ pre->double_buffer_size = 0;
+ pre->double_buffer_paddr = 0;
+ }
+}
+EXPORT_SYMBOL(ipu_pre_free_double_buffer);
+
+/* PRE register configurations */
+static int ipu_pre_set_ctrl(unsigned int id,
+ bool repeat,
+ bool vflip,
+ bool handshake_en,
+ bool hsk_abort_en,
+ unsigned int hsk_line_num,
+ bool sdw_update,
+ unsigned int block_size,
+ unsigned int interlaced,
+ unsigned int prefetch_mode)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+ int ret = 0;
+
+ if (!pre)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, BF_PRE_CTRL_TPR_RESET_SEL(1), HW_PRE_CTRL_SET);
+
+ if (repeat)
+ pre_write(pre, BF_PRE_CTRL_EN_REPEAT(1), HW_PRE_CTRL_SET);
+ else
+ pre_write(pre, BM_PRE_CTRL_EN_REPEAT, HW_PRE_CTRL_CLR);
+
+ if (vflip)
+ pre_write(pre, BF_PRE_CTRL_VFLIP(1), HW_PRE_CTRL_SET);
+ else
+ pre_write(pre, BM_PRE_CTRL_VFLIP, HW_PRE_CTRL_CLR);
+
+ if (handshake_en) {
+ pre_write(pre, BF_PRE_CTRL_HANDSHAKE_EN(1), HW_PRE_CTRL_SET);
+ if (hsk_abort_en)
+ pre_write(pre, BF_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN(1),
+ HW_PRE_CTRL_SET);
+ else
+ pre_write(pre, BM_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN,
+ HW_PRE_CTRL_CLR);
+
+ switch (hsk_line_num) {
+ case 0 /* 4 lines */:
+ pre_write(pre, BM_PRE_CTRL_HANDSHAKE_LINE_NUM,
+ HW_PRE_CTRL_CLR);
+ break;
+ case 1 /* 8 lines */:
+ pre_write(pre, BM_PRE_CTRL_HANDSHAKE_LINE_NUM,
+ HW_PRE_CTRL_CLR);
+ pre_write(pre, BF_PRE_CTRL_HANDSHAKE_LINE_NUM(1),
+ HW_PRE_CTRL_SET);
+ break;
+ case 2 /* 16 lines */:
+ pre_write(pre, BM_PRE_CTRL_HANDSHAKE_LINE_NUM,
+ HW_PRE_CTRL_CLR);
+ pre_write(pre, BF_PRE_CTRL_HANDSHAKE_LINE_NUM(2),
+ HW_PRE_CTRL_SET);
+ break;
+ default:
+			dev_err(pre->dev, "invalid handshake line number\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ } else
+ pre_write(pre, BM_PRE_CTRL_HANDSHAKE_EN, HW_PRE_CTRL_CLR);
+
+
+ switch (prefetch_mode) {
+ case 0:
+ pre_write(pre, BM_PRE_CTRL_BLOCK_EN, HW_PRE_CTRL_CLR);
+ break;
+ case 1:
+ pre_write(pre, BF_PRE_CTRL_BLOCK_EN(1), HW_PRE_CTRL_SET);
+ switch (block_size) {
+ case 0:
+ pre_write(pre, BM_PRE_CTRL_BLOCK_16, HW_PRE_CTRL_CLR);
+ break;
+ case 1:
+ pre_write(pre, BF_PRE_CTRL_BLOCK_16(1), HW_PRE_CTRL_SET);
+ break;
+ default:
+ dev_err(pre->dev, "invalid block size for pre\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+		dev_err(pre->dev, "invalid prefetch mode for pre\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (interlaced) {
+ case 0: /* progressive mode */
+ pre_write(pre, BM_PRE_CTRL_SO, HW_PRE_CTRL_CLR);
+ break;
+ case 2: /* interlaced mode: Pal */
+ pre_write(pre, BF_PRE_CTRL_SO(1), HW_PRE_CTRL_SET);
+ pre_write(pre, BM_PRE_CTRL_INTERLACED_FIELD, HW_PRE_CTRL_CLR);
+ break;
+ case 3: /* interlaced mode: NTSC */
+ pre_write(pre, BF_PRE_CTRL_SO(1), HW_PRE_CTRL_SET);
+ pre_write(pre, BF_PRE_CTRL_INTERLACED_FIELD(1), HW_PRE_CTRL_SET);
+ break;
+ default:
+ dev_err(pre->dev, "invalid interlaced or progressive mode\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (sdw_update)
+ pre_write(pre, BF_PRE_CTRL_SDW_UPDATE(1), HW_PRE_CTRL_SET);
+ else
+ pre_write(pre, BM_PRE_CTRL_SDW_UPDATE, HW_PRE_CTRL_CLR);
+
+err:
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ return ret;
+}
+
+static void ipu_pre_irq_mask(struct ipu_pre_data *pre,
+ unsigned long mask, bool clear)
+{
+ if (clear) {
+ pre_write(pre, mask & 0xf, HW_PRE_IRQ_MASK_CLR);
+ return;
+ }
+ pre_write(pre, mask & 0xf, HW_PRE_IRQ_MASK_SET);
+}
+
+static int ipu_pre_buf_set(unsigned int id, unsigned long cur_buf,
+ unsigned long next_buf)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, cur_buf, HW_PRE_CUR_BUF);
+ pre_write(pre, next_buf, HW_PRE_NEXT_BUF);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ return 0;
+}
+
+static int ipu_pre_plane_buf_off_set(unsigned int id,
+ unsigned long sec_buf_off,
+ unsigned long trd_buf_off)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre || sec_buf_off & BM_PRE_U_BUF_OFFSET_RSVD0 ||
+ trd_buf_off & BM_PRE_V_BUF_OFFSET_RSVD0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, sec_buf_off, HW_PRE_U_BUF_OFFSET);
+ pre_write(pre, trd_buf_off, HW_PRE_V_BUF_OFFSET);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ return 0;
+}
+
+static int ipu_pre_tpr_set(unsigned int id, unsigned int tile_fmt)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+ unsigned int tpr_ctrl, fmt;
+
+ if (!pre)
+ return -EINVAL;
+
+ switch (tile_fmt) {
+ case 0x0: /* Bypass */
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x0);
+ break;
+ case IPU_PIX_FMT_GPU32_SB_ST:
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x10);
+ break;
+ case IPU_PIX_FMT_GPU16_SB_ST:
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x11);
+ break;
+ case IPU_PIX_FMT_GPU32_ST:
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x20);
+ break;
+ case IPU_PIX_FMT_GPU16_ST:
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x21);
+ break;
+ case IPU_PIX_FMT_GPU32_SB_SRT:
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x50);
+ break;
+ case IPU_PIX_FMT_GPU16_SB_SRT:
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x51);
+ break;
+ case IPU_PIX_FMT_GPU32_SRT:
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x60);
+ break;
+ case IPU_PIX_FMT_GPU16_SRT:
+ fmt = BF_PRE_TPR_CTRL_TILE_FORMAT(0x61);
+ break;
+ default:
+ dev_err(pre->dev, "invalid tile fmt for pre\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ tpr_ctrl = pre_read(pre, HW_PRE_TPR_CTRL);
+ tpr_ctrl &= ~BM_PRE_TPR_CTRL_TILE_FORMAT;
+ tpr_ctrl |= fmt;
+ pre_write(pre, tpr_ctrl, HW_PRE_TPR_CTRL);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ return 0;
+}
+
+static int ipu_pre_set_shift(int id, unsigned int offset, unsigned int width)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, offset, HW_PRE_PREFETCH_ENGINE_SHIFT_OFFSET);
+ pre_write(pre, width, HW_PRE_PREFETCH_ENGINE_SHIFT_WIDTH);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ return 0;
+}
+
+static int ipu_pre_prefetch(unsigned int id,
+ unsigned int read_burst,
+ unsigned int input_bpp,
+ unsigned int input_pixel_fmt,
+ bool shift_bypass,
+ bool field_inverse,
+ bool tpr_coor_offset_en,
+ struct ipu_rect output_size,
+ unsigned int input_width,
+ unsigned int input_height,
+ unsigned int input_active_width,
+ unsigned int interlaced,
+ int interlace_offset)
+{
+ unsigned int prefetch_ctrl = 0;
+ unsigned int input_y_pitch = 0, input_uv_pitch = 0;
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_PREFETCH_EN(1);
+ switch (read_burst) {
+ case 0x0:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES(0x0);
+ break;
+ case 0x1:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES(0x1);
+ break;
+ case 0x2:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES(0x2);
+ break;
+ case 0x3:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES(0x3);
+ break;
+ case 0x4:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_RD_NUM_BYTES(0x4);
+ break;
+ default:
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+ dev_err(pre->dev, "invalid read burst for prefetch engine\n");
+ return -EINVAL;
+ }
+
+ switch (input_bpp) {
+ case 8:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP(0x0);
+ break;
+ case 16:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP(0x1);
+ break;
+ case 32:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP(0x2);
+ break;
+ case 64:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_ACTIVE_BPP(0x3);
+ break;
+ default:
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+ dev_err(pre->dev, "invalid input bpp for prefetch engine\n");
+ return -EINVAL;
+ }
+
+ switch (input_pixel_fmt) {
+ case 0x1: /* tile */
+ case 0x0: /* generic data */
+ case IPU_PIX_FMT_RGB666:
+ case IPU_PIX_FMT_RGB565:
+ case IPU_PIX_FMT_BGRA4444:
+ case IPU_PIX_FMT_BGRA5551:
+ case IPU_PIX_FMT_BGR24:
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_GBR24:
+ case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_BGRA32:
+ case IPU_PIX_FMT_RGB32:
+ case IPU_PIX_FMT_RGBA32:
+ case IPU_PIX_FMT_ABGR32:
+ case IPU_PIX_FMT_YUYV:
+ case IPU_PIX_FMT_UYVY:
+ case IPU_PIX_FMT_YUV444:
+ case IPU_PIX_FMT_AYUV:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT(0x0);
+ input_y_pitch = input_width * (input_bpp >> 3);
+ if (interlaced && input_pixel_fmt != 0x1)
+ input_y_pitch *= 2;
+ break;
+ case IPU_PIX_FMT_YUV444P:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT(0x1);
+ input_y_pitch = input_width;
+ input_uv_pitch = input_width;
+ break;
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YVU422P:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT(0x2);
+ input_y_pitch = input_width;
+ input_uv_pitch = input_width >> 1;
+ break;
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YUV420P:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT(0x3);
+ input_y_pitch = input_width;
+ input_uv_pitch = input_width >> 1;
+ break;
+ case PRE_PIX_FMT_NV61:
+ prefetch_ctrl |= BM_PRE_PREFETCH_ENGINE_CTRL_PARTIAL_UV_SWAP;
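+		/* fall through: NV61 shares the NV16 setup with the U/V order swapped */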
+ case IPU_PIX_FMT_NV16:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT(0x4);
+ input_y_pitch = input_width;
+ input_uv_pitch = input_width;
+ break;
+ case PRE_PIX_FMT_NV21:
+ prefetch_ctrl |= BM_PRE_PREFETCH_ENGINE_CTRL_PARTIAL_UV_SWAP;
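+		/* fall through: NV21 shares the NV12 setup with the U/V order swapped */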
+ case IPU_PIX_FMT_NV12:
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_INPUT_PIXEL_FORMAT(0x5);
+ input_y_pitch = input_width;
+ input_uv_pitch = input_width;
+ break;
+ default:
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+ dev_err(pre->dev, "invalid input pixel format for prefetch engine\n");
+ return -EINVAL;
+ }
+
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_SHIFT_BYPASS(shift_bypass ? 1 : 0);
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_FIELD_INVERSE(field_inverse ? 1 : 0);
+ prefetch_ctrl |= BF_PRE_PREFETCH_ENGINE_CTRL_TPR_COOR_OFFSET_EN(tpr_coor_offset_en ? 1 : 0);
+
+ pre_write(pre, BF_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_WIDTH(input_active_width) |
+ BF_PRE_PREFETCH_ENGINE_INPUT_SIZE_INPUT_HEIGHT(input_height),
+ HW_PRE_PREFETCH_ENGINE_INPUT_SIZE);
+
+ if (tpr_coor_offset_en)
+ pre_write(pre, BF_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_X(output_size.left) |
+ BF_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_Y(output_size.top),
+ HW_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC);
+
+ pre_write(pre, BF_PRE_PREFETCH_ENGINE_PITCH_INPUT_Y_PITCH(input_y_pitch) |
+ BF_PRE_PREFETCH_ENGINE_PITCH_INPUT_UV_PITCH(input_uv_pitch),
+ HW_PRE_PREFETCH_ENGINE_PITCH);
+
+ pre_write(pre, BF_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET_INTERLACE_OFFSET(interlace_offset), HW_PRE_PREFETCH_ENGINE_INTERLACE_OFFSET);
+
+ pre_write(pre, prefetch_ctrl, HW_PRE_PREFETCH_ENGINE_CTRL);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ return 0;
+}
+
+static int ipu_pre_store(unsigned int id,
+ bool store_en,
+ unsigned int write_burst,
+ unsigned int output_bpp,
+			 /* this is the output width
+			  * produced by the prefetch engine
+			  */
+ unsigned int input_width,
+ unsigned int input_height,
+ unsigned int out_pitch,
+ unsigned int output_addr)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned int store_ctrl = 0;
+ unsigned long lock_flags;
+
+ if (!pre)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_STORE_EN(store_en ? 1 : 0);
+
+ if (store_en) {
+ switch (write_burst) {
+ case 0x0:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES(0x0);
+ break;
+ case 0x1:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES(0x1);
+ break;
+ case 0x2:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES(0x2);
+ break;
+ case 0x3:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES(0x3);
+ break;
+ case 0x4:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_WR_NUM_BYTES(0x4);
+ break;
+ default:
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+ dev_err(pre->dev, "invalid write burst value for store engine\n");
+ return -EINVAL;
+ }
+
+ switch (output_bpp) {
+ case 8:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP(0x0);
+ break;
+ case 16:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP(0x1);
+ break;
+ case 32:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP(0x2);
+ break;
+ case 64:
+ store_ctrl |= BF_PRE_STORE_ENGINE_CTRL_OUTPUT_ACTIVE_BPP(0x3);
+ break;
+ default:
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+			dev_err(pre->dev, "invalid output bpp for store engine\n");
+ return -EINVAL;
+ }
+
+ pre_write(pre, BF_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_WIDTH(input_width) |
+ BF_PRE_STORE_ENGINE_SIZE_INPUT_TOTAL_HEIGHT(input_height),
+ HW_PRE_STORE_ENGINE_SIZE);
+
+ pre_write(pre, BF_PRE_STORE_ENGINE_PITCH_OUT_PITCH(out_pitch),
+ HW_PRE_STORE_ENGINE_PITCH);
+
+ pre_write(pre, BF_PRE_STORE_ENGINE_ADDR_OUT_BASE_ADDR(output_addr),
+ HW_PRE_STORE_ENGINE_ADDR);
+ }
+
+ pre_write(pre, store_ctrl, HW_PRE_STORE_ENGINE_CTRL);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ return 0;
+}
+/* End */
+
+static irqreturn_t ipu_pre_irq_handle(int irq, void *dev_id)
+{
+ struct ipu_pre_data *pre = dev_id;
+ unsigned int irq_stat, axi_id = 0;
+
+ spin_lock(&pre->lock);
+ irq_stat = pre_read(pre, HW_PRE_IRQ);
+
+ if (irq_stat & BM_PRE_IRQ_HANDSHAKE_ABORT_IRQ) {
+ dev_warn(pre->dev, "handshake abort\n");
+ pre_write(pre, BM_PRE_IRQ_HANDSHAKE_ABORT_IRQ, HW_PRE_IRQ_CLR);
+ }
+
+ if (irq_stat & BM_PRE_IRQ_TPR_RD_NUM_BYTES_OVFL_IRQ) {
+ dev_warn(pre->dev, "tpr read num bytes overflow\n");
+ pre_write(pre, BM_PRE_IRQ_TPR_RD_NUM_BYTES_OVFL_IRQ,
+ HW_PRE_IRQ_CLR);
+ }
+
+ if (irq_stat & BM_PRE_IRQ_HANDSHAKE_ERROR_IRQ) {
+ dev_warn(pre->dev, "handshake error\n");
+ pre_write(pre, BM_PRE_IRQ_HANDSHAKE_ERROR_IRQ, HW_PRE_IRQ_CLR);
+ }
+
+ axi_id = (irq_stat & BM_PRE_IRQ_AXI_ERROR_ID) >>
+ BP_PRE_IRQ_AXI_ERROR_ID;
+ if (irq_stat & BM_PRE_IRQ_AXI_WRITE_ERROR) {
+ dev_warn(pre->dev, "AXI%d write error\n", axi_id);
+ pre_write(pre, BM_PRE_IRQ_AXI_WRITE_ERROR, HW_PRE_IRQ_CLR);
+ }
+
+ if (irq_stat & BM_PRE_IRQ_AXI_READ_ERROR) {
+ dev_warn(pre->dev, "AXI%d read error\n", axi_id);
+ pre_write(pre, BM_PRE_IRQ_AXI_READ_ERROR, HW_PRE_IRQ_CLR);
+ }
+ spin_unlock(&pre->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void ipu_pre_out_of_reset(unsigned int id)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre)
+ return;
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, BF_PRE_CTRL_SFTRST(1) | BF_PRE_CTRL_CLKGATE(1),
+ HW_PRE_CTRL_CLR);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+}
+
+int ipu_pre_config(int id, struct ipu_pre_context *config)
+{
+ int ret = 0;
+ struct ipu_pre_data *pre = get_pre(id);
+
+ if (!config || !pre)
+ return -EINVAL;
+
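+	/* the store engine writes its output into this PRE's OCRAM double buffer */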
+ config->store_addr = pre->double_buffer_paddr;
+
+ if (!pre->enabled)
+ clk_prepare_enable(pre->clk);
+
+ ipu_pre_out_of_reset(id);
+
+ ret = ipu_pre_plane_buf_off_set(id, config->sec_buf_off,
+ config->trd_buf_off);
+ if (ret < 0)
+ goto out;
+
+ ret = ipu_pre_tpr_set(id, config->tile_fmt);
+ if (ret < 0)
+ goto out;
+
+ ret = ipu_pre_buf_set(id, config->cur_buf, config->next_buf);
+ if (ret < 0)
+ goto out;
+
+ ret = ipu_pre_set_shift(id, config->prefetch_shift_offset,
+ config->prefetch_shift_width);
+ if (ret < 0)
+ goto out;
+
+ ret = ipu_pre_prefetch(id, config->read_burst, config->prefetch_input_bpp,
+ config->prefetch_input_pixel_fmt, config->shift_bypass,
+ config->field_inverse, config->tpr_coor_offset_en,
+ config->prefetch_output_size, config->prefetch_input_width,
+ config->prefetch_input_height,
+ config->prefetch_input_active_width,
+ config->interlaced,
+ config->interlace_offset);
+ if (ret < 0)
+ goto out;
+
+ ret = ipu_pre_store(id, config->store_en,
+ config->write_burst, config->store_output_bpp,
+ config->prefetch_output_size.width, config->prefetch_output_size.height,
+ config->store_pitch,
+ config->store_addr);
+ if (ret < 0)
+ goto out;
+
+ ret = ipu_pre_set_ctrl(id, config->repeat,
+ config->vflip, config->handshake_en,
+ config->hsk_abort_en, config->hsk_line_num,
+ config->sdw_update, config->block_size,
+ config->interlaced, config->prefetch_mode);
+
+ ipu_pre_irq_mask(pre, BM_PRE_IRQ_HANDSHAKE_ABORT_IRQ |
+ BM_PRE_IRQ_TPR_RD_NUM_BYTES_OVFL_IRQ |
+ BM_PRE_IRQ_HANDSHAKE_ERROR_IRQ, false);
+out:
+ if (!pre->enabled)
+ clk_disable_unprepare(pre->clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipu_pre_config);
+
+int ipu_pre_enable(int id)
+{
+ int ret = 0;
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre)
+ return -EINVAL;
+
+ if (pre->enabled)
+ return 0;
+
+ clk_prepare_enable(pre->clk);
+
+ /* start the pre engine */
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, BF_PRE_CTRL_ENABLE(1), HW_PRE_CTRL_SET);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ pre->enabled = true;
+
+ return ret;
+}
+EXPORT_SYMBOL(ipu_pre_enable);
+
+int ipu_pre_sdw_update(int id)
+{
+ int ret = 0;
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre)
+ return -EINVAL;
+
+ if (!pre->enabled)
+ clk_prepare_enable(pre->clk);
+
+ /* start the pre engine */
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, BF_PRE_CTRL_SDW_UPDATE(1), HW_PRE_CTRL_SET);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ if (!pre->enabled)
+ clk_disable_unprepare(pre->clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipu_pre_sdw_update);
+
+void ipu_pre_disable(int id)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre)
+ return;
+
+ if (!pre->enabled)
+ return;
+
+ /* stop the pre engine */
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, BF_PRE_CTRL_ENABLE(1), HW_PRE_CTRL_CLR);
+ pre_write(pre, BF_PRE_CTRL_SDW_UPDATE(1), HW_PRE_CTRL_SET);
+ pre_write(pre, BF_PRE_CTRL_SFTRST(1), HW_PRE_CTRL_SET);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ clk_disable_unprepare(pre->clk);
+
+ pre->enabled = false;
+}
+EXPORT_SYMBOL(ipu_pre_disable);
+
+int ipu_pre_set_fb_buffer(int id, unsigned long fb_paddr,
+ unsigned int x_crop,
+ unsigned int y_crop,
+ unsigned int sec_buf_off,
+ unsigned int trd_buf_off)
+{
+ struct ipu_pre_data *pre = get_pre(id);
+ unsigned long lock_flags;
+
+ if (!pre)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pre->lock, lock_flags);
+ pre_write(pre, fb_paddr, HW_PRE_NEXT_BUF);
+ pre_write(pre, sec_buf_off, HW_PRE_U_BUF_OFFSET);
+ pre_write(pre, trd_buf_off, HW_PRE_V_BUF_OFFSET);
+ pre_write(pre, BF_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_X(x_crop) |
+ BF_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC_OUTPUT_SIZE_ULC_Y(y_crop),
+ HW_PRE_PREFETCH_ENGINE_OUTPUT_SIZE_ULC);
+ pre_write(pre, BF_PRE_CTRL_SDW_UPDATE(1), HW_PRE_CTRL_SET);
+ spin_unlock_irqrestore(&pre->lock, lock_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_pre_set_fb_buffer);
+
+static int ipu_pre_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ipu_pre_data *pre;
+ struct resource *res;
+ int id, irq, err;
+
+ pre = devm_kzalloc(&pdev->dev, sizeof(*pre), GFP_KERNEL);
+ if (!pre)
+ return -ENOMEM;
+ pre->dev = &pdev->dev;
+
+ id = of_alias_get_id(np, "pre");
+ if (id < 0) {
+ dev_err(&pdev->dev, "failed to get PRE id\n");
+ return id;
+ }
+ pre->id = id;
+
+ mutex_init(&pre->mutex);
+ spin_lock_init(&pre->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pre->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pre->base))
+ return PTR_ERR(pre->base);
+
+ pre->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pre->clk)) {
+ dev_err(&pdev->dev, "failed to get the pre clk\n");
+ return PTR_ERR(pre->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ err = devm_request_irq(&pdev->dev, irq, ipu_pre_irq_handle,
+ IRQF_TRIGGER_RISING, pdev->name, pre);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request pre irq\n");
+ return err;
+ }
+
+ pre->iram_pool = of_get_named_gen_pool(pdev->dev.of_node, "ocram", 0);
+ if (!pre->iram_pool) {
+		dev_err(&pdev->dev, "no iram available for pre\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&pre_list_lock);
+ list_add_tail(&pre->list, &pre_list);
+ mutex_unlock(&pre_list_lock);
+
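+	/* reserve OCRAM for the double buffer: 8 lines at the maximum width and bpp */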
+ ipu_pre_alloc_double_buffer(pre->id, IPU_PRE_MAX_WIDTH * 8 * IPU_PRE_MAX_BPP);
+
+ /* PRE GATE ON */
+ clk_prepare_enable(pre->clk);
+ pre_write(pre, BF_PRE_CTRL_SFTRST(1) | BF_PRE_CTRL_CLKGATE(1),
+ HW_PRE_CTRL_CLR);
+ pre_write(pre, 0xf, HW_PRE_IRQ_MASK);
+ clk_disable_unprepare(pre->clk);
+
+ platform_set_drvdata(pdev, pre);
+
+ dev_info(&pdev->dev, "driver probed\n");
+
+ return 0;
+}
+
+static int ipu_pre_remove(struct platform_device *pdev)
+{
+ struct ipu_pre_data *pre = platform_get_drvdata(pdev);
+
+ if (pre->iram_pool && pre->double_buffer_base) {
+ gen_pool_free(pre->iram_pool,
+ pre->double_buffer_base,
+ pre->double_buffer_size);
+ }
+
+ mutex_lock(&pre_list_lock);
+ list_del(&pre->list);
+ mutex_unlock(&pre_list_lock);
+
+ return 0;
+}
+
+static const struct of_device_id imx_ipu_pre_dt_ids[] = {
+ { .compatible = "fsl,imx6q-pre", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_ipu_pre_dt_ids);
+
+static struct platform_driver ipu_pre_driver = {
+ .driver = {
+ .name = "imx-pre",
+ .of_match_table = of_match_ptr(imx_ipu_pre_dt_ids),
+ },
+ .probe = ipu_pre_probe,
+ .remove = ipu_pre_remove,
+};
+
+static int __init ipu_pre_init(void)
+{
+ return platform_driver_register(&ipu_pre_driver);
+}
+subsys_initcall(ipu_pre_init);
+
+static void __exit ipu_pre_exit(void)
+{
+ platform_driver_unregister(&ipu_pre_driver);
+}
+module_exit(ipu_pre_exit);
+
+MODULE_DESCRIPTION("i.MX PRE driver");
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mxc/ipu3/prg-regs.h b/drivers/mxc/ipu3/prg-regs.h
new file mode 100644
index 000000000000..bca0e560a6f9
--- /dev/null
+++ b/drivers/mxc/ipu3/prg-regs.h
@@ -0,0 +1,70 @@
+/*
+ * Freescale IPU PRG Register Definitions
+ *
+ * Copyright 2014-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/bitops.h>
+
+#ifndef __IPU_PRG_H__
+#define __IPU_PRG_H__
+
+#define IPU_PR_CTRL 0x00
+#define IPU_PR_STATUS 0x04
+#define IPU_PR_QOS 0x08
+#define IPU_PR_REG_UPDATE 0x0c
+#define IPU_PR_STRIDE(ch) (0x10 + (ch) * 4)
+#define IPU_PR_CROP_LINE 0x1c
+#define IPU_PR_ADDR_THD 0x20
+#define IPU_PR_CH_BADDR(ch) (0x24 + (ch) * 4)
+#define IPU_PR_CH_OFFSET(ch) (0x30 + (ch) * 4)
+#define IPU_PR_CH_ILO(ch) (0x3c + (ch) * 4)
+#define IPU_PR_CH_HEIGHT(ch) (0x48 + (ch) * 4)
+
+#define IPU_PR_CTRL_CH_BYPASS(ch) (0x1 << (ch))
+#define IPU_PR_CTRL_SOFT_CH_ARID(ch, n) ((n) << ((ch) * 2 + 8))
+#define IPU_PR_CTRL_SOFT_CH_ARID_MASK(ch) (0x3 << ((ch) * 2 + 8))
+#define IPU_PR_CTRL_CH_SO(ch, interlace) ((interlace) << ((ch) + 16))
+#define IPU_PR_CTRL_CH_SO_MASK(ch) (0x1 << ((ch) + 16))
+#define IPU_PR_CTRL_CH_VFLIP(ch, vflip) ((vflip) << ((ch) + 19))
+#define IPU_PR_CTRL_CH_VFLIP_MASK(ch) (0x1 << ((ch) + 19))
+#define IPU_PR_CTRL_CH_BLOCK_MODE(ch, mode) ((mode) << ((ch) + 22))
+#define IPU_PR_CTRL_CH_BLOCK_MODE_MASK(ch) (0x1 << ((ch) + 22))
+#define IPU_PR_CTRL_CH_CNT_LOAD_EN(ch) (0x1 << ((ch) + 25))
+#define IPU_PR_CTRL_CH_CNT_LOAD_EN_MASK (0x7 << 25)
+#define IPU_PR_CTRL_SOFTRST BIT(30)
+#define IPU_PR_CTRL_SHADOW_EN BIT(31)
+
+#define IPU_PR_STATUS_BUF_RDY(ch, buf) (1 << ((ch) * 2 + (buf)))
+
+#define IPU_PR_QOS_PRI(id, qos) ((qos) << ((id) * 4))
+#define IPU_PR_QOS_MASK(id) (0xf << ((id) * 4))
+
+#define IPU_PR_REG_UPDATE_EN BIT(0)
+
+#define IPU_PR_STRIDE_MASK 0x3fff
+
+#define IPU_PR_CROP_LINE_NUM(ch, n) ((n) << ((ch) * 4))
+#define IPU_PR_CROP_LINE_MASK(ch) (0xf << ((ch) * 4))
+
+#define IPU_PR_ADDR_THD_MASK 0xffffffff
+
+#define IPU_PR_CH_BADDR_MASK 0xffffffff
+
+#define IPU_PR_CH_OFFSET_MASK 0xffffffff
+
+#define IPU_PR_CH_ILO_MASK 0x007fffff
+#define IPU_PR_CH_ILO_NUM(ilo) ((ilo) & IPU_PR_CH_ILO_MASK)
+
+#define IPU_PR_CH_HEIGHT_MASK 0x00000fff
+#define IPU_PR_CH_HEIGHT_NUM(fh) (((fh) - 1) & IPU_PR_CH_HEIGHT_MASK)
+#define IPU_PR_CH_IPU_HEIGHT_MASK 0x0fff0000
+#define IPU_PR_CH_IPU_HEIGHT_NUM(fh) ((((fh) - 1) << 16) & IPU_PR_CH_IPU_HEIGHT_MASK)
+
+#endif /* __IPU_PRG_H__ */
diff --git a/drivers/mxc/ipu3/prg.c b/drivers/mxc/ipu3/prg.c
new file mode 100644
index 000000000000..45363a843392
--- /dev/null
+++ b/drivers/mxc/ipu3/prg.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ipu-v3-prg.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "prg-regs.h"
+
+#define PRG_CHAN_NUM 3
+
+struct prg_chan {
+ unsigned int pre_num;
+ struct mutex mutex; /* for in_use */
+ bool in_use;
+};
+
+struct ipu_prg_data {
+ unsigned int id;
+ void __iomem *base;
+ unsigned long memory;
+ struct clk *axi_clk;
+ struct clk *apb_clk;
+ struct list_head list;
+ struct device *dev;
+ struct prg_chan chan[PRG_CHAN_NUM];
+ struct regmap *regmap;
+ struct regmap_field *pre_prg_sel[2];
+ spinlock_t lock;
+};
+
+static LIST_HEAD(prg_list);
+static DEFINE_MUTEX(prg_lock);
+
+static inline void prg_write(struct ipu_prg_data *prg,
+ u32 value, unsigned int offset)
+{
+ writel(value, prg->base + offset);
+}
+
+static inline u32 prg_read(struct ipu_prg_data *prg, unsigned offset)
+{
+ return readl(prg->base + offset);
+}
+
+static struct ipu_prg_data *get_prg(unsigned int ipu_id)
+{
+ struct ipu_prg_data *prg;
+
+ mutex_lock(&prg_lock);
+ list_for_each_entry(prg, &prg_list, list) {
+ if (prg->id == ipu_id) {
+ mutex_unlock(&prg_lock);
+ return prg;
+ }
+ }
+ mutex_unlock(&prg_lock);
+
+ return NULL;
+}
+
+static int assign_prg_chan(struct ipu_prg_data *prg, unsigned int pre_num,
+ ipu_channel_t ipu_ch)
+{
+ int prg_ch;
+
+ if (!prg)
+ return -EINVAL;
+
+ switch (ipu_ch) {
+ case MEM_BG_SYNC:
+ prg_ch = 0;
+ break;
+ case MEM_FG_SYNC:
+ prg_ch = 1;
+ break;
+ case MEM_DC_SYNC:
+ prg_ch = 2;
+ break;
+ default:
+ dev_err(prg->dev, "wrong ipu channel type\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&prg->chan[prg_ch].mutex);
+ if (!prg->chan[prg_ch].in_use) {
+ prg->chan[prg_ch].in_use = true;
+ prg->chan[prg_ch].pre_num = pre_num;
+
+ if (prg_ch != 0) {
+ unsigned int pmux, psel; /* primary */
+ unsigned int smux, ssel; /* secondary */
+ struct regmap_field *pfield, *sfield;
+
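+			/*
+			 * pre_num - 1 picks the matching PRE_PRG_SEL field;
+			 * the other field is treated as the secondary one.
+			 */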
+ psel = pre_num - 1;
+ ssel = psel ? 0 : 1;
+
+ pfield = prg->pre_prg_sel[psel];
+ sfield = prg->pre_prg_sel[ssel];
+ pmux = (prg_ch - 1) + (prg->id << 1);
+
+ mutex_lock(&prg_lock);
+ regmap_field_write(pfield, pmux);
+
+ /*
+			 * PRE1 and PRE2 cannot be bound to the same channel of
+			 * one PRG, even if one of the two PREs is disabled.
+ */
+ regmap_field_read(sfield, &smux);
+ if (smux == pmux) {
+ smux = pmux ^ 0x1;
+ regmap_field_write(sfield, smux);
+ }
+ mutex_unlock(&prg_lock);
+ }
+ mutex_unlock(&prg->chan[prg_ch].mutex);
+ dev_dbg(prg->dev, "bind prg%u ch%d with pre%u\n",
+ prg->id, prg_ch, pre_num);
+ return prg_ch;
+ }
+ mutex_unlock(&prg->chan[prg_ch].mutex);
+ return -EBUSY;
+}
+
+static inline int get_prg_chan(struct ipu_prg_data *prg, unsigned int pre_num)
+{
+ int i;
+
+ if (!prg)
+ return -EINVAL;
+
+ for (i = 0; i < PRG_CHAN_NUM; i++) {
+ mutex_lock(&prg->chan[i].mutex);
+ if (prg->chan[i].in_use &&
+ prg->chan[i].pre_num == pre_num) {
+ mutex_unlock(&prg->chan[i].mutex);
+ return i;
+ }
+ mutex_unlock(&prg->chan[i].mutex);
+ }
+ return -ENOENT;
+}
+
+int ipu_prg_config(struct ipu_prg_config *config)
+{
+ struct ipu_prg_data *prg = get_prg(config->id);
+ struct ipu_soc *ipu = ipu_get_soc(config->id);
+ int prg_ch, axi_id;
+ u32 reg;
+
+ if (!prg || config->crop_line > 3 || !ipu)
+ return -EINVAL;
+
+ if (config->height & ~IPU_PR_CH_HEIGHT_MASK)
+ return -EINVAL;
+
+ prg_ch = assign_prg_chan(prg, config->pre_num, config->ipu_ch);
+ if (prg_ch < 0)
+ return prg_ch;
+
+ axi_id = ipu_ch_param_get_axi_id(ipu, config->ipu_ch, IPU_INPUT_BUFFER);
+
+ clk_prepare_enable(prg->axi_clk);
+ clk_prepare_enable(prg->apb_clk);
+
+ spin_lock(&prg->lock);
+	/* clear all load enable bits so we do not impact other channels */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg &= ~IPU_PR_CTRL_CH_CNT_LOAD_EN_MASK;
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* counter load enable */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg |= IPU_PR_CTRL_CH_CNT_LOAD_EN(prg_ch);
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* AXI ID */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg &= ~IPU_PR_CTRL_SOFT_CH_ARID_MASK(prg_ch);
+ reg |= IPU_PR_CTRL_SOFT_CH_ARID(prg_ch, axi_id);
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* so */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg &= ~IPU_PR_CTRL_CH_SO_MASK(prg_ch);
+ reg |= IPU_PR_CTRL_CH_SO(prg_ch, config->so);
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* vflip */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg &= ~IPU_PR_CTRL_CH_VFLIP_MASK(prg_ch);
+ reg |= IPU_PR_CTRL_CH_VFLIP(prg_ch, config->vflip);
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* block mode */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg &= ~IPU_PR_CTRL_CH_BLOCK_MODE_MASK(prg_ch);
+ reg |= IPU_PR_CTRL_CH_BLOCK_MODE(prg_ch, config->block_mode);
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* disable bypass */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg &= ~IPU_PR_CTRL_CH_BYPASS(prg_ch);
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* stride */
+ reg = prg_read(prg, IPU_PR_STRIDE(prg_ch));
+ reg &= ~IPU_PR_STRIDE_MASK;
+ reg |= config->stride - 1;
+ prg_write(prg, reg, IPU_PR_STRIDE(prg_ch));
+
+ /* ilo */
+ reg = prg_read(prg, IPU_PR_CH_ILO(prg_ch));
+ reg &= ~IPU_PR_CH_ILO_MASK;
+ reg |= IPU_PR_CH_ILO_NUM(config->ilo);
+ prg_write(prg, reg, IPU_PR_CH_ILO(prg_ch));
+
+ /* height */
+ reg = prg_read(prg, IPU_PR_CH_HEIGHT(prg_ch));
+ reg &= ~IPU_PR_CH_HEIGHT_MASK;
+ reg |= IPU_PR_CH_HEIGHT_NUM(config->height);
+ prg_write(prg, reg, IPU_PR_CH_HEIGHT(prg_ch));
+
+ /* ipu height */
+ reg = prg_read(prg, IPU_PR_CH_HEIGHT(prg_ch));
+ reg &= ~IPU_PR_CH_IPU_HEIGHT_MASK;
+ reg |= IPU_PR_CH_IPU_HEIGHT_NUM(config->ipu_height);
+ prg_write(prg, reg, IPU_PR_CH_HEIGHT(prg_ch));
+
+ /* crop */
+ reg = prg_read(prg, IPU_PR_CROP_LINE);
+ reg &= ~IPU_PR_CROP_LINE_MASK(prg_ch);
+ reg |= IPU_PR_CROP_LINE_NUM(prg_ch, config->crop_line);
+ prg_write(prg, reg, IPU_PR_CROP_LINE);
+
+ /* buffer address */
+ reg = prg_read(prg, IPU_PR_CH_BADDR(prg_ch));
+ reg &= ~IPU_PR_CH_BADDR_MASK;
+ reg |= config->baddr;
+ prg_write(prg, reg, IPU_PR_CH_BADDR(prg_ch));
+
+ /* offset */
+ reg = prg_read(prg, IPU_PR_CH_OFFSET(prg_ch));
+ reg &= ~IPU_PR_CH_OFFSET_MASK;
+ reg |= config->offset;
+ prg_write(prg, reg, IPU_PR_CH_OFFSET(prg_ch));
+
+ /* threshold */
+ reg = prg_read(prg, IPU_PR_ADDR_THD);
+ reg &= ~IPU_PR_ADDR_THD_MASK;
+ reg |= prg->memory;
+ prg_write(prg, reg, IPU_PR_ADDR_THD);
+
+ /* shadow enable */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg |= IPU_PR_CTRL_SHADOW_EN;
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* register update */
+ reg = prg_read(prg, IPU_PR_REG_UPDATE);
+ reg |= IPU_PR_REG_UPDATE_EN;
+ prg_write(prg, reg, IPU_PR_REG_UPDATE);
+ spin_unlock(&prg->lock);
+
+ clk_disable_unprepare(prg->apb_clk);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_prg_config);
+
+int ipu_prg_disable(unsigned int ipu_id, unsigned int pre_num)
+{
+ struct ipu_prg_data *prg = get_prg(ipu_id);
+ int prg_ch;
+ u32 reg;
+
+ if (!prg)
+ return -EINVAL;
+
+ prg_ch = get_prg_chan(prg, pre_num);
+ if (prg_ch < 0)
+ return prg_ch;
+
+ clk_prepare_enable(prg->apb_clk);
+
+ spin_lock(&prg->lock);
+	/* clear all load enable bits so we do not impact other channels */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg &= ~IPU_PR_CTRL_CH_CNT_LOAD_EN_MASK;
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* counter load enable */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg |= IPU_PR_CTRL_CH_CNT_LOAD_EN(prg_ch);
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* enable bypass */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg |= IPU_PR_CTRL_CH_BYPASS(prg_ch);
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* shadow enable */
+ reg = prg_read(prg, IPU_PR_CTRL);
+ reg |= IPU_PR_CTRL_SHADOW_EN;
+ prg_write(prg, reg, IPU_PR_CTRL);
+
+ /* register update */
+ reg = prg_read(prg, IPU_PR_REG_UPDATE);
+ reg |= IPU_PR_REG_UPDATE_EN;
+ prg_write(prg, reg, IPU_PR_REG_UPDATE);
+ spin_unlock(&prg->lock);
+
+ clk_disable_unprepare(prg->apb_clk);
+ clk_disable_unprepare(prg->axi_clk);
+
+ mutex_lock(&prg->chan[prg_ch].mutex);
+ prg->chan[prg_ch].in_use = false;
+ mutex_unlock(&prg->chan[prg_ch].mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_prg_disable);
+
+int ipu_prg_wait_buf_ready(unsigned int ipu_id, unsigned int pre_num,
+ unsigned int hsk_line_num,
+ int pre_store_out_height)
+{
+ struct ipu_prg_data *prg = get_prg(ipu_id);
+ int prg_ch, timeout = 1000;
+ u32 reg;
+
+ if (!prg)
+ return -EINVAL;
+
+ prg_ch = get_prg_chan(prg, pre_num);
+ if (prg_ch < 0)
+ return prg_ch;
+
+ clk_prepare_enable(prg->apb_clk);
+
+ spin_lock(&prg->lock);
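+	/*
+	 * 4 << hsk_line_num is the handshake line count (4/8/16): outputs that
+	 * fit within it only need buffer 0 ready, larger ones need both buffers.
+	 */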
+ if (pre_store_out_height <= (4 << hsk_line_num)) {
+ do {
+ reg = prg_read(prg, IPU_PR_STATUS);
+ udelay(1000);
+ timeout--;
+ } while (!(reg & IPU_PR_STATUS_BUF_RDY(prg_ch, 0)) && timeout);
+ } else {
+ do {
+ reg = prg_read(prg, IPU_PR_STATUS);
+ udelay(1000);
+ timeout--;
+ } while ((!(reg & IPU_PR_STATUS_BUF_RDY(prg_ch, 0)) ||
+ !(reg & IPU_PR_STATUS_BUF_RDY(prg_ch, 1))) && timeout);
+ }
+ spin_unlock(&prg->lock);
+
+ clk_disable_unprepare(prg->apb_clk);
+
+ if (!timeout)
+ dev_err(prg->dev, "wait for buffer ready timeout\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_prg_wait_buf_ready);
+
+static int ipu_prg_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node, *memory;
+ struct ipu_prg_data *prg;
+ struct resource *res;
+ struct reg_field reg_field0 = REG_FIELD(IOMUXC_GPR5,
+ IMX6Q_GPR5_PRE_PRG_SEL0_LSB,
+ IMX6Q_GPR5_PRE_PRG_SEL0_MSB);
+ struct reg_field reg_field1 = REG_FIELD(IOMUXC_GPR5,
+ IMX6Q_GPR5_PRE_PRG_SEL1_LSB,
+ IMX6Q_GPR5_PRE_PRG_SEL1_MSB);
+ int id, i;
+
+ prg = devm_kzalloc(&pdev->dev, sizeof(*prg), GFP_KERNEL);
+ if (!prg)
+ return -ENOMEM;
+ prg->dev = &pdev->dev;
+
+ for (i = 0; i < PRG_CHAN_NUM; i++)
+ mutex_init(&prg->chan[i].mutex);
+
+ spin_lock_init(&prg->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ prg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(prg->base))
+ return PTR_ERR(prg->base);
+
+ prg->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(prg->axi_clk)) {
+ dev_err(&pdev->dev, "failed to get the axi clk\n");
+ return PTR_ERR(prg->axi_clk);
+ }
+
+ prg->apb_clk = devm_clk_get(&pdev->dev, "apb");
+ if (IS_ERR(prg->apb_clk)) {
+ dev_err(&pdev->dev, "failed to get the apb clk\n");
+ return PTR_ERR(prg->apb_clk);
+ }
+
+ prg->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(prg->regmap)) {
+ dev_err(&pdev->dev, "failed to get regmap\n");
+ return PTR_ERR(prg->regmap);
+ }
+
+ prg->pre_prg_sel[0] = devm_regmap_field_alloc(&pdev->dev, prg->regmap,
+ reg_field0);
+ if (IS_ERR(prg->pre_prg_sel[0]))
+ return PTR_ERR(prg->pre_prg_sel[0]);
+
+ prg->pre_prg_sel[1] = devm_regmap_field_alloc(&pdev->dev, prg->regmap,
+ reg_field1);
+ if (IS_ERR(prg->pre_prg_sel[1]))
+ return PTR_ERR(prg->pre_prg_sel[1]);
+
+ memory = of_parse_phandle(np, "memory-region", 0);
+ if (!memory)
+ return -ENODEV;
+
+ prg->memory = of_translate_address(memory,
+ of_get_address(memory, 0, NULL, NULL));
+
+ id = of_alias_get_id(np, "prg");
+ if (id < 0) {
+ dev_err(&pdev->dev, "failed to get PRG id\n");
+ return id;
+ }
+ prg->id = id;
+
+ mutex_lock(&prg_lock);
+ list_add_tail(&prg->list, &prg_list);
+ mutex_unlock(&prg_lock);
+
+ platform_set_drvdata(pdev, prg);
+
+ dev_info(&pdev->dev, "driver probed\n");
+
+ return 0;
+}
+
+static int ipu_prg_remove(struct platform_device *pdev)
+{
+ struct ipu_prg_data *prg = platform_get_drvdata(pdev);
+
+ mutex_lock(&prg_lock);
+ list_del(&prg->list);
+ mutex_unlock(&prg_lock);
+
+ return 0;
+}
+
+static const struct of_device_id imx_ipu_prg_dt_ids[] = {
+ { .compatible = "fsl,imx6q-prg", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_ipu_prg_dt_ids);
+
+static struct platform_driver ipu_prg_driver = {
+ .driver = {
+ .name = "imx-prg",
+ .of_match_table = of_match_ptr(imx_ipu_prg_dt_ids),
+ },
+ .probe = ipu_prg_probe,
+ .remove = ipu_prg_remove,
+};
+
+static int __init ipu_prg_init(void)
+{
+ return platform_driver_register(&ipu_prg_driver);
+}
+subsys_initcall(ipu_prg_init);
+
+static void __exit ipu_prg_exit(void)
+{
+ platform_driver_unregister(&ipu_prg_driver);
+}
+module_exit(ipu_prg_exit);
+
+MODULE_DESCRIPTION("i.MX PRG driver");
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mxc/ipu3/vdoa.c b/drivers/mxc/ipu3/vdoa.c
new file mode 100644
index 000000000000..e727182a94c0
--- /dev/null
+++ b/drivers/mxc/ipu3/vdoa.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright (C) 2012-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ipu.h>
+#include <linux/genalloc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "vdoa.h"
+/* 6 bands (3 fields * double buffer) * (width * 2 bytes) * band lines (8)
+   = 6 * 1024 * 2 * 8 = 96K, or 72K at 1.5 bytes per pixel */
+#define MAX_VDOA_IRAM_SIZE (1024*96)
+#define VDOA_IRAM_SIZE (1024*72)
+
+#define VDOAC_BAND_HEIGHT_32LINES (32)
+#define VDOAC_BAND_HEIGHT_16LINES (16)
+#define VDOAC_BAND_HEIGHT_8LINES (8)
+#define VDOAC_THREE_FRAMES (0x1 << 2)
+#define VDOAC_SYNC_BAND_MODE (0x1 << 3)
+#define VDOAC_SCAN_ORDER_INTERLACED (0x1 << 4)
+#define VDOAC_PFS_YUYV (0x1 << 5)
+#define VDOAC_IPU_SEL_1 (0x1 << 6)
+#define VDOAFP_FH_MASK (0x1FFF)
+#define VDOAFP_FH_SHIFT (16)
+#define VDOAFP_FW_MASK (0x3FFF)
+#define VDOAFP_FW_SHIFT (0)
+#define VDOASL_VSLY_MASK (0x3FFF)
+#define VDOASL_VSLY_SHIFT (16)
+#define VDOASL_ISLY_MASK (0x7FFF)
+#define VDOASL_ISLY_SHIFT (0)
+#define VDOASRR_START_XFER (0x2)
+#define VDOASRR_SWRST (0x1)
+#define VDOAIEIST_TRANSFER_ERR (0x2)
+#define VDOAIEIST_TRANSFER_END (0x1)
+
+#define VDOAC (0x0) /* Control Register */
+#define VDOASRR (0x4) /* Start and Reset Register */
+#define VDOAIE (0x8) /* Interrupt Enable Register */
+#define VDOAIST (0xc) /* Interrupt Status Register */
+#define VDOAFP (0x10) /* Frame Parameters Register */
+#define VDOAIEBA00 (0x14) /* External Buffer n Frame m Address Register */
+#define VDOAIEBA01 (0x18) /* External Buffer n Frame m Address Register */
+#define VDOAIEBA02 (0x1c) /* External Buffer n Frame m Address Register */
+#define VDOAIEBA10 (0x20) /* External Buffer n Frame m Address Register */
+#define VDOAIEBA11 (0x24) /* External Buffer n Frame m Address Register */
+#define VDOAIEBA12 (0x28) /* External Buffer n Frame m Address Register */
+#define VDOASL (0x2c) /* IPU Stride Line Register */
+#define VDOAIUBO (0x30) /* IPU Chroma Buffer Offset Register */
+#define VDOAVEBA0 (0x34) /* External Buffer m Address Register */
+#define VDOAVEBA1 (0x38) /* External Buffer m Address Register */
+#define VDOAVEBA2 (0x3c) /* External Buffer m Address Register */
+#define VDOAVUBO (0x40) /* VPU Chroma Buffer Offset */
+#define VDOASR (0x44) /* Status Register */
+#define VDOATD (0x48) /* Test Debug Register */
+
+
+enum {
+ VDOA_INIT = 0x1,
+ VDOA_GET = 0x2,
+ VDOA_SETUP = 0x4,
+ VDOA_GET_OBUF = 0x8,
+ VDOA_START = 0x10,
+ VDOA_INIRQ = 0x20,
+ VDOA_STOP = 0x40,
+ VDOA_PUT = VDOA_INIT,
+};
+
+enum {
+ VDOA_NULL = 0,
+ VDOA_FRAME = 1,
+ VDOA_PREV_FIELD = 2,
+ VDOA_CURR_FIELD = 3,
+ VDOA_NEXT_FIELD = 4,
+};
+
+#define CHECK_STATE(expect, retcode) \
+do { \
+ if (!((expect) & vdoa->state)) { \
+ dev_err(vdoa->dev, "ERR: %s state:0x%x, expect:0x%x.\n",\
+ __func__, vdoa->state, (expect)); \
+ retcode; \
+ } \
+} while (0)
+
+#define CHECK_NULL_PTR(ptr) \
+do { \
+ pr_debug("vdoa_ptr:0x%p in %s state:0x%x.\n", \
+ vdoa, __func__, vdoa->state); \
+ if (NULL == (ptr)) { \
+ pr_err("ERR vdoa: %s state:0x%x null ptr.\n", \
+ __func__, vdoa->state); \
+ } \
+} while (0)
+
+struct vdoa_info {
+ int state;
+ struct device *dev;
+ struct clk *vdoa_clk;
+ void __iomem *reg_base;
+ struct gen_pool *iram_pool;
+ unsigned long iram_base;
+ unsigned long iram_paddr;
+ int irq;
+ int field;
+ struct completion comp;
+};
+
+static struct vdoa_info *g_vdoa;
+static unsigned long iram_size;
+static DEFINE_MUTEX(vdoa_lock);
+
+static inline void vdoa_read_register(struct vdoa_info *vdoa,
+ u32 reg, u32 *val)
+{
+ *val = ioread32(vdoa->reg_base + reg);
+ dev_dbg(vdoa->dev, "read_reg:0x%02x, val:0x%08x.\n", reg, *val);
+}
+
+static inline void vdoa_write_register(struct vdoa_info *vdoa,
+ u32 reg, u32 val)
+{
+ iowrite32(val, vdoa->reg_base + reg);
+ dev_dbg(vdoa->dev, "\t\twrite_reg:0x%02x, val:0x%08x.\n", reg, val);
+}
+
+static void dump_registers(struct vdoa_info *vdoa)
+{
+ int i;
+ u32 data;
+
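+	/* vdoa_read_register() logs each value via dev_dbg(), so reading is enough to dump */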
+ for (i = VDOAC; i < VDOATD; i += 4)
+ vdoa_read_register(vdoa, i, &data);
+}
+
+int vdoa_setup(vdoa_handle_t handle, struct vdoa_params *params)
+{
+ int band_size;
+ int total_band_size = 0;
+ int ipu_stride;
+ u32 data;
+ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
+
+ CHECK_NULL_PTR(vdoa);
+ CHECK_STATE(VDOA_GET | VDOA_GET_OBUF | VDOA_STOP, return -EINVAL);
+ if (VDOA_GET == vdoa->state) {
+ dev_dbg(vdoa->dev, "w:%d, h:%d.\n",
+ params->width, params->height);
+ data = (params->band_lines == VDOAC_BAND_HEIGHT_32LINES) ? 2 :
+ ((params->band_lines == VDOAC_BAND_HEIGHT_16LINES) ?
+ 1 : 0);
+ data |= params->scan_order ? VDOAC_SCAN_ORDER_INTERLACED : 0;
+ data |= params->band_mode ? VDOAC_SYNC_BAND_MODE : 0;
+ data |= params->pfs ? VDOAC_PFS_YUYV : 0;
+ data |= params->ipu_num ? VDOAC_IPU_SEL_1 : 0;
+ vdoa_write_register(vdoa, VDOAC, data);
+
+ data = ((params->width & VDOAFP_FW_MASK) << VDOAFP_FW_SHIFT) |
+ ((params->height & VDOAFP_FH_MASK) << VDOAFP_FH_SHIFT);
+ vdoa_write_register(vdoa, VDOAFP, data);
+
+ ipu_stride = params->pfs ? params->width << 1 : params->width;
+ data = ((params->vpu_stride & VDOASL_VSLY_MASK) <<
+ VDOASL_VSLY_SHIFT) |
+ ((ipu_stride & VDOASL_ISLY_MASK) << VDOASL_ISLY_SHIFT);
+ vdoa_write_register(vdoa, VDOASL, data);
+
+ dev_dbg(vdoa->dev, "band_mode:%d, band_line:%d, base:0x%lx.\n",
+ params->band_mode, params->band_lines, vdoa->iram_paddr);
+ }
+ /*
+ * band size = (luma_per_line + chroma_per_line) * bandLines
+ * = width * (3/2 or 2) * bandLines
+ * double buffer mode used.
+ */
+ if (params->pfs)
+ band_size = (params->width << 1) * params->band_lines;
+ else
+ band_size = ((params->width * 3) >> 1) *
+ params->band_lines;
+ if (params->interlaced) {
+ total_band_size = 6 * band_size; /* 3 frames*double buffer */
+ if (iram_size < total_band_size) {
+			dev_err(vdoa->dev, "iram_size:0x%lx is smaller than "
+					"requested size:0x%x!\n", iram_size, total_band_size);
+ return -EINVAL;
+ }
+ if (params->vfield_buf.prev_veba) {
+ if (params->band_mode) {
+ vdoa_write_register(vdoa, VDOAIEBA00,
+ vdoa->iram_paddr);
+ vdoa_write_register(vdoa, VDOAIEBA10,
+ vdoa->iram_paddr + band_size);
+ } else
+ vdoa_write_register(vdoa, VDOAIEBA00,
+ params->ieba0);
+ vdoa_write_register(vdoa, VDOAVEBA0,
+ params->vfield_buf.prev_veba);
+ vdoa->field = VDOA_PREV_FIELD;
+ }
+ if (params->vfield_buf.cur_veba) {
+ if (params->band_mode) {
+ vdoa_write_register(vdoa, VDOAIEBA01,
+ vdoa->iram_paddr + band_size * 2);
+ vdoa_write_register(vdoa, VDOAIEBA11,
+ vdoa->iram_paddr + band_size * 3);
+ } else
+ vdoa_write_register(vdoa, VDOAIEBA01,
+ params->ieba1);
+ vdoa_write_register(vdoa, VDOAVEBA1,
+ params->vfield_buf.cur_veba);
+ vdoa->field = VDOA_CURR_FIELD;
+ }
+ if (params->vfield_buf.next_veba) {
+ if (params->band_mode) {
+ vdoa_write_register(vdoa, VDOAIEBA02,
+ vdoa->iram_paddr + band_size * 4);
+ vdoa_write_register(vdoa, VDOAIEBA12,
+ vdoa->iram_paddr + band_size * 5);
+ } else
+ vdoa_write_register(vdoa, VDOAIEBA02,
+ params->ieba2);
+ vdoa_write_register(vdoa, VDOAVEBA2,
+ params->vfield_buf.next_veba);
+ vdoa->field = VDOA_NEXT_FIELD;
+ vdoa_read_register(vdoa, VDOAC, &data);
+ data |= VDOAC_THREE_FRAMES;
+ vdoa_write_register(vdoa, VDOAC, data);
+ }
+
+ if (!params->pfs)
+ vdoa_write_register(vdoa, VDOAIUBO,
+ params->width * params->band_lines);
+ vdoa_write_register(vdoa, VDOAVUBO,
+ params->vfield_buf.vubo);
+ dev_dbg(vdoa->dev, "total band_size:0x%x.\n", band_size*6);
+ } else if (params->band_mode) {
+ /* used for progressive frame resize on PrP channel */
+		BUG(); /* currently not supported */
+		/* progressive frame: band mode */
+ vdoa_write_register(vdoa, VDOAIEBA00, vdoa->iram_paddr);
+ vdoa_write_register(vdoa, VDOAIEBA10,
+ vdoa->iram_paddr + band_size);
+ if (!params->pfs)
+ vdoa_write_register(vdoa, VDOAIUBO,
+ params->width * params->band_lines);
+ dev_dbg(vdoa->dev, "total band_size:0x%x\n", band_size*2);
+ } else {
+ /* progressive frame: mem->mem, non-band mode */
+ vdoa->field = VDOA_FRAME;
+ vdoa_write_register(vdoa, VDOAVEBA0, params->vframe_buf.veba);
+ vdoa_write_register(vdoa, VDOAVUBO, params->vframe_buf.vubo);
+ vdoa_write_register(vdoa, VDOAIEBA00, params->ieba0);
+ if (!params->pfs)
+			/* note: iubo is a relative value, based on ieba0 */
+ vdoa_write_register(vdoa, VDOAIUBO,
+ params->width * params->height);
+ }
+ vdoa->state = VDOA_SETUP;
+ return 0;
+}
+
+void vdoa_get_output_buf(vdoa_handle_t handle, struct vdoa_ipu_buf *buf)
+{
+ u32 data;
+ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
+
+ CHECK_NULL_PTR(vdoa);
+ CHECK_STATE(VDOA_SETUP, return);
+ vdoa->state = VDOA_GET_OBUF;
+ memset(buf, 0, sizeof(*buf));
+
+ vdoa_read_register(vdoa, VDOAC, &data);
+ switch (vdoa->field) {
+ case VDOA_FRAME:
+ case VDOA_PREV_FIELD:
+ vdoa_read_register(vdoa, VDOAIEBA00, &buf->ieba0);
+ if (data & VDOAC_SYNC_BAND_MODE)
+ vdoa_read_register(vdoa, VDOAIEBA10, &buf->ieba1);
+ break;
+ case VDOA_CURR_FIELD:
+ vdoa_read_register(vdoa, VDOAIEBA01, &buf->ieba0);
+ vdoa_read_register(vdoa, VDOAIEBA11, &buf->ieba1);
+ break;
+ case VDOA_NEXT_FIELD:
+ vdoa_read_register(vdoa, VDOAIEBA02, &buf->ieba0);
+ vdoa_read_register(vdoa, VDOAIEBA12, &buf->ieba1);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ if (!(data & VDOAC_PFS_YUYV))
+ vdoa_read_register(vdoa, VDOAIUBO, &buf->iubo);
+}
+
+int vdoa_start(vdoa_handle_t handle, int timeout_ms)
+{
+ int ret;
+ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
+
+ CHECK_NULL_PTR(vdoa);
+ CHECK_STATE(VDOA_GET_OBUF, return -EINVAL);
+ vdoa->state = VDOA_START;
+ init_completion(&vdoa->comp);
+ vdoa_write_register(vdoa, VDOAIST,
+ VDOAIEIST_TRANSFER_ERR | VDOAIEIST_TRANSFER_END);
+ vdoa_write_register(vdoa, VDOAIE,
+ VDOAIEIST_TRANSFER_ERR | VDOAIEIST_TRANSFER_END);
+
+ enable_irq(vdoa->irq);
+ vdoa_write_register(vdoa, VDOASRR, VDOASRR_START_XFER);
+ dump_registers(vdoa);
+
+ ret = wait_for_completion_timeout(&vdoa->comp,
+ msecs_to_jiffies(timeout_ms));
+
+ return ret > 0 ? 0 : -ETIMEDOUT;
+}
+
+void vdoa_stop(vdoa_handle_t handle)
+{
+ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
+
+ CHECK_NULL_PTR(vdoa);
+ CHECK_STATE(VDOA_GET | VDOA_START | VDOA_INIRQ, return);
+ vdoa->state = VDOA_STOP;
+
+ disable_irq(vdoa->irq);
+
+ vdoa_write_register(vdoa, VDOASRR, VDOASRR_SWRST);
+}
+
+void vdoa_get_handle(vdoa_handle_t *handle)
+{
+ struct vdoa_info *vdoa = g_vdoa;
+
+ CHECK_NULL_PTR(handle);
+ *handle = (vdoa_handle_t *)NULL;
+ CHECK_STATE(VDOA_INIT, return);
+ mutex_lock(&vdoa_lock);
+ clk_prepare_enable(vdoa->vdoa_clk);
+ vdoa->state = VDOA_GET;
+ vdoa->field = VDOA_NULL;
+ vdoa_write_register(vdoa, VDOASRR, VDOASRR_SWRST);
+
+ *handle = (vdoa_handle_t *)vdoa;
+}
+
+void vdoa_put_handle(vdoa_handle_t *handle)
+{
+ struct vdoa_info *vdoa = (struct vdoa_info *)(*handle);
+
+ CHECK_NULL_PTR(vdoa);
+ CHECK_STATE(VDOA_STOP, return);
+ if (vdoa != g_vdoa)
+ BUG();
+
+ clk_disable_unprepare(vdoa->vdoa_clk);
+ vdoa->state = VDOA_PUT;
+ *handle = (vdoa_handle_t *)NULL;
+ mutex_unlock(&vdoa_lock);
+}
+
+static irqreturn_t vdoa_irq_handler(int irq, void *data)
+{
+ u32 status, mask, val;
+ struct vdoa_info *vdoa = data;
+
+ CHECK_NULL_PTR(vdoa);
+ CHECK_STATE(VDOA_START, return IRQ_HANDLED);
+ vdoa->state = VDOA_INIRQ;
+ vdoa_read_register(vdoa, VDOAIST, &status);
+ vdoa_read_register(vdoa, VDOAIE, &mask);
+ val = status & mask;
+ vdoa_write_register(vdoa, VDOAIST, val);
+ if (VDOAIEIST_TRANSFER_ERR & val)
+ dev_err(vdoa->dev, "vdoa Transfer err irq!\n");
+ if (VDOAIEIST_TRANSFER_END & val)
+ dev_dbg(vdoa->dev, "vdoa Transfer end irq!\n");
+ if (0 == val) {
+ dev_err(vdoa->dev, "vdoa unknown irq!\n");
+ BUG();
+ }
+
+ complete(&vdoa->comp);
+ return IRQ_HANDLED;
+}
+
+/* IRAM Size in Kbytes, example:vdoa_iram_size=64, 64KBytes */
+static int __init vdoa_iram_size_setup(char *options)
+{
+ int ret;
+
+ ret = kstrtoul(options, 0, &iram_size);
+ if (ret)
+ iram_size = 0;
+ else
+ iram_size *= SZ_1K;
+
+ return 1;
+}
+__setup("vdoa_iram_size=", vdoa_iram_size_setup);
+
+static const struct of_device_id imx_vdoa_dt_ids[] = {
+ { .compatible = "fsl,imx6q-vdoa", },
+ { /* sentinel */ }
+};
+
+static int vdoa_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct vdoa_info *vdoa;
+ struct resource *res;
+ struct resource *res_irq;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "can't get device resources\n");
+ return -ENOENT;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq) {
+ dev_err(dev, "failed to get irq resource\n");
+ return -ENOENT;
+ }
+
+ vdoa = devm_kzalloc(dev, sizeof(struct vdoa_info), GFP_KERNEL);
+ if (!vdoa)
+ return -ENOMEM;
+ vdoa->dev = dev;
+
+ vdoa->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!vdoa->reg_base)
+ return -EBUSY;
+
+ vdoa->irq = res_irq->start;
+ ret = devm_request_irq(dev, vdoa->irq, vdoa_irq_handler, 0,
+ "vdoa", vdoa);
+ if (ret) {
+ dev_err(dev, "can't claim irq %d\n", vdoa->irq);
+ return ret;
+ }
+ disable_irq(vdoa->irq);
+
+ vdoa->vdoa_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(vdoa->vdoa_clk)) {
+ dev_err(dev, "failed to get vdoa_clk\n");
+ return PTR_ERR(vdoa->vdoa_clk);
+ }
+
+ vdoa->iram_pool = of_get_named_gen_pool(np, "iram", 0);
+ if (!vdoa->iram_pool) {
+ dev_err(&pdev->dev, "iram pool not available\n");
+ return -ENOMEM;
+ }
+
+ if ((iram_size == 0) || (iram_size > MAX_VDOA_IRAM_SIZE))
+ iram_size = VDOA_IRAM_SIZE;
+
+ vdoa->iram_base = gen_pool_alloc(vdoa->iram_pool, iram_size);
+ if (!vdoa->iram_base) {
+ dev_err(&pdev->dev, "unable to alloc iram\n");
+ return -ENOMEM;
+ }
+
+ vdoa->iram_paddr = gen_pool_virt_to_phys(vdoa->iram_pool,
+ vdoa->iram_base);
+
+ dev_dbg(dev, "iram_base:0x%lx,iram_paddr:0x%lx,size:0x%lx\n",
+ vdoa->iram_base, vdoa->iram_paddr, iram_size);
+
+ vdoa->state = VDOA_INIT;
+ dev_set_drvdata(dev, vdoa);
+ g_vdoa = vdoa;
+	dev_info(dev, "i.MX Video Data Order Adapter (VDOA) driver probed\n");
+ return 0;
+}
+
+static int vdoa_remove(struct platform_device *pdev)
+{
+ struct vdoa_info *vdoa = dev_get_drvdata(&pdev->dev);
+
+ gen_pool_free(vdoa->iram_pool, vdoa->iram_base, iram_size);
+	/* vdoa was allocated with devm_kzalloc(), so no explicit kfree() here */
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver vdoa_driver = {
+ .driver = {
+ .name = "mxc_vdoa",
+ .of_match_table = imx_vdoa_dt_ids,
+ },
+ .probe = vdoa_probe,
+ .remove = vdoa_remove,
+};
+
+static int __init vdoa_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&vdoa_driver);
+ if (err) {
+ pr_err("vdoa_driver register failed\n");
+		return err;
+ }
+ return 0;
+}
+
+static void __exit vdoa_cleanup(void)
+{
+ platform_driver_unregister(&vdoa_driver);
+}
+
+module_init(vdoa_init);
+module_exit(vdoa_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX Video Data Order Adapter (VDOA) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mxc/ipu3/vdoa.h b/drivers/mxc/ipu3/vdoa.h
new file mode 100644
index 000000000000..3e4c7356cb26
--- /dev/null
+++ b/drivers/mxc/ipu3/vdoa.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __VDOA_H__
+#define __VDOA_H__
+
+#define VDOA_PFS_YUYV (1)
+#define VDOA_PFS_NV12 (0)
+
+
+struct vfield_buf {
+ u32 prev_veba;
+ u32 cur_veba;
+ u32 next_veba;
+ u32 vubo;
+};
+
+struct vframe_buf {
+ u32 veba;
+ u32 vubo;
+};
+
+struct vdoa_params {
+ u32 width;
+ u32 height;
+ int vpu_stride;
+ int interlaced;
+ int scan_order;
+ int ipu_num;
+ int band_lines;
+ int band_mode;
+ int pfs;
+ u32 ieba0;
+ u32 ieba1;
+ u32 ieba2;
+ struct vframe_buf vframe_buf;
+ struct vfield_buf vfield_buf;
+};
+
+struct vdoa_ipu_buf {
+ u32 ieba0;
+ u32 ieba1;
+ u32 iubo;
+};
+
+struct vdoa_info;
+typedef void *vdoa_handle_t;
+
+int vdoa_setup(vdoa_handle_t handle, struct vdoa_params *params);
+void vdoa_get_output_buf(vdoa_handle_t handle, struct vdoa_ipu_buf *buf);
+int vdoa_start(vdoa_handle_t handle, int timeout_ms);
+void vdoa_stop(vdoa_handle_t handle);
+void vdoa_get_handle(vdoa_handle_t *handle);
+void vdoa_put_handle(vdoa_handle_t *handle);
+#endif
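The header above is the whole VDOA client contract, so a short usage sketch helps tie the lifecycle together: get the single shared handle, program the parameters, run one transfer, then stop and release. Everything below is illustrative only and assumes vdoa.h is included; the function name example_vdoa_run(), the parameter values and the 40 ms timeout are assumptions, not code from this patch.

/* Hypothetical VDOA client sketch; field values and timeout are illustrative. */
static int example_vdoa_run(void)
{
	struct vdoa_params params = {
		.width  = 1920,
		.height = 1088,			/* assumed macroblock-aligned height */
		.pfs    = VDOA_PFS_NV12,	/* tiled NV12 input, per the define above */
	};
	struct vdoa_ipu_buf buf;
	vdoa_handle_t handle;
	int ret;

	vdoa_get_handle(&handle);		/* serializes access and ungates the VDOA clock */
	if (!handle)
		return -ENODEV;		/* driver not probed yet */

	ret = vdoa_setup(handle, &params);
	if (!ret)
		ret = vdoa_start(handle, 40);	/* assumed 40 ms completion timeout */
	if (!ret)
		vdoa_get_output_buf(handle, &buf); /* IPU-side buffer addresses for this frame */

	vdoa_stop(handle);
	vdoa_put_handle(&handle);		/* gates the clock and drops the lock */
	return ret;
}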
diff --git a/drivers/video/fbdev/mxc/Kconfig b/drivers/video/fbdev/mxc/Kconfig
index cfdb9757ba1d..25996284e10e 100644
--- a/drivers/video/fbdev/mxc/Kconfig
+++ b/drivers/video/fbdev/mxc/Kconfig
@@ -1,29 +1,57 @@
config FB_MXC
- tristate "MXC Framebuffer support"
- depends on FB
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB_MODE_HELPERS
- default y
- help
- This is a framebuffer device for the MXC LCD Controller.
- See <http://www.linux-fbdev.org/> for information on framebuffer
- devices.
-
- If you plan to use the LCD display with your MXC system, say
- Y here.
+ tristate "MXC Framebuffer support"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select FB_MODE_HELPERS
+ default y
+ help
+ This is a framebuffer device for the MXC LCD Controller.
+ See <http://www.linux-fbdev.org/> for information on framebuffer
+ devices.
+
+ If you plan to use the LCD display with your MXC system, say
+ Y here.
config FB_MXC_SYNC_PANEL
- depends on FB_MXC
- tristate "Synchronous Panel Framebuffer"
+ depends on FB_MXC
+ tristate "Synchronous Panel Framebuffer"
config FB_MXC_MIPI_DSI_SAMSUNG
- tristate "MXC MIPI_DSI_SAMSUNG"
- depends on FB_MXC_SYNC_PANEL
- depends on FB_MXS
+ tristate "MXC MIPI_DSI_SAMSUNG"
+ depends on FB_MXC_SYNC_PANEL
+ depends on FB_MXS
config FB_MXC_TRULY_WVGA_SYNC_PANEL
- tristate "TRULY WVGA Panel"
- depends on FB_MXC_SYNC_PANEL
- depends on FB_MXC_MIPI_DSI_SAMSUNG
+ tristate "TRULY WVGA Panel"
+ depends on FB_MXC_SYNC_PANEL
+ depends on FB_MXC_MIPI_DSI_SAMSUNG
+
+config FB_MXC_LDB
+ tristate "MXC LDB"
+ depends on FB_MXC_SYNC_PANEL
+ depends on MXC_IPU_V3 || FB_MXS
+ select VIDEOMODE_HELPERS
+
+config FB_MXC_HDMI
+ depends on FB_MXC_SYNC_PANEL
+ depends on MXC_IPU_V3
+ depends on I2C
+ tristate "MXC HDMI driver support"
+ select MFD_MXC_HDMI
+ help
+ Driver for the on-chip MXC HDMI controller.
+
+config FB_MXC_EDID
+ depends on FB_MXC && I2C
+ tristate "MXC EDID support"
+ default y
+
+config HANNSTAR_CABC
+ tristate "Hannstar CABC function"
+ help
+	  Say yes here to support switching the Hannstar CABC function
+	  on and off. This function automatically adjusts the backlight
+	  intensity of a display panel according to the content shown
+	  on the panel.
diff --git a/drivers/video/fbdev/mxc/Makefile b/drivers/video/fbdev/mxc/Makefile
index 5fcecc7f820e..31fa96e5ddf0 100644
--- a/drivers/video/fbdev/mxc/Makefile
+++ b/drivers/video/fbdev/mxc/Makefile
@@ -1,3 +1,8 @@
-obj-$(CONFIG_FB_MXC_SYNC_PANEL) += mxc_dispdrv.o
obj-$(CONFIG_FB_MXC_MIPI_DSI_SAMSUNG) += mipi_dsi_samsung.o
obj-$(CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL) += mxcfb_hx8369_wvga.o
+obj-$(CONFIG_FB_MXC_LDB) += ldb.o
+obj-$(CONFIG_FB_MXC_HDMI) += mxc_hdmi.o
+obj-$(CONFIG_FB_MXC_EDID) += mxc_edid.o
+obj-$(CONFIG_FB_MXC_SYNC_PANEL) += mxc_dispdrv.o mxc_lcdif.o mxc_ipuv3_fb.o
+obj-$(CONFIG_FB_MXS_SII902X) += mxsfb_sii902x.o
+obj-$(CONFIG_HANNSTAR_CABC) += hannstar_cabc.o
diff --git a/drivers/video/fbdev/mxc/hannstar_cabc.c b/drivers/video/fbdev/mxc/hannstar_cabc.c
new file mode 100644
index 000000000000..14528c8c6511
--- /dev/null
+++ b/drivers/video/fbdev/mxc/hannstar_cabc.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "hannstar-cabc"
+
+static const struct of_device_id cabc_dt_ids[] = {
+ { .compatible = "hannstar,cabc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cabc_dt_ids);
+
+static int cabc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node, *pp;
+ int cabc_gpio, ret = 0;
+ bool cabc_enable;
+ unsigned long gpio_flag;
+ enum of_gpio_flags flags;
+
+ for_each_child_of_node(np, pp) {
+ if (!of_find_property(pp, "gpios", NULL)) {
+ dev_warn(&pdev->dev, "Found interface without "
+ "gpios\n");
+ continue;
+ }
+
+ cabc_gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (!gpio_is_valid(cabc_gpio)) {
+ ret = cabc_gpio;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to get gpio flags, "
+ "error: %d\n", ret);
+ return ret;
+ }
+
+ cabc_enable = of_property_read_bool(pp, "cabc-enable");
+
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ gpio_flag = cabc_enable ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
+ else
+ gpio_flag = cabc_enable ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+
+ devm_gpio_request_one(&pdev->dev, cabc_gpio,
+ gpio_flag, "hannstar-cabc");
+ }
+
+ return ret;
+}
+
+static struct platform_driver cabc_driver = {
+ .probe = cabc_probe,
+ .driver = {
+ .of_match_table = cabc_dt_ids,
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(cabc_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Hannstar CABC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/video/fbdev/mxc/ldb.c b/drivers/video/fbdev/mxc/ldb.c
new file mode 100644
index 000000000000..e5716cb4b2e3
--- /dev/null
+++ b/drivers/video/fbdev/mxc/ldb.c
@@ -0,0 +1,913 @@
+/*
+ * Copyright (C) 2012-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+#include "mxc_dispdrv.h"
+
+#define DRIVER_NAME "ldb"
+
+#define LDB_BGREF_RMODE_INT (0x1 << 15)
+
+#define LDB_DI1_VS_POL_ACT_LOW (0x1 << 10)
+#define LDB_DI0_VS_POL_ACT_LOW (0x1 << 9)
+
+#define LDB_BIT_MAP_CH1_JEIDA (0x1 << 8)
+#define LDB_BIT_MAP_CH0_JEIDA (0x1 << 6)
+
+#define LDB_DATA_WIDTH_CH1_24 (0x1 << 7)
+#define LDB_DATA_WIDTH_CH0_24 (0x1 << 5)
+
+#define LDB_CH1_MODE_MASK (0x3 << 2)
+#define LDB_CH1_MODE_EN_TO_DI1 (0x3 << 2)
+#define LDB_CH1_MODE_EN_TO_DI0 (0x1 << 2)
+#define LDB_CH0_MODE_MASK (0x3 << 0)
+#define LDB_CH0_MODE_EN_TO_DI1 (0x3 << 0)
+#define LDB_CH0_MODE_EN_TO_DI0 (0x1 << 0)
+
+#define LDB_SPLIT_MODE_EN (0x1 << 4)
+
+#define INVALID_BUS_REG (~0UL)
+
+struct crtc_mux {
+ enum crtc crtc;
+ u32 val;
+};
+
+struct bus_mux {
+ int reg;
+ int shift;
+ int mask;
+ int crtc_mux_num;
+ const struct crtc_mux *crtcs;
+};
+
+struct ldb_info {
+ bool split_cap;
+ bool dual_cap;
+ bool ext_bgref_cap;
+ bool clk_fixup;
+ int ctrl_reg;
+ int bus_mux_num;
+ const struct bus_mux *buses;
+};
+
+struct ldb_data;
+
+struct ldb_chan {
+ struct ldb_data *ldb;
+ struct fb_info *fbi;
+ struct videomode vm;
+ enum crtc crtc;
+ int chno;
+ bool is_used;
+ bool online;
+};
+
+struct ldb_data {
+ struct regmap *regmap;
+ struct device *dev;
+ struct mxc_dispdrv_handle *mddh;
+ struct ldb_chan chan[2];
+ int bus_mux_num;
+ const struct bus_mux *buses;
+ int primary_chno;
+ int ctrl_reg;
+ u32 ctrl;
+ bool spl_mode;
+ bool dual_mode;
+ bool clk_fixup;
+ struct clk *di_clk[4];
+ struct clk *ldb_di_clk[2];
+ struct clk *div_3_5_clk[2];
+ struct clk *div_7_clk[2];
+ struct clk *div_sel_clk[2];
+};
+
+static const struct crtc_mux imx6q_lvds0_crtc_mux[] = {
+ {
+ .crtc = CRTC_IPU1_DI0,
+ .val = IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI0,
+ }, {
+ .crtc = CRTC_IPU1_DI1,
+ .val = IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI1,
+ }, {
+ .crtc = CRTC_IPU2_DI0,
+ .val = IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI0,
+ }, {
+ .crtc = CRTC_IPU2_DI1,
+ .val = IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI1,
+ }
+};
+
+static const struct crtc_mux imx6q_lvds1_crtc_mux[] = {
+ {
+ .crtc = CRTC_IPU1_DI0,
+ .val = IMX6Q_GPR3_LVDS1_MUX_CTL_IPU1_DI0,
+ }, {
+ .crtc = CRTC_IPU1_DI1,
+ .val = IMX6Q_GPR3_LVDS1_MUX_CTL_IPU1_DI1,
+ }, {
+ .crtc = CRTC_IPU2_DI0,
+ .val = IMX6Q_GPR3_LVDS1_MUX_CTL_IPU2_DI0,
+ }, {
+ .crtc = CRTC_IPU2_DI1,
+ .val = IMX6Q_GPR3_LVDS1_MUX_CTL_IPU2_DI1,
+ }
+};
+
+static const struct bus_mux imx6q_ldb_buses[] = {
+ {
+ .reg = IOMUXC_GPR3,
+ .shift = 6,
+ .mask = IMX6Q_GPR3_LVDS0_MUX_CTL_MASK,
+ .crtc_mux_num = ARRAY_SIZE(imx6q_lvds0_crtc_mux),
+ .crtcs = imx6q_lvds0_crtc_mux,
+ }, {
+ .reg = IOMUXC_GPR3,
+ .shift = 8,
+ .mask = IMX6Q_GPR3_LVDS1_MUX_CTL_MASK,
+ .crtc_mux_num = ARRAY_SIZE(imx6q_lvds1_crtc_mux),
+ .crtcs = imx6q_lvds1_crtc_mux,
+ }
+};
+
+static const struct ldb_info imx6q_ldb_info = {
+ .split_cap = true,
+ .dual_cap = true,
+ .ext_bgref_cap = false,
+ .clk_fixup = false,
+ .ctrl_reg = IOMUXC_GPR2,
+ .bus_mux_num = ARRAY_SIZE(imx6q_ldb_buses),
+ .buses = imx6q_ldb_buses,
+};
+
+static const struct ldb_info imx6qp_ldb_info = {
+ .split_cap = true,
+ .dual_cap = true,
+ .ext_bgref_cap = false,
+ .clk_fixup = true,
+ .ctrl_reg = IOMUXC_GPR2,
+ .bus_mux_num = ARRAY_SIZE(imx6q_ldb_buses),
+ .buses = imx6q_ldb_buses,
+};
+
+static const struct crtc_mux imx6dl_lvds0_crtc_mux[] = {
+ {
+ .crtc = CRTC_IPU1_DI0,
+ .val = IMX6DL_GPR3_LVDS0_MUX_CTL_IPU1_DI0,
+ }, {
+ .crtc = CRTC_IPU1_DI1,
+ .val = IMX6DL_GPR3_LVDS0_MUX_CTL_IPU1_DI1,
+ }, {
+ .crtc = CRTC_LCDIF1,
+ .val = IMX6DL_GPR3_LVDS0_MUX_CTL_LCDIF,
+ }
+};
+
+static const struct crtc_mux imx6dl_lvds1_crtc_mux[] = {
+ {
+ .crtc = CRTC_IPU1_DI0,
+ .val = IMX6DL_GPR3_LVDS1_MUX_CTL_IPU1_DI0,
+ }, {
+ .crtc = CRTC_IPU1_DI1,
+ .val = IMX6DL_GPR3_LVDS1_MUX_CTL_IPU1_DI1,
+ }, {
+ .crtc = CRTC_LCDIF1,
+ .val = IMX6DL_GPR3_LVDS1_MUX_CTL_LCDIF,
+ }
+};
+
+static const struct bus_mux imx6dl_ldb_buses[] = {
+ {
+ .reg = IOMUXC_GPR3,
+ .shift = 6,
+ .mask = IMX6DL_GPR3_LVDS0_MUX_CTL_MASK,
+ .crtc_mux_num = ARRAY_SIZE(imx6dl_lvds0_crtc_mux),
+ .crtcs = imx6dl_lvds0_crtc_mux,
+ }, {
+ .reg = IOMUXC_GPR3,
+ .shift = 8,
+ .mask = IMX6DL_GPR3_LVDS1_MUX_CTL_MASK,
+ .crtc_mux_num = ARRAY_SIZE(imx6dl_lvds1_crtc_mux),
+ .crtcs = imx6dl_lvds1_crtc_mux,
+ }
+};
+
+static const struct ldb_info imx6dl_ldb_info = {
+ .split_cap = true,
+ .dual_cap = true,
+ .ext_bgref_cap = false,
+ .clk_fixup = false,
+ .ctrl_reg = IOMUXC_GPR2,
+ .bus_mux_num = ARRAY_SIZE(imx6dl_ldb_buses),
+ .buses = imx6dl_ldb_buses,
+};
+
+static const struct crtc_mux imx6sx_lvds_crtc_mux[] = {
+ {
+ .crtc = CRTC_LCDIF1,
+ .val = IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1,
+ }, {
+ .crtc = CRTC_LCDIF2,
+ .val = IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2,
+ }
+};
+
+static const struct bus_mux imx6sx_ldb_buses[] = {
+ {
+ .reg = IOMUXC_GPR5,
+ .shift = 3,
+ .mask = IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK,
+ .crtc_mux_num = ARRAY_SIZE(imx6sx_lvds_crtc_mux),
+ .crtcs = imx6sx_lvds_crtc_mux,
+ }
+};
+
+static const struct ldb_info imx6sx_ldb_info = {
+ .split_cap = false,
+ .dual_cap = false,
+ .ext_bgref_cap = false,
+ .clk_fixup = false,
+ .ctrl_reg = IOMUXC_GPR6,
+ .bus_mux_num = ARRAY_SIZE(imx6sx_ldb_buses),
+ .buses = imx6sx_ldb_buses,
+};
+
+static const struct crtc_mux imx53_lvds0_crtc_mux[] = {
+ { .crtc = CRTC_IPU1_DI0, },
+};
+
+static const struct crtc_mux imx53_lvds1_crtc_mux[] = {
+ { .crtc = CRTC_IPU1_DI1, }
+};
+
+static const struct bus_mux imx53_ldb_buses[] = {
+ {
+ .reg = INVALID_BUS_REG,
+ .crtc_mux_num = ARRAY_SIZE(imx53_lvds0_crtc_mux),
+ .crtcs = imx53_lvds0_crtc_mux,
+ }, {
+ .reg = INVALID_BUS_REG,
+ .crtc_mux_num = ARRAY_SIZE(imx53_lvds1_crtc_mux),
+ .crtcs = imx53_lvds1_crtc_mux,
+ }
+};
+
+static const struct ldb_info imx53_ldb_info = {
+ .split_cap = true,
+ .dual_cap = false,
+ .ext_bgref_cap = true,
+ .clk_fixup = false,
+ .ctrl_reg = IOMUXC_GPR2,
+ .bus_mux_num = ARRAY_SIZE(imx53_ldb_buses),
+ .buses = imx53_ldb_buses,
+};
+
+static const struct of_device_id ldb_dt_ids[] = {
+ { .compatible = "fsl,imx6qp-ldb", .data = &imx6qp_ldb_info, },
+ { .compatible = "fsl,imx6q-ldb", .data = &imx6q_ldb_info, },
+ { .compatible = "fsl,imx6dl-ldb", .data = &imx6dl_ldb_info, },
+ { .compatible = "fsl,imx6sx-ldb", .data = &imx6sx_ldb_info, },
+ { .compatible = "fsl,imx53-ldb", .data = &imx53_ldb_info, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ldb_dt_ids);
+
+static int ldb_init(struct mxc_dispdrv_handle *mddh,
+ struct mxc_dispdrv_setting *setting)
+{
+ struct ldb_data *ldb = mxc_dispdrv_getdata(mddh);
+ struct device *dev = ldb->dev;
+ struct fb_info *fbi = setting->fbi;
+ struct ldb_chan *chan;
+ struct fb_videomode fb_vm;
+ int chno;
+
+ chno = ldb->chan[ldb->primary_chno].is_used ?
+ !ldb->primary_chno : ldb->primary_chno;
+
+ chan = &ldb->chan[chno];
+
+ if (chan->is_used) {
+ dev_err(dev, "LVDS channel%d is already used\n", chno);
+ return -EBUSY;
+ }
+ if (!chan->online) {
+ dev_err(dev, "LVDS channel%d is not online\n", chno);
+ return -ENODEV;
+ }
+
+ chan->is_used = true;
+
+ chan->fbi = fbi;
+
+ fb_videomode_from_videomode(&chan->vm, &fb_vm);
+
+ INIT_LIST_HEAD(&fbi->modelist);
+ fb_add_videomode(&fb_vm, &fbi->modelist);
+ fb_videomode_to_var(&fbi->var, &fb_vm);
+
+ setting->crtc = chan->crtc;
+
+ return 0;
+}
+
+static int get_di_clk_id(struct ldb_chan chan, int *id)
+{
+ struct ldb_data *ldb = chan.ldb;
+ int i = 0, chno = chan.chno, mask, shift;
+ enum crtc crtc;
+ u32 val;
+
+ /* no pre-muxing, such as mx53 */
+ if (ldb->buses[chno].reg == INVALID_BUS_REG) {
+ *id = chno;
+ return 0;
+ }
+
+ for (; i < ldb->buses[chno].crtc_mux_num; i++) {
+ crtc = ldb->buses[chno].crtcs[i].crtc;
+ val = ldb->buses[chno].crtcs[i].val;
+ mask = ldb->buses[chno].mask;
+ shift = ldb->buses[chno].shift;
+ if (chan.crtc == crtc) {
+ *id = (val & mask) >> shift;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int get_mux_val(struct bus_mux bus_mux, enum crtc crtc,
+ u32 *mux_val)
+{
+ int i = 0;
+
+ for (; i < bus_mux.crtc_mux_num; i++)
+ if (bus_mux.crtcs[i].crtc == crtc) {
+ *mux_val = bus_mux.crtcs[i].val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int find_ldb_chno(struct ldb_data *ldb,
+ struct fb_info *fbi, int *chno)
+{
+ struct device *dev = ldb->dev;
+ int i = 0;
+
+ for (; i < 2; i++)
+ if (ldb->chan[i].fbi == fbi) {
+ *chno = ldb->chan[i].chno;
+ return 0;
+ }
+ dev_err(dev, "failed to find channel number\n");
+ return -EINVAL;
+}
+
+static void ldb_disable(struct mxc_dispdrv_handle *mddh,
+ struct fb_info *fbi);
+
+static int ldb_setup(struct mxc_dispdrv_handle *mddh,
+ struct fb_info *fbi)
+{
+ struct ldb_data *ldb = mxc_dispdrv_getdata(mddh);
+ struct ldb_chan chan;
+ struct device *dev = ldb->dev;
+ struct clk *ldb_di_parent, *ldb_di_sel, *ldb_di_sel_parent;
+ struct clk *other_ldb_di_sel = NULL;
+ struct bus_mux bus_mux;
+ int ret = 0, id = 0, chno, other_chno;
+ unsigned long serial_clk;
+ u32 mux_val;
+
+ ret = find_ldb_chno(ldb, fbi, &chno);
+ if (ret < 0)
+ return ret;
+
+ other_chno = chno ? 0 : 1;
+
+ chan = ldb->chan[chno];
+
+ bus_mux = ldb->buses[chno];
+
+ ret = get_di_clk_id(chan, &id);
+ if (ret < 0) {
+ dev_err(dev, "failed to get ch%d di clk id\n",
+ chan.chno);
+ return ret;
+ }
+
+ ret = get_mux_val(bus_mux, chan.crtc, &mux_val);
+ if (ret < 0) {
+ dev_err(dev, "failed to get ch%d mux val\n",
+ chan.chno);
+ return ret;
+ }
+
+ if (ldb->clk_fixup) {
+ /*
+ * ldb_di_sel_parent(plls) -> ldb_di_sel -> ldb_di[chno] ->
+ *
+ * -> div_3_5[chno] ->
+ * -> | |-> div_sel[chno] -> di[id]
+ * -> div_7[chno] ->
+ */
+ clk_set_parent(ldb->di_clk[id], ldb->div_sel_clk[chno]);
+ } else {
+ /*
+ * ldb_di_sel_parent(plls) -> ldb_di_sel ->
+ *
+ * -> div_3_5[chno] ->
+ * -> | |-> div_sel[chno] ->
+ * -> div_7[chno] ->
+ *
+ * -> ldb_di[chno] -> di[id]
+ */
+ clk_set_parent(ldb->di_clk[id], ldb->ldb_di_clk[chno]);
+ }
+ ldb_di_parent = ldb->spl_mode ? ldb->div_3_5_clk[chno] :
+ ldb->div_7_clk[chno];
+ clk_set_parent(ldb->div_sel_clk[chno], ldb_di_parent);
+ ldb_di_sel = clk_get_parent(ldb_di_parent);
+ ldb_di_sel_parent = clk_get_parent(ldb_di_sel);
+ serial_clk = ldb->spl_mode ? chan.vm.pixelclock * 7 / 2 :
+ chan.vm.pixelclock * 7;
+ clk_set_rate(ldb_di_sel_parent, serial_clk);
+
+ /*
+ * split mode or dual mode:
+ * clock tree for the other channel
+ */
+ if (ldb->spl_mode) {
+ clk_set_parent(ldb->div_sel_clk[other_chno],
+ ldb->div_3_5_clk[other_chno]);
+ other_ldb_di_sel =
+			clk_get_parent(ldb->div_3_5_clk[other_chno]);
+ }
+
+ if (ldb->dual_mode) {
+ clk_set_parent(ldb->div_sel_clk[other_chno],
+ ldb->div_7_clk[other_chno]);
+ other_ldb_di_sel =
+			clk_get_parent(ldb->div_7_clk[other_chno]);
+ }
+
+ if (ldb->spl_mode || ldb->dual_mode)
+ clk_set_parent(other_ldb_di_sel, ldb_di_sel_parent);
+
+ if (!(chan.fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)) {
+ if (ldb->spl_mode && bus_mux.reg == INVALID_BUS_REG)
+ /* no pre-muxing, such as mx53 */
+ ldb->ctrl |= (id == 0 ? LDB_DI0_VS_POL_ACT_LOW :
+ LDB_DI1_VS_POL_ACT_LOW);
+ else
+ ldb->ctrl |= (chno == 0 ? LDB_DI0_VS_POL_ACT_LOW :
+ LDB_DI1_VS_POL_ACT_LOW);
+ }
+
+ if (bus_mux.reg != INVALID_BUS_REG)
+ regmap_update_bits(ldb->regmap, bus_mux.reg,
+ bus_mux.mask, mux_val);
+
+ regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ctrl);
+
+ /* disable channel for correct sequence */
+ ldb_disable(mddh, fbi);
+
+ return ret;
+}
+
+static int ldb_enable(struct mxc_dispdrv_handle *mddh,
+ struct fb_info *fbi)
+{
+ struct ldb_data *ldb = mxc_dispdrv_getdata(mddh);
+ struct ldb_chan chan;
+ struct device *dev = ldb->dev;
+ struct bus_mux bus_mux;
+ int ret = 0, id = 0, chno, other_chno;
+
+ ret = find_ldb_chno(ldb, fbi, &chno);
+ if (ret < 0)
+ return ret;
+
+ chan = ldb->chan[chno];
+
+ bus_mux = ldb->buses[chno];
+
+ if (ldb->spl_mode || ldb->dual_mode) {
+ other_chno = chno ? 0 : 1;
+ clk_prepare_enable(ldb->ldb_di_clk[other_chno]);
+ }
+
+ if ((ldb->spl_mode || ldb->dual_mode) &&
+ bus_mux.reg == INVALID_BUS_REG) {
+ /* no pre-muxing, such as mx53 */
+ ret = get_di_clk_id(chan, &id);
+ if (ret < 0) {
+ dev_err(dev, "failed to get ch%d di clk id\n",
+ chan.chno);
+ return ret;
+ }
+
+ ldb->ctrl |= id ?
+ (LDB_CH0_MODE_EN_TO_DI1 | LDB_CH1_MODE_EN_TO_DI1) :
+ (LDB_CH0_MODE_EN_TO_DI0 | LDB_CH1_MODE_EN_TO_DI0);
+ } else {
+ if (ldb->spl_mode || ldb->dual_mode)
+ ldb->ctrl |= LDB_CH0_MODE_EN_TO_DI0 |
+ LDB_CH1_MODE_EN_TO_DI0;
+ else
+ ldb->ctrl |= chno ? LDB_CH1_MODE_EN_TO_DI1 :
+ LDB_CH0_MODE_EN_TO_DI0;
+ }
+
+ regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ctrl);
+ return 0;
+}
+
+static void ldb_disable(struct mxc_dispdrv_handle *mddh,
+ struct fb_info *fbi)
+{
+ struct ldb_data *ldb = mxc_dispdrv_getdata(mddh);
+ int ret, chno, other_chno;
+
+ ret = find_ldb_chno(ldb, fbi, &chno);
+ if (ret < 0)
+ return;
+
+ if (ldb->spl_mode || ldb->dual_mode) {
+ ldb->ctrl &= ~(LDB_CH1_MODE_MASK | LDB_CH0_MODE_MASK);
+ other_chno = chno ? 0 : 1;
+ clk_disable_unprepare(ldb->ldb_di_clk[other_chno]);
+ } else {
+ ldb->ctrl &= ~(chno ? LDB_CH1_MODE_MASK :
+ LDB_CH0_MODE_MASK);
+ }
+
+ regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ctrl);
+ return;
+}
+
+static struct mxc_dispdrv_driver ldb_drv = {
+ .name = DRIVER_NAME,
+ .init = ldb_init,
+ .setup = ldb_setup,
+ .enable = ldb_enable,
+ .disable = ldb_disable
+};
+
+enum {
+ LVDS_BIT_MAP_SPWG,
+ LVDS_BIT_MAP_JEIDA,
+};
+
+static const char *ldb_bit_mappings[] = {
+ [LVDS_BIT_MAP_SPWG] = "spwg",
+ [LVDS_BIT_MAP_JEIDA] = "jeida",
+};
+
+static int of_get_data_mapping(struct device_node *np)
+{
+ const char *bm;
+ int ret, i;
+
+ ret = of_property_read_string(np, "fsl,data-mapping", &bm);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ldb_bit_mappings); i++)
+ if (!strcasecmp(bm, ldb_bit_mappings[i]))
+ return i;
+
+ return -EINVAL;
+}
+
+static const char *ldb_crtc_mappings[] = {
+ [CRTC_IPU_DI0] = "ipu-di0",
+ [CRTC_IPU_DI1] = "ipu-di1",
+ [CRTC_IPU1_DI0] = "ipu1-di0",
+ [CRTC_IPU1_DI1] = "ipu1-di1",
+ [CRTC_IPU2_DI0] = "ipu2-di0",
+ [CRTC_IPU2_DI1] = "ipu2-di1",
+ [CRTC_LCDIF] = "lcdif",
+ [CRTC_LCDIF1] = "lcdif1",
+ [CRTC_LCDIF2] = "lcdif2",
+};
+
+static enum crtc of_get_crtc_mapping(struct device_node *np)
+{
+ const char *cm;
+ enum crtc i;
+ int ret;
+
+ ret = of_property_read_string(np, "crtc", &cm);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ldb_crtc_mappings); i++)
+ if (!strcasecmp(cm, ldb_crtc_mappings[i])) {
+ switch (i) {
+ case CRTC_IPU_DI0:
+ i = CRTC_IPU1_DI0;
+ break;
+ case CRTC_IPU_DI1:
+ i = CRTC_IPU1_DI1;
+ break;
+ case CRTC_LCDIF:
+ i = CRTC_LCDIF1;
+ break;
+ default:
+ break;
+ }
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int mux_count(struct ldb_data *ldb)
+{
+ int i, j, count = 0;
+ bool should_count[CRTC_MAX];
+ enum crtc crtc;
+
+ for (i = 0; i < CRTC_MAX; i++)
+ should_count[i] = true;
+
+ for (i = 0; i < ldb->bus_mux_num; i++) {
+ for (j = 0; j < ldb->buses[i].crtc_mux_num; j++) {
+ crtc = ldb->buses[i].crtcs[j].crtc;
+ if (should_count[crtc]) {
+ count++;
+ should_count[crtc] = false;
+ }
+ }
+ }
+
+ return count;
+}
+
+static bool is_valid_crtc(struct ldb_data *ldb, enum crtc crtc,
+ int chno)
+{
+ int i = 0;
+
+ if (chno > ldb->bus_mux_num - 1)
+ return false;
+
+ for (; i < ldb->buses[chno].crtc_mux_num; i++)
+ if (ldb->buses[chno].crtcs[i].crtc == crtc)
+ return true;
+
+ return false;
+}
+
+static int ldb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id =
+ of_match_device(ldb_dt_ids, dev);
+ const struct ldb_info *ldb_info =
+ (const struct ldb_info *)of_id->data;
+ struct device_node *np = dev->of_node, *child;
+ struct ldb_data *ldb;
+ bool ext_ref;
+ int i, data_width, mapping, child_count = 0;
+ char clkname[16];
+
+ ldb = devm_kzalloc(dev, sizeof(*ldb), GFP_KERNEL);
+ if (!ldb)
+ return -ENOMEM;
+
+ ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(ldb->regmap)) {
+ dev_err(dev, "failed to get parent regmap\n");
+ return PTR_ERR(ldb->regmap);
+ }
+
+ ldb->dev = dev;
+ ldb->bus_mux_num = ldb_info->bus_mux_num;
+ ldb->buses = ldb_info->buses;
+ ldb->ctrl_reg = ldb_info->ctrl_reg;
+ ldb->clk_fixup = ldb_info->clk_fixup;
+ ldb->primary_chno = -1;
+
+ ext_ref = of_property_read_bool(np, "ext-ref");
+ if (!ext_ref && ldb_info->ext_bgref_cap)
+ ldb->ctrl |= LDB_BGREF_RMODE_INT;
+
+ ldb->spl_mode = of_property_read_bool(np, "split-mode");
+ if (ldb->spl_mode) {
+ if (ldb_info->split_cap) {
+ ldb->ctrl |= LDB_SPLIT_MODE_EN;
+ dev_info(dev, "split mode\n");
+ } else {
+ dev_err(dev, "cannot support split mode\n");
+ return -EINVAL;
+ }
+ }
+
+ ldb->dual_mode = of_property_read_bool(np, "dual-mode");
+ if (ldb->dual_mode) {
+ if (ldb_info->dual_cap) {
+ dev_info(dev, "dual mode\n");
+ } else {
+ dev_err(dev, "cannot support dual mode\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ldb->dual_mode && ldb->spl_mode) {
+ dev_err(dev, "cannot support dual mode and split mode "
+ "simultaneously\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mux_count(ldb); i++) {
+ sprintf(clkname, "di%d_sel", i);
+ ldb->di_clk[i] = devm_clk_get(dev, clkname);
+ if (IS_ERR(ldb->di_clk[i])) {
+ dev_err(dev, "failed to get clk %s\n", clkname);
+ return PTR_ERR(ldb->di_clk[i]);
+ }
+ }
+
+ for_each_child_of_node(np, child) {
+ struct ldb_chan *chan;
+ enum crtc crtc;
+ bool is_primary;
+ int ret;
+
+ ret = of_property_read_u32(child, "reg", &i);
+ if (ret || i < 0 || i > 1 || i >= ldb->bus_mux_num) {
+ dev_err(dev, "wrong LVDS channel number\n");
+ return -EINVAL;
+ }
+
+ if ((ldb->spl_mode || ldb->dual_mode) && i > 0) {
+ dev_warn(dev, "split mode or dual mode, ignoring "
+ "second output\n");
+ continue;
+ }
+
+ if (!of_device_is_available(child))
+ continue;
+
+ if (++child_count > ldb->bus_mux_num) {
+ dev_err(dev, "too many LVDS channels\n");
+ return -EINVAL;
+ }
+
+ chan = &ldb->chan[i];
+ chan->chno = i;
+ chan->ldb = ldb;
+ chan->online = true;
+
+ is_primary = of_property_read_bool(child, "primary");
+
+ if (ldb->bus_mux_num == 1 || (ldb->primary_chno == -1 &&
+ (is_primary || ldb->spl_mode || ldb->dual_mode)))
+ ldb->primary_chno = chan->chno;
+
+ ret = of_property_read_u32(child, "fsl,data-width",
+ &data_width);
+ if (ret || (data_width != 18 && data_width != 24)) {
+ dev_err(dev, "data width not specified or invalid\n");
+ return -EINVAL;
+ }
+
+ mapping = of_get_data_mapping(child);
+ switch (mapping) {
+ case LVDS_BIT_MAP_SPWG:
+ if (data_width == 24) {
+ if (i == 0 || ldb->spl_mode || ldb->dual_mode)
+ ldb->ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (i == 1 || ldb->spl_mode || ldb->dual_mode)
+ ldb->ctrl |= LDB_DATA_WIDTH_CH1_24;
+ }
+ break;
+ case LVDS_BIT_MAP_JEIDA:
+ if (data_width == 18) {
+				dev_err(dev, "JEIDA only supports 24bit\n");
+ return -EINVAL;
+ }
+ if (i == 0 || ldb->spl_mode || ldb->dual_mode)
+ ldb->ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
+ if (i == 1 || ldb->spl_mode || ldb->dual_mode)
+ ldb->ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
+ break;
+ default:
+ dev_err(dev, "data mapping not specified or invalid\n");
+ return -EINVAL;
+ }
+
+ crtc = of_get_crtc_mapping(child);
+ if (is_valid_crtc(ldb, crtc, chan->chno)) {
+ ldb->chan[i].crtc = crtc;
+ } else {
+ dev_err(dev, "crtc not specified or invalid\n");
+ return -EINVAL;
+ }
+
+ ret = of_get_videomode(child, &chan->vm, 0);
+ if (ret)
+ return -EINVAL;
+
+ sprintf(clkname, "ldb_di%d", i);
+ ldb->ldb_di_clk[i] = devm_clk_get(dev, clkname);
+ if (IS_ERR(ldb->ldb_di_clk[i])) {
+ dev_err(dev, "failed to get clk %s\n", clkname);
+ return PTR_ERR(ldb->ldb_di_clk[i]);
+ }
+
+ sprintf(clkname, "ldb_di%d_div_3_5", i);
+ ldb->div_3_5_clk[i] = devm_clk_get(dev, clkname);
+ if (IS_ERR(ldb->div_3_5_clk[i])) {
+ dev_err(dev, "failed to get clk %s\n", clkname);
+ return PTR_ERR(ldb->div_3_5_clk[i]);
+ }
+
+ sprintf(clkname, "ldb_di%d_div_7", i);
+ ldb->div_7_clk[i] = devm_clk_get(dev, clkname);
+ if (IS_ERR(ldb->div_7_clk[i])) {
+ dev_err(dev, "failed to get clk %s\n", clkname);
+ return PTR_ERR(ldb->div_7_clk[i]);
+ }
+
+ sprintf(clkname, "ldb_di%d_div_sel", i);
+ ldb->div_sel_clk[i] = devm_clk_get(dev, clkname);
+ if (IS_ERR(ldb->div_sel_clk[i])) {
+ dev_err(dev, "failed to get clk %s\n", clkname);
+ return PTR_ERR(ldb->div_sel_clk[i]);
+ }
+ }
+
+ if (child_count == 0) {
+ dev_err(dev, "failed to find valid LVDS channel\n");
+ return -EINVAL;
+ }
+
+ if (ldb->primary_chno == -1) {
+		dev_err(dev, "failed to determine the primary channel\n");
+ return -EINVAL;
+ }
+
+ ldb->mddh = mxc_dispdrv_register(&ldb_drv);
+ mxc_dispdrv_setdata(ldb->mddh, ldb);
+ dev_set_drvdata(&pdev->dev, ldb);
+
+ return 0;
+}
+
+static int ldb_remove(struct platform_device *pdev)
+{
+ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
+
+ mxc_dispdrv_puthandle(ldb->mddh);
+ mxc_dispdrv_unregister(ldb->mddh);
+ return 0;
+}
+
+static struct platform_driver ldb_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ldb_dt_ids,
+ },
+ .probe = ldb_probe,
+ .remove = ldb_remove,
+};
+
+module_platform_driver(ldb_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("LDB driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
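One detail in ldb_setup() above that is easy to misread is the LVDS serial clock arithmetic: seven bits are serialized per pixel on each channel, and split mode halves the per-channel rate because the two channels carry alternate pixels. A stand-alone restatement follows; the helper name and sample numbers are illustrative, not part of the driver.

/* Mirrors the serial_clk computation in ldb_setup(); illustrative only. */
static unsigned long ldb_serial_clk_hz(unsigned long pixelclock_hz, bool split_mode)
{
	/* 7 serial bits per pixel per lane; split mode shares pixels across
	 * both channels, so each channel runs at 7/2 of the pixel clock. */
	return split_mode ? pixelclock_hz * 7 / 2 : pixelclock_hz * 7;
}

For example, a 148.5 MHz 1080p60 pixel clock needs a 1039.5 MHz LDB serial clock in single-channel mode, or 519.75 MHz per channel in split mode.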
diff --git a/drivers/video/fbdev/mxc/mxc_dispdrv.c b/drivers/video/fbdev/mxc/mxc_dispdrv.c
index 805c1c98b0f9..d8501ebb84e7 100644
--- a/drivers/video/fbdev/mxc/mxc_dispdrv.c
+++ b/drivers/video/fbdev/mxc/mxc_dispdrv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2014 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2011-2015 Freescale Semiconductor, Inc. All Rights Reserved.
*/
/*
diff --git a/drivers/video/fbdev/mxc/mxc_edid.c b/drivers/video/fbdev/mxc/mxc_edid.c
new file mode 100644
index 000000000000..998b2d4b7b36
--- /dev/null
+++ b/drivers/video/fbdev/mxc/mxc_edid.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright 2009-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
+ */
+
+/*!
+ * @file mxc_edid.c
+ *
+ * @brief MXC EDID driver
+ *
+ * @ingroup Framebuffer
+ */
+
+/*!
+ * Include files
+ */
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <video/mxc_edid.h>
+#include "../edid.h"
+
+#undef DEBUG /* define this for verbose EDID parsing output */
+#ifdef DEBUG
+#define DPRINTK(fmt, args...) printk(fmt, ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+const struct fb_videomode mxc_cea_mode[64] = {
+ /* #1: 640x480p@59.94/60Hz 4:3 */
+ [1] = {
+ NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
+ },
+ /* #2: 720x480p@59.94/60Hz 4:3 */
+ [2] = {
+ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
+ },
+ /* #3: 720x480p@59.94/60Hz 16:9 */
+ [3] = {
+ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #4: 1280x720p@59.94/60Hz 16:9 */
+ [4] = {
+ NULL, 60, 1280, 720, 13468, 220, 110, 20, 5, 40, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
+ },
+ /* #5: 1920x1080i@59.94/60Hz 16:9 */
+ [5] = {
+ NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #6: 720(1440)x480iH@59.94/60Hz 4:3 */
+ [6] = {
+ NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
+ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_4_3, 0,
+ },
+ /* #7: 720(1440)x480iH@59.94/60Hz 16:9 */
+ [7] = {
+ NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
+ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #8: 720(1440)x240pH@59.94/60Hz 4:3 */
+ [8] = {
+ NULL, 60, 1440, 240, 37108, 114, 38, 15, 4, 124, 3, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
+ },
+ /* #9: 720(1440)x240pH@59.94/60Hz 16:9 */
+ [9] = {
+ NULL, 60, 1440, 240, 37108, 114, 38, 15, 4, 124, 3, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #14: 1440x480p@59.94/60Hz 4:3 */
+ [14] = {
+ NULL, 60, 1440, 480, 18500, 120, 32, 30, 9, 124, 6, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
+ },
+ /* #15: 1440x480p@59.94/60Hz 16:9 */
+ [15] = {
+ NULL, 60, 1440, 480, 18500, 120, 32, 30, 9, 124, 6, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #16: 1920x1080p@60Hz 16:9 */
+ [16] = {
+ NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #17: 720x576pH@50Hz 4:3 */
+ [17] = {
+ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
+ },
+ /* #18: 720x576pH@50Hz 16:9 */
+ [18] = {
+ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #19: 1280x720p@50Hz */
+ [19] = {
+ NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #20: 1920x1080i@50Hz */
+ [20] = {
+ NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #23: 720(1440)x288pH@50Hz 4:3 */
+ [23] = {
+ NULL, 50, 1440, 288, 37037, 138, 24, 19, 2, 126, 3, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
+ },
+ /* #24: 720(1440)x288pH@50Hz 16:9 */
+ [24] = {
+ NULL, 50, 1440, 288, 37037, 138, 24, 19, 2, 126, 3, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #29: 720(1440)x576pH@50Hz 4:3 */
+ [29] = {
+ NULL, 50, 1440, 576, 18518, 136, 24, 39, 5, 128, 5, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
+ },
+ /* #30: 720(1440)x576pH@50Hz 16:9 */
+ [30] = {
+ NULL, 50, 1440, 576, 18518, 136, 24, 39, 5, 128, 5, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #31: 1920x1080p@50Hz */
+ [31] = {
+ NULL, 50, 1920, 1080, 6734, 148, 528, 36, 4, 44, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #32: 1920x1080p@23.98/24Hz */
+ [32] = {
+ NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #33: 1920x1080p@25Hz */
+ [33] = {
+ NULL, 25, 1920, 1080, 13468, 148, 528, 36, 4, 44, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #34: 1920x1080p@30Hz */
+ [34] = {
+ NULL, 30, 1920, 1080, 13468, 148, 88, 36, 4, 44, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
+ },
+ /* #41: 1280x720p@100Hz 16:9 */
+ [41] = {
+ NULL, 100, 1280, 720, 6734, 220, 440, 20, 5, 40, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
+ },
+ /* #47: 1280x720p@119.88/120Hz 16:9 */
+ [47] = {
+ NULL, 120, 1280, 720, 6734, 220, 110, 20, 5, 40, 5,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
+ },
+};
+
+/*
+ * We have a special version of fb_mode_is_equal that ignores
+ * pixclock, since for many CEA modes, 2 frequencies are supported
+ * e.g. 640x480 @ 60Hz or 59.94Hz
+ */
+int mxc_edid_fb_mode_is_equal(bool use_aspect,
+ const struct fb_videomode *mode1,
+ const struct fb_videomode *mode2)
+{
+ u32 mask;
+
+ if (use_aspect)
+ mask = ~0;
+ else
+ mask = ~FB_VMODE_ASPECT_MASK;
+
+ return (mode1->xres == mode2->xres &&
+ mode1->yres == mode2->yres &&
+ mode1->hsync_len == mode2->hsync_len &&
+ mode1->vsync_len == mode2->vsync_len &&
+ mode1->left_margin == mode2->left_margin &&
+ mode1->right_margin == mode2->right_margin &&
+ mode1->upper_margin == mode2->upper_margin &&
+ mode1->lower_margin == mode2->lower_margin &&
+ mode1->sync == mode2->sync &&
+		/* refresh check: 59.94Hz and 60Hz share the same entry
+		 * in the mxc_cea_mode table */
+ abs(mode1->refresh - mode2->refresh) <= 1 &&
+ (mode1->vmode & mask) == (mode2->vmode & mask));
+}
+
+static void get_detailed_timing(unsigned char *block,
+ struct fb_videomode *mode)
+{
+ mode->xres = H_ACTIVE;
+ mode->yres = V_ACTIVE;
+ mode->pixclock = PIXEL_CLOCK;
+ mode->pixclock /= 1000;
+ mode->pixclock = KHZ2PICOS(mode->pixclock);
+ mode->right_margin = H_SYNC_OFFSET;
+ mode->left_margin = (H_ACTIVE + H_BLANKING) -
+ (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
+ mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
+ V_SYNC_WIDTH;
+ mode->lower_margin = V_SYNC_OFFSET;
+ mode->hsync_len = H_SYNC_WIDTH;
+ mode->vsync_len = V_SYNC_WIDTH;
+ if (HSYNC_POSITIVE)
+ mode->sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (VSYNC_POSITIVE)
+ mode->sync |= FB_SYNC_VERT_HIGH_ACT;
+ mode->refresh = PIXEL_CLOCK/((H_ACTIVE + H_BLANKING) *
+ (V_ACTIVE + V_BLANKING));
+ if (INTERLACED) {
+ mode->yres *= 2;
+ mode->upper_margin *= 2;
+ mode->lower_margin *= 2;
+ mode->vsync_len *= 2;
+ mode->vmode |= FB_VMODE_INTERLACED;
+ }
+ mode->flag = FB_MODE_IS_DETAILED;
+
+ if ((H_SIZE / 16) == (V_SIZE / 9))
+ mode->vmode |= FB_VMODE_ASPECT_16_9;
+ else if ((H_SIZE / 4) == (V_SIZE / 3))
+ mode->vmode |= FB_VMODE_ASPECT_4_3;
+ else if ((mode->xres / 16) == (mode->yres / 9))
+ mode->vmode |= FB_VMODE_ASPECT_16_9;
+ else if ((mode->xres / 4) == (mode->yres / 3))
+ mode->vmode |= FB_VMODE_ASPECT_4_3;
+
+ if (mode->vmode & FB_VMODE_ASPECT_16_9)
+ DPRINTK("Aspect ratio: 16:9\n");
+ if (mode->vmode & FB_VMODE_ASPECT_4_3)
+ DPRINTK("Aspect ratio: 4:3\n");
+ DPRINTK(" %d MHz ", PIXEL_CLOCK/1000000);
+ DPRINTK("%d %d %d %d ", H_ACTIVE, H_ACTIVE + H_SYNC_OFFSET,
+ H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH, H_ACTIVE + H_BLANKING);
+ DPRINTK("%d %d %d %d ", V_ACTIVE, V_ACTIVE + V_SYNC_OFFSET,
+ V_ACTIVE + V_SYNC_OFFSET + V_SYNC_WIDTH, V_ACTIVE + V_BLANKING);
+ DPRINTK("%sHSync %sVSync\n\n", (HSYNC_POSITIVE) ? "+" : "-",
+ (VSYNC_POSITIVE) ? "+" : "-");
+}
+
+int mxc_edid_parse_ext_blk(unsigned char *edid,
+ struct mxc_edid_cfg *cfg,
+ struct fb_monspecs *specs)
+{
+ char detail_timing_desc_offset;
+ struct fb_videomode *mode, *m;
+ unsigned char index = 0x0;
+ unsigned char *block;
+ int i, num = 0, revision;
+
+	if (edid[index++] != 0x2) /* only CEA extension blocks are supported now */
+ return 0;
+ revision = edid[index++];
+	DPRINTK("cea extension revision %d\n", revision);
+ mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
+ if (mode == NULL)
+ return -1;
+
+ detail_timing_desc_offset = edid[index++];
+
+ if (revision >= 2) {
+ cfg->cea_underscan = (edid[index] >> 7) & 0x1;
+ cfg->cea_basicaudio = (edid[index] >> 6) & 0x1;
+ cfg->cea_ycbcr444 = (edid[index] >> 5) & 0x1;
+ cfg->cea_ycbcr422 = (edid[index] >> 4) & 0x1;
+
+ DPRINTK("CEA underscan %d\n", cfg->cea_underscan);
+ DPRINTK("CEA basicaudio %d\n", cfg->cea_basicaudio);
+ DPRINTK("CEA ycbcr444 %d\n", cfg->cea_ycbcr444);
+ DPRINTK("CEA ycbcr422 %d\n", cfg->cea_ycbcr422);
+ }
+
+ if (revision >= 3) {
+ /* short desc */
+		DPRINTK("CEA short desc timings\n");
+ index++;
+ while (index < detail_timing_desc_offset) {
+ unsigned char tagcode, blklen;
+
+ tagcode = (edid[index] >> 5) & 0x7;
+ blklen = (edid[index]) & 0x1f;
+
+ DPRINTK("Tagcode %x Len %d\n", tagcode, blklen);
+
+ switch (tagcode) {
+ case 0x2: /*Video data block*/
+ {
+ int cea_idx;
+ i = 0;
+ while (i < blklen) {
+ index++;
+ cea_idx = edid[index] & 0x7f;
+ if (cea_idx < ARRAY_SIZE(mxc_cea_mode) &&
+ (mxc_cea_mode[cea_idx].xres)) {
+ DPRINTK("Support CEA Format #%d\n", cea_idx);
+ mode[num] = mxc_cea_mode[cea_idx];
+ mode[num].flag |= FB_MODE_IS_STANDARD;
+ num++;
+ }
+ i++;
+ }
+ break;
+ }
+ case 0x3: /*Vendor specific data*/
+ {
+ unsigned char IEEE_reg_iden[3];
+ unsigned char deep_color;
+ unsigned char latency_present;
+ unsigned char I_latency_present;
+ unsigned char hdmi_video_present;
+ unsigned char hdmi_3d_present;
+ unsigned char hdmi_3d_multi_present;
+ unsigned char hdmi_vic_len;
+ unsigned char hdmi_3d_len;
+ unsigned char index_inc = 0;
+ unsigned char vsd_end;
+
+ vsd_end = index + blklen;
+
+ IEEE_reg_iden[0] = edid[index+1];
+ IEEE_reg_iden[1] = edid[index+2];
+ IEEE_reg_iden[2] = edid[index+3];
+ cfg->physical_address[0] = (edid[index+4] & 0xf0) >> 4;
+ cfg->physical_address[1] = (edid[index+4] & 0x0f);
+ cfg->physical_address[2] = (edid[index+5] & 0xf0) >> 4;
+ cfg->physical_address[3] = (edid[index+5] & 0x0f);
+
+ if ((IEEE_reg_iden[0] == 0x03) &&
+ (IEEE_reg_iden[1] == 0x0c) &&
+ (IEEE_reg_iden[2] == 0x00))
+ cfg->hdmi_cap = 1;
+
+ if (blklen > 5) {
+ deep_color = edid[index+6];
+ if (deep_color & 0x80)
+ cfg->vsd_support_ai = true;
+ if (deep_color & 0x40)
+ cfg->vsd_dc_48bit = true;
+ if (deep_color & 0x20)
+ cfg->vsd_dc_36bit = true;
+ if (deep_color & 0x10)
+ cfg->vsd_dc_30bit = true;
+ if (deep_color & 0x08)
+ cfg->vsd_dc_y444 = true;
+ if (deep_color & 0x01)
+ cfg->vsd_dvi_dual = true;
+ }
+
+ DPRINTK("VSD hdmi capability %d\n", cfg->hdmi_cap);
+ DPRINTK("VSD support ai %d\n", cfg->vsd_support_ai);
+ DPRINTK("VSD support deep color 48bit %d\n", cfg->vsd_dc_48bit);
+ DPRINTK("VSD support deep color 36bit %d\n", cfg->vsd_dc_36bit);
+ DPRINTK("VSD support deep color 30bit %d\n", cfg->vsd_dc_30bit);
+ DPRINTK("VSD support deep color y444 %d\n", cfg->vsd_dc_y444);
+ DPRINTK("VSD support dvi dual %d\n", cfg->vsd_dvi_dual);
+
+ if (blklen > 6)
+ cfg->vsd_max_tmdsclk_rate = edid[index+7] * 5;
+ DPRINTK("VSD MAX TMDS CLOCK RATE %d\n", cfg->vsd_max_tmdsclk_rate);
+
+ if (blklen > 7) {
+ latency_present = edid[index+8] >> 7;
+ I_latency_present = (edid[index+8] & 0x40) >> 6;
+ hdmi_video_present = (edid[index+8] & 0x20) >> 5;
+ cfg->vsd_cnc3 = (edid[index+8] & 0x8) >> 3;
+ cfg->vsd_cnc2 = (edid[index+8] & 0x4) >> 2;
+ cfg->vsd_cnc1 = (edid[index+8] & 0x2) >> 1;
+ cfg->vsd_cnc0 = edid[index+8] & 0x1;
+
+ DPRINTK("VSD cnc0 %d\n", cfg->vsd_cnc0);
+ DPRINTK("VSD cnc1 %d\n", cfg->vsd_cnc1);
+ DPRINTK("VSD cnc2 %d\n", cfg->vsd_cnc2);
+ DPRINTK("VSD cnc3 %d\n", cfg->vsd_cnc3);
+ DPRINTK("latency_present %d\n", latency_present);
+ DPRINTK("I_latency_present %d\n", I_latency_present);
+ DPRINTK("hdmi_video_present %d\n", hdmi_video_present);
+
+ } else {
+ index += blklen;
+ break;
+ }
+
+ index += 9;
+
+ /*latency present */
+ if (latency_present) {
+ cfg->vsd_video_latency = edid[index++];
+ cfg->vsd_audio_latency = edid[index++];
+
+ if (I_latency_present) {
+ cfg->vsd_I_video_latency = edid[index++];
+ cfg->vsd_I_audio_latency = edid[index++];
+ } else {
+ cfg->vsd_I_video_latency = cfg->vsd_video_latency;
+ cfg->vsd_I_audio_latency = cfg->vsd_audio_latency;
+ }
+
+ DPRINTK("VSD latency video_latency %d\n", cfg->vsd_video_latency);
+ DPRINTK("VSD latency audio_latency %d\n", cfg->vsd_audio_latency);
+ DPRINTK("VSD latency I_video_latency %d\n", cfg->vsd_I_video_latency);
+ DPRINTK("VSD latency I_audio_latency %d\n", cfg->vsd_I_audio_latency);
+ }
+
+ if (hdmi_video_present) {
+ hdmi_3d_present = edid[index] >> 7;
+ hdmi_3d_multi_present = (edid[index] & 0x60) >> 5;
+ index++;
+ hdmi_vic_len = (edid[index] & 0xe0) >> 5;
+ hdmi_3d_len = edid[index] & 0x1f;
+ index++;
+
+ DPRINTK("hdmi_3d_present %d\n", hdmi_3d_present);
+ DPRINTK("hdmi_3d_multi_present %d\n", hdmi_3d_multi_present);
+ DPRINTK("hdmi_vic_len %d\n", hdmi_vic_len);
+ DPRINTK("hdmi_3d_len %d\n", hdmi_3d_len);
+
+ if (hdmi_vic_len > 0) {
+ for (i = 0; i < hdmi_vic_len; i++) {
+ cfg->hdmi_vic[i] = edid[index++];
+ DPRINTK("HDMI_vic=%d\n", cfg->hdmi_vic[i]);
+ }
+ }
+
+ if (hdmi_3d_len > 0) {
+ if (hdmi_3d_present) {
+ if (hdmi_3d_multi_present == 0x1) {
+ cfg->hdmi_3d_struct_all = (edid[index] << 8) | edid[index+1];
+ index_inc = 2;
+ } else if (hdmi_3d_multi_present == 0x2) {
+ cfg->hdmi_3d_struct_all = (edid[index] << 8) | edid[index+1];
+ cfg->hdmi_3d_mask_all = (edid[index+2] << 8) | edid[index+3];
+ index_inc = 4;
+ } else
+ index_inc = 0;
+ }
+
+ DPRINTK("HDMI 3d struct all =0x%x\n", cfg->hdmi_3d_struct_all);
+ DPRINTK("HDMI 3d mask all =0x%x\n", cfg->hdmi_3d_mask_all);
+
+ /* Read 2D vic 3D_struct */
+ if ((hdmi_3d_len - index_inc) > 0) {
+ DPRINTK("Support 3D video format\n");
+ i = 0;
+ while ((hdmi_3d_len - index_inc) > 0) {
+
+ cfg->hdmi_3d_format[i].vic_order_2d = edid[index+index_inc] >> 4;
+ cfg->hdmi_3d_format[i].struct_3d = edid[index+index_inc] & 0x0f;
+ index_inc++;
+
+ if (cfg->hdmi_3d_format[i].struct_3d == 8) {
+ cfg->hdmi_3d_format[i].detail_3d = edid[index+index_inc] >> 4;
+ index_inc++;
+ } else if (cfg->hdmi_3d_format[i].struct_3d > 8) {
+ cfg->hdmi_3d_format[i].detail_3d = 0;
+ index_inc++;
+ }
+
+ DPRINTK("vic_order_2d=%d, 3d_struct=%d, 3d_detail=0x%x\n",
+ cfg->hdmi_3d_format[i].vic_order_2d,
+ cfg->hdmi_3d_format[i].struct_3d,
+ cfg->hdmi_3d_format[i].detail_3d);
+ i++;
+ }
+ }
+ index += index_inc;
+ }
+ }
+
+ index = vsd_end;
+
+ break;
+ }
+ case 0x1: /*Audio data block*/
+ {
+ u8 audio_format, max_ch, byte1, byte2, byte3;
+
+ i = 0;
+ cfg->max_channels = 0;
+ cfg->sample_rates = 0;
+ cfg->sample_sizes = 0;
+
+ while (i < blklen) {
+ byte1 = edid[index + 1];
+ byte2 = edid[index + 2];
+ byte3 = edid[index + 3];
+ index += 3;
+ i += 3;
+
+ audio_format = byte1 >> 3;
+ max_ch = (byte1 & 0x07) + 1;
+
+ DPRINTK("Audio Format Descriptor : %2d\n", audio_format);
+ DPRINTK("Max Number of Channels : %2d\n", max_ch);
+ DPRINTK("Sample Rates : %02x\n", byte2);
+
+ /* ALSA can't specify specific compressed
+ * formats, so only care about PCM for now. */
+ if (audio_format == AUDIO_CODING_TYPE_LPCM) {
+ if (max_ch > cfg->max_channels)
+ cfg->max_channels = max_ch;
+
+ cfg->sample_rates |= byte2;
+ cfg->sample_sizes |= byte3 & 0x7;
+ DPRINTK("Sample Sizes : %02x\n",
+ byte3 & 0x7);
+ }
+ }
+ break;
+ }
+ case 0x4: /*Speaker allocation block*/
+ {
+ i = 0;
+ while (i < blklen) {
+ cfg->speaker_alloc = edid[index + 1];
+ index += 3;
+ i += 3;
+ DPRINTK("Speaker Alloc : %02x\n", cfg->speaker_alloc);
+ }
+ break;
+ }
+ case 0x7: /*User extended block*/
+ default:
+ /* skip */
+				DPRINTK("Unhandled block, tagcode = 0x%x\n", tagcode);
+ index += blklen;
+ break;
+ }
+
+ index++;
+ }
+ }
+
+ /* long desc */
+	DPRINTK("CEA long desc timings\n");
+ index = detail_timing_desc_offset;
+ block = edid + index;
+ while (index < (EDID_LENGTH - DETAILED_TIMING_DESCRIPTION_SIZE)) {
+ if (!(block[0] == 0x00 && block[1] == 0x00)) {
+ get_detailed_timing(block, &mode[num]);
+ num++;
+ }
+ block += DETAILED_TIMING_DESCRIPTION_SIZE;
+ index += DETAILED_TIMING_DESCRIPTION_SIZE;
+ }
+
+ if (!num) {
+ kfree(mode);
+ return 0;
+ }
+
+ m = kmalloc((num + specs->modedb_len) *
+ sizeof(struct fb_videomode), GFP_KERNEL);
+	if (!m) {
+		kfree(mode);
+		return 0;
+	}
+
+ if (specs->modedb_len) {
+ memmove(m, specs->modedb,
+ specs->modedb_len * sizeof(struct fb_videomode));
+ kfree(specs->modedb);
+ }
+ memmove(m+specs->modedb_len, mode,
+ num * sizeof(struct fb_videomode));
+ kfree(mode);
+
+ specs->modedb_len += num;
+ specs->modedb = m;
+
+ return 0;
+}
+EXPORT_SYMBOL(mxc_edid_parse_ext_blk);
+
+static int mxc_edid_readblk(struct i2c_adapter *adp,
+ unsigned short addr, unsigned char *edid)
+{
+ int ret = 0, extblknum = 0;
+ unsigned char regaddr = 0x0;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &regaddr,
+ }, {
+ .addr = addr,
+ .flags = I2C_M_RD,
+ .len = EDID_LENGTH,
+ .buf = edid,
+ },
+ };
+
+ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ DPRINTK("unable to read EDID block\n");
+ return -EIO;
+ }
+
+ if (edid[1] == 0x00)
+ return -ENOENT;
+
+ extblknum = edid[0x7E];
+
+ if (extblknum) {
+ regaddr = 128;
+ msg[1].buf = edid + EDID_LENGTH;
+
+ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ DPRINTK("unable to read EDID ext block\n");
+ return -EIO;
+ }
+ }
+
+ return extblknum;
+}
+
+static int mxc_edid_readsegblk(struct i2c_adapter *adp, unsigned short addr,
+ unsigned char *edid, int seg_num)
+{
+ int ret = 0;
+ unsigned char segment = 0x1, regaddr = 0;
+ struct i2c_msg msg[3] = {
+ {
+ .addr = 0x30,
+ .flags = 0,
+ .len = 1,
+ .buf = &segment,
+ }, {
+ .addr = addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &regaddr,
+ }, {
+ .addr = addr,
+ .flags = I2C_M_RD,
+ .len = EDID_LENGTH,
+ .buf = edid,
+ },
+ };
+
+ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ DPRINTK("unable to read EDID block\n");
+ return -EIO;
+ }
+
+ if (seg_num == 2) {
+ regaddr = 128;
+ msg[2].buf = edid + EDID_LENGTH;
+
+ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ DPRINTK("unable to read EDID block\n");
+ return -EIO;
+ }
+ }
+
+ return ret;
+}
+
+int mxc_edid_var_to_vic(struct fb_var_screeninfo *var)
+{
+ int i;
+ struct fb_videomode m;
+
+ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
+ fb_var_to_videomode(&m, var);
+ if (mxc_edid_fb_mode_is_equal(false, &m, &mxc_cea_mode[i]))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(mxc_cea_mode))
+ return 0;
+
+ return i;
+}
+EXPORT_SYMBOL(mxc_edid_var_to_vic);
+
+int mxc_edid_mode_to_vic(const struct fb_videomode *mode)
+{
+ int i;
+ bool use_aspect = (mode->vmode & FB_VMODE_ASPECT_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
+ if (mxc_edid_fb_mode_is_equal(use_aspect, mode, &mxc_cea_mode[i]))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(mxc_cea_mode))
+ return 0;
+
+ return i;
+}
+EXPORT_SYMBOL(mxc_edid_mode_to_vic);
+
+/* the caller must provide an edid buffer of at least 512 bytes */
+int mxc_edid_read(struct i2c_adapter *adp, unsigned short addr,
+ unsigned char *edid, struct mxc_edid_cfg *cfg, struct fb_info *fbi)
+{
+ int ret = 0, extblknum;
+ if (!adp || !edid || !cfg || !fbi)
+ return -EINVAL;
+
+ memset(edid, 0, EDID_LENGTH*4);
+ memset(cfg, 0, sizeof(struct mxc_edid_cfg));
+
+ extblknum = mxc_edid_readblk(adp, addr, edid);
+ if (extblknum < 0)
+ return extblknum;
+
+ /* edid first block parsing */
+ memset(&fbi->monspecs, 0, sizeof(fbi->monspecs));
+ fb_edid_to_monspecs(edid, &fbi->monspecs);
+
+ if (extblknum) {
+ int i;
+
+ /* FIXME: mxc_edid_readsegblk() won't read more than 2 blocks
+ * and the for-loop will read past the end of the buffer! :-( */
+ if (extblknum > 3) {
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ /* need read segment block? */
+ if (extblknum > 1) {
+ ret = mxc_edid_readsegblk(adp, addr,
+ edid + EDID_LENGTH*2, extblknum - 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 1; i <= extblknum; i++)
+ /* edid ext block parsing */
+ mxc_edid_parse_ext_blk(edid + i*EDID_LENGTH,
+ cfg, &fbi->monspecs);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mxc_edid_read);
+
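mxc_edid_read() above is what the HDMI and LVDS display drivers call to probe the sink, and the calling convention is easy to get wrong because of the 512-byte buffer requirement. A hedged caller sketch follows: the function name, the 0x50 DDC address and the error handling are assumptions for illustration, not code from this patch.

/* Hypothetical mxc_edid_read() caller sketch; details are illustrative only. */
#include <linux/i2c.h>
#include <linux/fb.h>
#include <video/mxc_edid.h>

static int example_read_sink_edid(struct i2c_adapter *ddc, struct fb_info *fbi)
{
	static u8 edid[512];		/* EDID_LENGTH * 4, as mxc_edid_read() expects */
	struct mxc_edid_cfg cfg;
	int ret;

	ret = mxc_edid_read(ddc, 0x50, edid, &cfg, fbi); /* 0x50 is the usual DDC address */
	if (ret < 0)
		return ret;

	/* fbi->monspecs.modedb now holds the base block plus any CEA extension modes */
	if (cfg.hdmi_cap)
		pr_info("sink is HDMI, max TMDS clock %d MHz\n", cfg.vsd_max_tmdsclk_rate);

	return 0;
}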
diff --git a/drivers/video/fbdev/mxc/mxc_hdmi.c b/drivers/video/fbdev/mxc/mxc_hdmi.c
new file mode 100644
index 000000000000..ab87b473c7c1
--- /dev/null
+++ b/drivers/video/fbdev/mxc/mxc_hdmi.c
@@ -0,0 +1,2948 @@
+/*
+ * Copyright (C) 2011-2015 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+/*
+ * SH-Mobile High-Definition Multimedia Interface (HDMI) driver
+ * for SLISHDMI13T and SLIPHDMIT IP cores
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/uaccess.h>
+#include <linux/cpufreq.h>
+#include <linux/firmware.h>
+#include <linux/kthread.h>
+#include <linux/regulator/driver.h>
+#include <linux/fsl_devices.h>
+#include <linux/ipu.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_device.h>
+
+#include <linux/console.h>
+#include <linux/types.h>
+
+#include "../edid.h"
+#include <video/mxc_edid.h>
+#include <video/mxc_hdmi.h>
+#include "mxc_dispdrv.h"
+
+#include <linux/mfd/mxc-hdmi-core.h>
+
+#define DISPDRV_HDMI "hdmi"
+#define HDMI_EDID_LEN 512
+
+/* status codes for reading edid */
+#define HDMI_EDID_SUCCESS 0
+#define HDMI_EDID_FAIL -1
+#define HDMI_EDID_SAME -2
+#define HDMI_EDID_NO_MODES -3
+
+#define NUM_CEA_VIDEO_MODES 64
+#define DEFAULT_VIDEO_MODE 16 /* 1080P */
+
+#define RGB 0
+#define YCBCR444 1
+#define YCBCR422_16BITS 2
+#define YCBCR422_8BITS 3
+#define XVYCC444 4
+
+/*
+ * We follow a flowchart which is in the "Synopsys DesignWare Cores
+ * HDMI Transmitter Controller User Guide, 1.30a", section 3.1
+ * (dwc_hdmi_tx_user.pdf)
+ *
+ * Below are notes that say "HDMI Initialization Step X"
+ * These correspond to the flowchart.
+ */
+
+/*
+ * We are required to configure VGA mode before reading edid
+ * in HDMI Initialization Step B
+ */
+static const struct fb_videomode vga_mode = {
+ /* 640x480 @ 60 Hz, 31.5 kHz hsync */
+ NULL, 60, 640, 480, 39721, 48, 16, 33, 10, 96, 2, 0,
+ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, FB_MODE_IS_VESA,
+};
+
+enum hdmi_datamap {
+ RGB444_8B = 0x01,
+ RGB444_10B = 0x03,
+ RGB444_12B = 0x05,
+ RGB444_16B = 0x07,
+ YCbCr444_8B = 0x09,
+ YCbCr444_10B = 0x0B,
+ YCbCr444_12B = 0x0D,
+ YCbCr444_16B = 0x0F,
+ YCbCr422_8B = 0x16,
+ YCbCr422_10B = 0x14,
+ YCbCr422_12B = 0x12,
+};
+
+enum hdmi_colorimetry {
+ eITU601,
+ eITU709,
+};
+
+struct hdmi_vmode {
+ bool mDVI;
+ bool mHSyncPolarity;
+ bool mVSyncPolarity;
+ bool mInterlaced;
+ bool mDataEnablePolarity;
+
+ unsigned int mPixelClock;
+ unsigned int mPixelRepetitionInput;
+ unsigned int mPixelRepetitionOutput;
+};
+
+struct hdmi_data_info {
+ unsigned int enc_in_format;
+ unsigned int enc_out_format;
+ unsigned int enc_color_depth;
+ unsigned int colorimetry;
+ unsigned int pix_repet_factor;
+ unsigned int hdcp_enable;
+ unsigned int rgb_out_enable;
+ struct hdmi_vmode video_mode;
+};
+
+struct hdmi_phy_reg_config {
+ /* HDMI PHY register config for pass HCT */
+ u16 reg_vlev;
+ u16 reg_cksymtx;
+};
+
+struct mxc_hdmi {
+ struct platform_device *pdev;
+ struct platform_device *core_pdev;
+ struct mxc_dispdrv_handle *disp_mxc_hdmi;
+ struct fb_info *fbi;
+ struct clk *hdmi_isfr_clk;
+ struct clk *hdmi_iahb_clk;
+ struct clk *mipi_core_clk;
+ struct delayed_work hotplug_work;
+ struct delayed_work hdcp_hdp_work;
+
+ struct notifier_block nb;
+
+ struct hdmi_data_info hdmi_data;
+ int vic;
+ struct mxc_edid_cfg edid_cfg;
+ u8 edid[HDMI_EDID_LEN];
+ bool fb_reg;
+ bool cable_plugin;
+ u8 blank;
+ bool dft_mode_set;
+ char *dft_mode_str;
+ int default_bpp;
+ u8 latest_intr_stat;
+ bool irq_enabled;
+ spinlock_t irq_lock;
+ bool phy_enabled;
+ struct fb_videomode default_mode;
+ struct fb_videomode previous_non_vga_mode;
+ bool requesting_vga_for_initialization;
+
+ int *gpr_base;
+ int *gpr_hdmi_base;
+ int *gpr_sdma_base;
+ int cpu_type;
+ int cpu_version;
+ struct hdmi_phy_reg_config phy_config;
+
+ struct pinctrl *pinctrl;
+};
+
+static int hdmi_major;
+static struct class *hdmi_class;
+
+struct i2c_client *hdmi_i2c;
+struct mxc_hdmi *g_hdmi;
+
+static bool hdmi_inited;
+static bool hdcp_init;
+
+extern const struct fb_videomode mxc_cea_mode[64];
+extern void mxc_hdmi_cec_handle(u16 cec_stat);
+
+static void mxc_hdmi_setup(struct mxc_hdmi *hdmi, unsigned long event);
+static void hdmi_enable_overflow_interrupts(void);
+static void hdmi_disable_overflow_interrupts(void);
+
+static struct platform_device_id imx_hdmi_devtype[] = {
+ {
+ .name = "hdmi-imx6DL",
+ .driver_data = IMX6DL_HDMI,
+ }, {
+ .name = "hdmi-imx6Q",
+ .driver_data = IMX6Q_HDMI,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype);
+
+static const struct of_device_id imx_hdmi_dt_ids[] = {
+ { .compatible = "fsl,imx6dl-hdmi-video", .data = &imx_hdmi_devtype[IMX6DL_HDMI], },
+ { .compatible = "fsl,imx6q-hdmi-video", .data = &imx_hdmi_devtype[IMX6Q_HDMI], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
+
+static inline int cpu_is_imx6dl(struct mxc_hdmi *hdmi)
+{
+ return hdmi->cpu_type == IMX6DL_HDMI;
+}
+#ifdef DEBUG
+static void dump_fb_videomode(struct fb_videomode *m)
+{
+ pr_debug("fb_videomode = %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
+ m->refresh, m->xres, m->yres, m->pixclock, m->left_margin,
+ m->right_margin, m->upper_margin, m->lower_margin,
+ m->hsync_len, m->vsync_len, m->sync, m->vmode, m->flag);
+}
+#else
+static void dump_fb_videomode(struct fb_videomode *m)
+{}
+#endif
+
+static ssize_t mxc_hdmi_show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
+
+ strcpy(buf, hdmi->fbi->fix.id);
+ sprintf(buf+strlen(buf), "\n");
+
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(fb_name, S_IRUGO, mxc_hdmi_show_name, NULL);
+
+static ssize_t mxc_hdmi_show_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
+
+ if (hdmi->cable_plugin == false)
+ strcpy(buf, "plugout\n");
+ else
+ strcpy(buf, "plugin\n");
+
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(cable_state, S_IRUGO, mxc_hdmi_show_state, NULL);
+
+static ssize_t mxc_hdmi_show_edid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
+ int i, j, len = 0;
+
+ for (j = 0; j < HDMI_EDID_LEN/16; j++) {
+ for (i = 0; i < 16; i++)
+ len += sprintf(buf+len, "0x%02X ",
+ hdmi->edid[j*16 + i]);
+ len += sprintf(buf+len, "\n");
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(edid, S_IRUGO, mxc_hdmi_show_edid, NULL);
+
+static ssize_t mxc_hdmi_show_rgb_out_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
+
+ if (hdmi->hdmi_data.rgb_out_enable == true)
+ strcpy(buf, "RGB out\n");
+ else
+ strcpy(buf, "YCbCr out\n");
+
+ return strlen(buf);
+}
+
+static ssize_t mxc_hdmi_store_rgb_out_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ hdmi->hdmi_data.rgb_out_enable = value;
+
+ /* Reconfig HDMI for output color space change */
+ mxc_hdmi_setup(hdmi, 0);
+
+ return count;
+}
+
+static DEVICE_ATTR(rgb_out_enable, S_IRUGO | S_IWUSR,
+ mxc_hdmi_show_rgb_out_enable,
+ mxc_hdmi_store_rgb_out_enable);
+
+static ssize_t mxc_hdmi_show_hdcp_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
+
+ if (hdmi->hdmi_data.hdcp_enable == false)
+ strcpy(buf, "hdcp disable\n");
+ else
+ strcpy(buf, "hdcp enable\n");
+
+ return strlen(buf);
+
+}
+
+static ssize_t mxc_hdmi_store_hdcp_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
+ char event_string[32];
+ char *envp[] = { event_string, NULL };
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ hdmi->hdmi_data.hdcp_enable = value;
+
+ /* Reconfig HDMI for HDCP */
+ mxc_hdmi_setup(hdmi, 0);
+
+ if (hdmi->hdmi_data.hdcp_enable == false) {
+ sprintf(event_string, "EVENT=hdcpdisable");
+ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
+ } else {
+ sprintf(event_string, "EVENT=hdcpenable");
+ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
+ }
+
+ return count;
+
+}
+
+static DEVICE_ATTR(hdcp_enable, S_IRUGO | S_IWUSR,
+ mxc_hdmi_show_hdcp_enable, mxc_hdmi_store_hdcp_enable);
+
+/*!
+ * This submodule is responsible for video data synchronization.
+ * For example, for RGB 4:4:4 input, the data map is defined as:
+ * pin{47~40} <==> R[7:0]
+ * pin{31~24} <==> G[7:0]
+ * pin{15~8} <==> B[7:0]
+ */
+static void hdmi_video_sample(struct mxc_hdmi *hdmi)
+{
+ int color_format = 0;
+ u8 val;
+
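+ /*
+ * Select the TX_INVID0 video mapping code for the input format and
+ * color depth; the values below match the hdmi_datamap enum above.
+ */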
+ if (hdmi->hdmi_data.enc_in_format == RGB) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x01;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x03;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x05;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_format = 0x07;
+ else
+ return;
+ } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x09;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x0B;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x0D;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_format = 0x0F;
+ else
+ return;
+ } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x16;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x14;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x12;
+ else
+ return;
+ }
+
+ val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE |
+ ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) &
+ HDMI_TX_INVID0_VIDEO_MAPPING_MASK);
+ hdmi_writeb(val, HDMI_TX_INVID0);
+
+ /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */
+ val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE |
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE |
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE;
+ hdmi_writeb(val, HDMI_TX_INSTUFFING);
+ hdmi_writeb(0x0, HDMI_TX_GYDATA0);
+ hdmi_writeb(0x0, HDMI_TX_GYDATA1);
+ hdmi_writeb(0x0, HDMI_TX_RCRDATA0);
+ hdmi_writeb(0x0, HDMI_TX_RCRDATA1);
+ hdmi_writeb(0x0, HDMI_TX_BCBDATA0);
+ hdmi_writeb(0x0, HDMI_TX_BCBDATA1);
+}
+
+static int isColorSpaceConversion(struct mxc_hdmi *hdmi)
+{
+ return (hdmi->hdmi_data.enc_in_format !=
+ hdmi->hdmi_data.enc_out_format);
+}
+
+static int isColorSpaceDecimation(struct mxc_hdmi *hdmi)
+{
+ return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) &&
+ (hdmi->hdmi_data.enc_in_format == RGB ||
+ hdmi->hdmi_data.enc_in_format == YCBCR444));
+}
+
+static int isColorSpaceInterpolation(struct mxc_hdmi *hdmi)
+{
+ return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) &&
+ (hdmi->hdmi_data.enc_out_format == RGB
+ || hdmi->hdmi_data.enc_out_format == YCBCR444));
+}
+
+/*!
+ * update the color space conversion coefficients.
+ */
+static void update_csc_coeffs(struct mxc_hdmi *hdmi)
+{
+ unsigned short csc_coeff[3][4];
+ unsigned int csc_scale = 1;
+ u8 val;
+ bool coeff_selected = false;
+
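+ /*
+ * The CSC coefficients are fixed-point values in which 0x2000
+ * appears to represent 1.0 (note that the identity matrix fallback
+ * below uses 0x2000 on its diagonal).
+ */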
+ if (isColorSpaceConversion(hdmi)) { /* csc needed */
+ if (hdmi->hdmi_data.enc_out_format == RGB) {
+ if (hdmi->hdmi_data.colorimetry == eITU601) {
+ csc_coeff[0][0] = 0x2000;
+ csc_coeff[0][1] = 0x6926;
+ csc_coeff[0][2] = 0x74fd;
+ csc_coeff[0][3] = 0x010e;
+
+ csc_coeff[1][0] = 0x2000;
+ csc_coeff[1][1] = 0x2cdd;
+ csc_coeff[1][2] = 0x0000;
+ csc_coeff[1][3] = 0x7e9a;
+
+ csc_coeff[2][0] = 0x2000;
+ csc_coeff[2][1] = 0x0000;
+ csc_coeff[2][2] = 0x38b4;
+ csc_coeff[2][3] = 0x7e3b;
+
+ csc_scale = 1;
+ coeff_selected = true;
+ } else if (hdmi->hdmi_data.colorimetry == eITU709) {
+ csc_coeff[0][0] = 0x2000;
+ csc_coeff[0][1] = 0x7106;
+ csc_coeff[0][2] = 0x7a02;
+ csc_coeff[0][3] = 0x00a7;
+
+ csc_coeff[1][0] = 0x2000;
+ csc_coeff[1][1] = 0x3264;
+ csc_coeff[1][2] = 0x0000;
+ csc_coeff[1][3] = 0x7e6d;
+
+ csc_coeff[2][0] = 0x2000;
+ csc_coeff[2][1] = 0x0000;
+ csc_coeff[2][2] = 0x3b61;
+ csc_coeff[2][3] = 0x7e25;
+
+ csc_scale = 1;
+ coeff_selected = true;
+ }
+ } else if (hdmi->hdmi_data.enc_in_format == RGB) {
+ if (hdmi->hdmi_data.colorimetry == eITU601) {
+ csc_coeff[0][0] = 0x2591;
+ csc_coeff[0][1] = 0x1322;
+ csc_coeff[0][2] = 0x074b;
+ csc_coeff[0][3] = 0x0000;
+
+ csc_coeff[1][0] = 0x6535;
+ csc_coeff[1][1] = 0x2000;
+ csc_coeff[1][2] = 0x7acc;
+ csc_coeff[1][3] = 0x0200;
+
+ csc_coeff[2][0] = 0x6acd;
+ csc_coeff[2][1] = 0x7534;
+ csc_coeff[2][2] = 0x2000;
+ csc_coeff[2][3] = 0x0200;
+
+ csc_scale = 0;
+ coeff_selected = true;
+ } else if (hdmi->hdmi_data.colorimetry == eITU709) {
+ csc_coeff[0][0] = 0x2dc5;
+ csc_coeff[0][1] = 0x0d9b;
+ csc_coeff[0][2] = 0x049e;
+ csc_coeff[0][3] = 0x0000;
+
+ csc_coeff[1][0] = 0x62f0;
+ csc_coeff[1][1] = 0x2000;
+ csc_coeff[1][2] = 0x7d11;
+ csc_coeff[1][3] = 0x0200;
+
+ csc_coeff[2][0] = 0x6756;
+ csc_coeff[2][1] = 0x78ab;
+ csc_coeff[2][2] = 0x2000;
+ csc_coeff[2][3] = 0x0200;
+
+ csc_scale = 0;
+ coeff_selected = true;
+ }
+ }
+ }
+
+ if (!coeff_selected) {
+ csc_coeff[0][0] = 0x2000;
+ csc_coeff[0][1] = 0x0000;
+ csc_coeff[0][2] = 0x0000;
+ csc_coeff[0][3] = 0x0000;
+
+ csc_coeff[1][0] = 0x0000;
+ csc_coeff[1][1] = 0x2000;
+ csc_coeff[1][2] = 0x0000;
+ csc_coeff[1][3] = 0x0000;
+
+ csc_coeff[2][0] = 0x0000;
+ csc_coeff[2][1] = 0x0000;
+ csc_coeff[2][2] = 0x2000;
+ csc_coeff[2][3] = 0x0000;
+
+ csc_scale = 1;
+ }
+
+ /* Update CSC parameters in HDMI CSC registers */
+ hdmi_writeb((unsigned char)(csc_coeff[0][0] & 0xFF),
+ HDMI_CSC_COEF_A1_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[0][0] >> 8),
+ HDMI_CSC_COEF_A1_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[0][1] & 0xFF),
+ HDMI_CSC_COEF_A2_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[0][1] >> 8),
+ HDMI_CSC_COEF_A2_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[0][2] & 0xFF),
+ HDMI_CSC_COEF_A3_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[0][2] >> 8),
+ HDMI_CSC_COEF_A3_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[0][3] & 0xFF),
+ HDMI_CSC_COEF_A4_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[0][3] >> 8),
+ HDMI_CSC_COEF_A4_MSB);
+
+ hdmi_writeb((unsigned char)(csc_coeff[1][0] & 0xFF),
+ HDMI_CSC_COEF_B1_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[1][0] >> 8),
+ HDMI_CSC_COEF_B1_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[1][1] & 0xFF),
+ HDMI_CSC_COEF_B2_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[1][1] >> 8),
+ HDMI_CSC_COEF_B2_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[1][2] & 0xFF),
+ HDMI_CSC_COEF_B3_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[1][2] >> 8),
+ HDMI_CSC_COEF_B3_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[1][3] & 0xFF),
+ HDMI_CSC_COEF_B4_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[1][3] >> 8),
+ HDMI_CSC_COEF_B4_MSB);
+
+ hdmi_writeb((unsigned char)(csc_coeff[2][0] & 0xFF),
+ HDMI_CSC_COEF_C1_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[2][0] >> 8),
+ HDMI_CSC_COEF_C1_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[2][1] & 0xFF),
+ HDMI_CSC_COEF_C2_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[2][1] >> 8),
+ HDMI_CSC_COEF_C2_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[2][2] & 0xFF),
+ HDMI_CSC_COEF_C3_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[2][2] >> 8),
+ HDMI_CSC_COEF_C3_MSB);
+ hdmi_writeb((unsigned char)(csc_coeff[2][3] & 0xFF),
+ HDMI_CSC_COEF_C4_LSB);
+ hdmi_writeb((unsigned char)(csc_coeff[2][3] >> 8),
+ HDMI_CSC_COEF_C4_MSB);
+
+ val = hdmi_readb(HDMI_CSC_SCALE);
+ val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK;
+ val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK;
+ hdmi_writeb(val, HDMI_CSC_SCALE);
+}
+
+static void hdmi_video_csc(struct mxc_hdmi *hdmi)
+{
+ int color_depth = 0;
+ int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
+ int decimation = 0;
+ u8 val;
+
+ /* YCC422 interpolation to 444 mode */
+ if (isColorSpaceInterpolation(hdmi))
+ interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1;
+ else if (isColorSpaceDecimation(hdmi))
+ decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
+
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP;
+ else
+ return;
+
+ /*configure the CSC registers */
+ hdmi_writeb(interpolation | decimation, HDMI_CSC_CFG);
+ val = hdmi_readb(HDMI_CSC_SCALE);
+ val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK;
+ val |= color_depth;
+ hdmi_writeb(val, HDMI_CSC_SCALE);
+
+ update_csc_coeffs(hdmi);
+}
+
+/*!
+ * The HDMI video packetizer packetizes the pixel data.
+ * For example, if the input is YCC422 or a pixel repeater is used,
+ * the data must be repacked; otherwise this module can be bypassed.
+ */
+static void hdmi_video_packetize(struct mxc_hdmi *hdmi)
+{
+ unsigned int color_depth = 0;
+ unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
+ unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
+ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
+ u8 val;
+
+ if (hdmi_data->enc_out_format == RGB
+ || hdmi_data->enc_out_format == YCBCR444) {
+ if (hdmi_data->enc_color_depth == 0)
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
+ else if (hdmi_data->enc_color_depth == 8) {
+ color_depth = 4;
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
+ } else if (hdmi_data->enc_color_depth == 10)
+ color_depth = 5;
+ else if (hdmi_data->enc_color_depth == 12)
+ color_depth = 6;
+ else if (hdmi_data->enc_color_depth == 16)
+ color_depth = 7;
+ else
+ return;
+ } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) {
+ if (hdmi_data->enc_color_depth == 0 ||
+ hdmi_data->enc_color_depth == 8)
+ remap_size = HDMI_VP_REMAP_YCC422_16bit;
+ else if (hdmi_data->enc_color_depth == 10)
+ remap_size = HDMI_VP_REMAP_YCC422_20bit;
+ else if (hdmi_data->enc_color_depth == 12)
+ remap_size = HDMI_VP_REMAP_YCC422_24bit;
+ else
+ return;
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
+ } else
+ return;
+
+ /* Deep color is not supported because the IPU's maximum
+ * color depth is 24 bits */
+ color_depth = 0;
+
+ /* set the packetizer registers */
+ val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
+ HDMI_VP_PR_CD_COLOR_DEPTH_MASK) |
+ ((hdmi_data->pix_repet_factor <<
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) &
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
+ hdmi_writeb(val, HDMI_VP_PR_CD);
+
+ val = hdmi_readb(HDMI_VP_STUFF);
+ val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK;
+ val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE;
+ hdmi_writeb(val, HDMI_VP_STUFF);
+
+ /* Data from pixel repeater block */
+ if (hdmi_data->pix_repet_factor > 1) {
+ val = hdmi_readb(HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
+ HDMI_VP_CONF_BYPASS_SELECT_MASK);
+ val |= HDMI_VP_CONF_PR_EN_ENABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
+ hdmi_writeb(val, HDMI_VP_CONF);
+ } else { /* data from packetizer block */
+ val = hdmi_readb(HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
+ HDMI_VP_CONF_BYPASS_SELECT_MASK);
+ val |= HDMI_VP_CONF_PR_EN_DISABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
+ hdmi_writeb(val, HDMI_VP_CONF);
+ }
+
+ val = hdmi_readb(HDMI_VP_STUFF);
+ val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK;
+ val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET;
+ hdmi_writeb(val, HDMI_VP_STUFF);
+
+ hdmi_writeb(remap_size, HDMI_VP_REMAP);
+
+ if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) {
+ val = hdmi_readb(HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_ENABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
+ hdmi_writeb(val, HDMI_VP_CONF);
+ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) {
+ val = hdmi_readb(HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_ENABLE;
+ hdmi_writeb(val, HDMI_VP_CONF);
+ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) {
+ val = hdmi_readb(HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_ENABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
+ hdmi_writeb(val, HDMI_VP_CONF);
+ } else {
+ return;
+ }
+
+ val = hdmi_readb(HDMI_VP_STUFF);
+ val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK |
+ HDMI_VP_STUFF_YCC422_STUFFING_MASK);
+ val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
+ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE;
+ hdmi_writeb(val, HDMI_VP_STUFF);
+
+ val = hdmi_readb(HDMI_VP_CONF);
+ val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK;
+ val |= output_select;
+ hdmi_writeb(val, HDMI_VP_CONF);
+}
+
+#if 0
+/* Force a fixed color screen */
+static void hdmi_video_force_output(struct mxc_hdmi *hdmi, unsigned char force)
+{
+ u8 val;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ if (force) {
+ hdmi_writeb(0x00, HDMI_FC_DBGTMDS2); /* R */
+ hdmi_writeb(0x00, HDMI_FC_DBGTMDS1); /* G */
+ hdmi_writeb(0xFF, HDMI_FC_DBGTMDS0); /* B */
+ val = hdmi_readb(HDMI_FC_DBGFORCE);
+ val |= HDMI_FC_DBGFORCE_FORCEVIDEO;
+ hdmi_writeb(val, HDMI_FC_DBGFORCE);
+ } else {
+ val = hdmi_readb(HDMI_FC_DBGFORCE);
+ val &= ~HDMI_FC_DBGFORCE_FORCEVIDEO;
+ hdmi_writeb(val, HDMI_FC_DBGFORCE);
+ hdmi_writeb(0x00, HDMI_FC_DBGTMDS2); /* R */
+ hdmi_writeb(0x00, HDMI_FC_DBGTMDS1); /* G */
+ hdmi_writeb(0x00, HDMI_FC_DBGTMDS0); /* B */
+ }
+}
+#endif
+
+static inline void hdmi_phy_test_clear(struct mxc_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTCLR_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) &
+ HDMI_PHY_TST0_TSTCLR_MASK;
+ hdmi_writeb(val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_enable(struct mxc_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTEN_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) &
+ HDMI_PHY_TST0_TSTEN_MASK;
+ hdmi_writeb(val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_clock(struct mxc_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTCLK_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) &
+ HDMI_PHY_TST0_TSTCLK_MASK;
+ hdmi_writeb(val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_din(struct mxc_hdmi *hdmi,
+ unsigned char bit)
+{
+ hdmi_writeb(bit, HDMI_PHY_TST1);
+}
+
+static inline void hdmi_phy_test_dout(struct mxc_hdmi *hdmi,
+ unsigned char bit)
+{
+ hdmi_writeb(bit, HDMI_PHY_TST2);
+}
+
+static bool hdmi_phy_wait_i2c_done(struct mxc_hdmi *hdmi, int msec)
+{
+ unsigned char val = 0;
+ val = hdmi_readb(HDMI_IH_I2CMPHY_STAT0) & 0x3;
+ while (val == 0) {
+ udelay(1000);
+ if (msec-- == 0)
+ return false;
+ val = hdmi_readb(HDMI_IH_I2CMPHY_STAT0) & 0x3;
+ }
+ return true;
+}
+
+static void hdmi_phy_i2c_write(struct mxc_hdmi *hdmi, unsigned short data,
+ unsigned char addr)
+{
+ hdmi_writeb(0xFF, HDMI_IH_I2CMPHY_STAT0);
+ hdmi_writeb(addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
+ hdmi_writeb((unsigned char)(data >> 8),
+ HDMI_PHY_I2CM_DATAO_1_ADDR);
+ hdmi_writeb((unsigned char)(data >> 0),
+ HDMI_PHY_I2CM_DATAO_0_ADDR);
+ hdmi_writeb(HDMI_PHY_I2CM_OPERATION_ADDR_WRITE,
+ HDMI_PHY_I2CM_OPERATION_ADDR);
+ hdmi_phy_wait_i2c_done(hdmi, 1000);
+}
+
+#if 0
+static unsigned short hdmi_phy_i2c_read(struct mxc_hdmi *hdmi,
+ unsigned char addr)
+{
+ unsigned short data;
+ unsigned char msb = 0, lsb = 0;
+ hdmi_writeb(0xFF, HDMI_IH_I2CMPHY_STAT0);
+ hdmi_writeb(addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
+ hdmi_writeb(HDMI_PHY_I2CM_OPERATION_ADDR_READ,
+ HDMI_PHY_I2CM_OPERATION_ADDR);
+ hdmi_phy_wait_i2c_done(hdmi, 1000);
+ msb = hdmi_readb(HDMI_PHY_I2CM_DATAI_1_ADDR);
+ lsb = hdmi_readb(HDMI_PHY_I2CM_DATAI_0_ADDR);
+ data = (msb << 8) | lsb;
+ return data;
+}
+
+static int hdmi_phy_i2c_write_verify(struct mxc_hdmi *hdmi, unsigned short data,
+ unsigned char addr)
+{
+ unsigned short val = 0;
+ hdmi_phy_i2c_write(hdmi, data, addr);
+ val = hdmi_phy_i2c_read(hdmi, addr);
+ return (val == data);
+}
+#endif
+
+static bool hdmi_edid_wait_i2c_done(struct mxc_hdmi *hdmi, int msec)
+{
+ unsigned char val = 0;
+ val = hdmi_readb(HDMI_IH_I2CM_STAT0) & 0x2;
+ while (val == 0) {
+
+ udelay(1000);
+ if (msec-- == 0) {
+ dev_dbg(&hdmi->pdev->dev,
+ "HDMI EDID i2c operation time out!!\n");
+ return false;
+ }
+ val = hdmi_readb(HDMI_IH_I2CM_STAT0) & 0x2;
+ }
+ return true;
+}
+
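+/*
+ * EDID is read in 128-byte blocks. Two blocks share one 256-byte E-DDC
+ * segment, so the segment pointer is blockno / 2 and the byte offset
+ * within the segment is (blockno % 2) * 0x80 + addr.
+ */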
+static u8 hdmi_edid_i2c_read(struct mxc_hdmi *hdmi,
+ u8 addr, u8 blockno)
+{
+ u8 spointer = blockno / 2;
+ u8 edidaddress = ((blockno % 2) * 0x80) + addr;
+ u8 data;
+
+ hdmi_writeb(0xFF, HDMI_IH_I2CM_STAT0);
+ hdmi_writeb(edidaddress, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(spointer, HDMI_I2CM_SEGADDR);
+ if (spointer == 0)
+ hdmi_writeb(HDMI_I2CM_OPERATION_READ,
+ HDMI_I2CM_OPERATION);
+ else
+ hdmi_writeb(HDMI_I2CM_OPERATION_READ_EXT,
+ HDMI_I2CM_OPERATION);
+
+ hdmi_edid_wait_i2c_done(hdmi, 30);
+ data = hdmi_readb(HDMI_I2CM_DATAI);
+ hdmi_writeb(0xFF, HDMI_IH_I2CM_STAT0);
+ return data;
+}
+
+
+/* "Power-down enable (active low)"
+ * That means that power up == 1! */
+static void mxc_hdmi_phy_enable_power(u8 enable)
+{
+ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_PDZ_OFFSET,
+ HDMI_PHY_CONF0_PDZ_MASK);
+}
+
+static void mxc_hdmi_phy_enable_tmds(u8 enable)
+{
+ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_ENTMDS_OFFSET,
+ HDMI_PHY_CONF0_ENTMDS_MASK);
+}
+
+static void mxc_hdmi_phy_gen2_pddq(u8 enable)
+{
+ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET,
+ HDMI_PHY_CONF0_GEN2_PDDQ_MASK);
+}
+
+static void mxc_hdmi_phy_gen2_txpwron(u8 enable)
+{
+ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK);
+}
+
+#if 0
+static void mxc_hdmi_phy_gen2_enhpdrxsense(u8 enable)
+{
+ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET,
+ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK);
+}
+#endif
+
+static void mxc_hdmi_phy_sel_data_en_pol(u8 enable)
+{
+ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET,
+ HDMI_PHY_CONF0_SELDATAENPOL_MASK);
+}
+
+static void mxc_hdmi_phy_sel_interface_control(u8 enable)
+{
+ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_SELDIPIF_OFFSET,
+ HDMI_PHY_CONF0_SELDIPIF_MASK);
+}
+
+static int hdmi_phy_configure(struct mxc_hdmi *hdmi, unsigned char pRep,
+ unsigned char cRes, int cscOn)
+{
+ u8 val;
+ u8 msec;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ /* color resolution 0 is 8 bit colour depth */
+ if (cRes == 0)
+ cRes = 8;
+
+ if (pRep != 0)
+ return false;
+ else if (cRes != 8 && cRes != 12)
+ return false;
+
+ /* Enable csc path */
+ if (cscOn)
+ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
+ else
+ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS;
+
+ hdmi_writeb(val, HDMI_MC_FLOWCTRL);
+
+ /* gen2 tx power off */
+ mxc_hdmi_phy_gen2_txpwron(0);
+
+ /* gen2 pddq */
+ mxc_hdmi_phy_gen2_pddq(1);
+
+ /* PHY reset */
+ hdmi_writeb(HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
+ hdmi_writeb(HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ);
+
+ hdmi_writeb(HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
+
+ hdmi_phy_test_clear(hdmi, 1);
+ hdmi_writeb(HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2,
+ HDMI_PHY_I2CM_SLAVE_ADDR);
+ hdmi_phy_test_clear(hdmi, 0);
+
+ if (hdmi->hdmi_data.video_mode.mPixelClock < 0) {
+ dev_dbg(&hdmi->pdev->dev, "Pixel clock (%d) must be positive\n",
+ hdmi->hdmi_data.video_mode.mPixelClock);
+ return false;
+ }
+
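+ /*
+ * The tables below program the PHY PLL/MPLL configuration
+ * (register 0x06), GMP control (0x15) and current control (0x10)
+ * according to the pixel clock range and color resolution.
+ */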
+ if (hdmi->hdmi_data.video_mode.mPixelClock <= 45250000) {
+ switch (cRes) {
+ case 8:
+ /* PLL/MPLL Cfg */
+ hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
+ break;
+ default:
+ return false;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 92500000) {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x0140, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x2141, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x4142, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ default:
+ return false;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 148500000) {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ default:
+ return false;
+ }
+ } else {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x2001, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x4002, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
+ break;
+ default:
+ return false;
+ }
+ }
+
+ if (hdmi->hdmi_data.video_mode.mPixelClock <= 54000000) {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return false;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 58400000) {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return false;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 72000000) {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return false;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 74250000) {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return false;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 118800000) {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return false;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 216000000) {
+ switch (cRes) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return false;
+ }
+ } else {
+ dev_err(&hdmi->pdev->dev,
+ "Pixel clock %d - unsupported by HDMI\n",
+ hdmi->hdmi_data.video_mode.mPixelClock);
+ return false;
+ }
+
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
+ hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
+ /* RESISTANCE TERM 133Ohm Cfg */
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x19); /* TXTERM */
+ /* PREEMP Cgf 0.00 */
+ hdmi_phy_i2c_write(hdmi, 0x800d, 0x09); /* CKSYMTXCTRL */
+ /* TX/CK LVL 10 */
+ hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
+
+ /* Board specific setting for PHY register 0x09, 0x0e to pass HCT */
+ if (hdmi->phy_config.reg_cksymtx != 0)
+ hdmi_phy_i2c_write(hdmi, hdmi->phy_config.reg_cksymtx, 0x09);
+
+ if (hdmi->phy_config.reg_vlev != 0)
+ hdmi_phy_i2c_write(hdmi, hdmi->phy_config.reg_vlev, 0x0E);
+
+ /* REMOVE CLK TERM */
+ hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
+
+ if (hdmi->hdmi_data.video_mode.mPixelClock > 148500000) {
+ hdmi_phy_i2c_write(hdmi, 0x800b, 0x09);
+ hdmi_phy_i2c_write(hdmi, 0x0129, 0x0E);
+ }
+
+ mxc_hdmi_phy_enable_power(1);
+
+ /* toggle TMDS enable */
+ mxc_hdmi_phy_enable_tmds(0);
+ mxc_hdmi_phy_enable_tmds(1);
+
+ /* gen2 tx power on */
+ mxc_hdmi_phy_gen2_txpwron(1);
+ mxc_hdmi_phy_gen2_pddq(0);
+
+ /*Wait for PHY PLL lock */
+ msec = 4;
+ val = hdmi_readb(HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
+ while (val == 0) {
+ udelay(1000);
+ if (msec-- == 0) {
+ dev_dbg(&hdmi->pdev->dev, "PHY PLL not locked\n");
+ return false;
+ }
+ val = hdmi_readb(HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
+ }
+
+ return true;
+}
+
+static void mxc_hdmi_phy_init(struct mxc_hdmi *hdmi)
+{
+ int i;
+ bool cscon = false;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ /* Never do phy init if pixel clock is gated.
+ * Otherwise HDMI PHY will get messed up and generate an overflow
+ * interrupt that can't be cleared or detected by accessing the
+ * status register. */
+ if (!hdmi->fb_reg || !hdmi->cable_plugin
+ || (hdmi->blank != FB_BLANK_UNBLANK))
+ return;
+
+ if (!hdmi->hdmi_data.video_mode.mDVI)
+ hdmi_enable_overflow_interrupts();
+
+ /* Check whether CSC needs to be activated in HDMI mode */
+ cscon = (isColorSpaceConversion(hdmi) &&
+ !hdmi->hdmi_data.video_mode.mDVI);
+
+ /* HDMI Phy spec says to do the phy initialization sequence twice */
+ for (i = 0 ; i < 2 ; i++) {
+ mxc_hdmi_phy_sel_data_en_pol(1);
+ mxc_hdmi_phy_sel_interface_control(0);
+ mxc_hdmi_phy_enable_tmds(0);
+ mxc_hdmi_phy_enable_power(0);
+
+ /* Enable CSC */
+ hdmi_phy_configure(hdmi, 0, 8, cscon);
+ }
+
+ hdmi->phy_enabled = true;
+}
+
+static void hdmi_config_AVI(struct mxc_hdmi *hdmi)
+{
+ u8 val;
+ u8 pix_fmt;
+ u8 under_scan;
+ u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
+ struct fb_videomode mode;
+ const struct fb_videomode *edid_mode;
+ bool aspect_16_9;
+
+ dev_dbg(&hdmi->pdev->dev, "set up AVI frame\n");
+
+ fb_var_to_videomode(&mode, &hdmi->fbi->var);
+ /* Use mode from list extracted from EDID to get aspect ratio */
+ if (!list_empty(&hdmi->fbi->modelist)) {
+ edid_mode = fb_find_nearest_mode(&mode, &hdmi->fbi->modelist);
+ if (edid_mode->vmode & FB_VMODE_ASPECT_16_9)
+ aspect_16_9 = true;
+ else
+ aspect_16_9 = false;
+ } else
+ aspect_16_9 = false;
+
+ /********************************************
+ * AVI Data Byte 1
+ ********************************************/
+ if (hdmi->hdmi_data.enc_out_format == YCBCR444)
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
+ else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
+ else
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
+
+ if (hdmi->edid_cfg.cea_underscan)
+ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN;
+ else
+ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
+
+ /*
+ * Active format identification data is present in the AVI InfoFrame.
+ * Under scan info, no bar data
+ */
+ val = pix_fmt | under_scan |
+ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
+ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
+
+ hdmi_writeb(val, HDMI_FC_AVICONF0);
+
+ /********************************************
+ * AVI Data Byte 2
+ ********************************************/
+
+ /* Set the Aspect Ratio */
+ if (aspect_16_9) {
+ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
+ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
+ } else {
+ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
+ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
+ }
+
+ /* Set up colorimetry */
+ if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
+ if (hdmi->hdmi_data.colorimetry == eITU601)
+ ext_colorimetry =
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ else /* hdmi->hdmi_data.colorimetry == eITU709 */
+ ext_colorimetry =
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
+ } else if (hdmi->hdmi_data.enc_out_format != RGB) {
+ if (hdmi->hdmi_data.colorimetry == eITU601)
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
+ else /* hdmi->hdmi_data.colorimetry == eITU709 */
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
+ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ } else { /* Carries no data */
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
+ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ }
+
+ val = colorimetry | coded_ratio | act_ratio;
+ hdmi_writeb(val, HDMI_FC_AVICONF1);
+
+ /********************************************
+ * AVI Data Byte 3
+ ********************************************/
+
+ val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
+ HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT |
+ HDMI_FC_AVICONF2_SCALING_NONE;
+ hdmi_writeb(val, HDMI_FC_AVICONF2);
+
+ /********************************************
+ * AVI Data Byte 4
+ ********************************************/
+ hdmi_writeb(hdmi->vic, HDMI_FC_AVIVID);
+
+ /********************************************
+ * AVI Data Byte 5
+ ********************************************/
+
+ /* Set up input and output pixel repetition */
+ val = (((hdmi->hdmi_data.video_mode.mPixelRepetitionInput + 1) <<
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET) &
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK) |
+ ((hdmi->hdmi_data.video_mode.mPixelRepetitionOutput <<
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET) &
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
+ hdmi_writeb(val, HDMI_FC_PRCONF);
+
+ /* IT Content and quantization range = don't care */
+ val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
+ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
+ hdmi_writeb(val, HDMI_FC_AVICONF3);
+
+ /********************************************
+ * AVI Data Bytes 6-13
+ ********************************************/
+ hdmi_writeb(0, HDMI_FC_AVIETB0);
+ hdmi_writeb(0, HDMI_FC_AVIETB1);
+ hdmi_writeb(0, HDMI_FC_AVISBB0);
+ hdmi_writeb(0, HDMI_FC_AVISBB1);
+ hdmi_writeb(0, HDMI_FC_AVIELB0);
+ hdmi_writeb(0, HDMI_FC_AVIELB1);
+ hdmi_writeb(0, HDMI_FC_AVISRB0);
+ hdmi_writeb(0, HDMI_FC_AVISRB1);
+}
+
+/*!
+ * This submodule is responsible for video/audio data composition.
+ */
+static void hdmi_av_composer(struct mxc_hdmi *hdmi)
+{
+ u8 inv_val;
+ struct fb_info *fbi = hdmi->fbi;
+ struct fb_videomode fb_mode;
+ struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
+ int hblank, vblank;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ fb_var_to_videomode(&fb_mode, &fbi->var);
+
+ vmode->mHSyncPolarity = ((fb_mode.sync & FB_SYNC_HOR_HIGH_ACT) != 0);
+ vmode->mVSyncPolarity = ((fb_mode.sync & FB_SYNC_VERT_HIGH_ACT) != 0);
+ vmode->mInterlaced = ((fb_mode.vmode & FB_VMODE_INTERLACED) != 0);
+ vmode->mPixelClock = (fb_mode.xres + fb_mode.left_margin +
+ fb_mode.right_margin + fb_mode.hsync_len) * (fb_mode.yres +
+ fb_mode.upper_margin + fb_mode.lower_margin +
+ fb_mode.vsync_len) * fb_mode.refresh;
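+ /*
+ * Pixel clock = total horizontal pixels * total lines * refresh.
+ * For example, for CEA 1080p60:
+ * (1920 + 148 + 88 + 44) * (1080 + 36 + 4 + 5) * 60 = 148500000 Hz.
+ */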
+
+ dev_dbg(&hdmi->pdev->dev, "final pixclk = %d\n", vmode->mPixelClock);
+
+ /* Set up HDMI_FC_INVIDCONF */
+ inv_val = (vmode->mVSyncPolarity ?
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
+
+ inv_val |= (vmode->mHSyncPolarity ?
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
+
+ inv_val |= (vmode->mDataEnablePolarity ?
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW);
+
+ if (hdmi->vic == 39)
+ inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
+ else
+ inv_val |= (vmode->mInterlaced ?
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
+
+ inv_val |= (vmode->mInterlaced ?
+ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
+ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
+
+ inv_val |= (vmode->mDVI ?
+ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
+ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
+
+ hdmi_writeb(inv_val, HDMI_FC_INVIDCONF);
+
+ /* Set up horizontal active pixel region width */
+ hdmi_writeb(fb_mode.xres >> 8, HDMI_FC_INHACTV1);
+ hdmi_writeb(fb_mode.xres, HDMI_FC_INHACTV0);
+
+ /* Set up vertical active pixel region height */
+ hdmi_writeb(fb_mode.yres >> 8, HDMI_FC_INVACTV1);
+ hdmi_writeb(fb_mode.yres, HDMI_FC_INVACTV0);
+
+ /* Set up horizontal blanking pixel region width */
+ hblank = fb_mode.left_margin + fb_mode.right_margin +
+ fb_mode.hsync_len;
+ hdmi_writeb(hblank >> 8, HDMI_FC_INHBLANK1);
+ hdmi_writeb(hblank, HDMI_FC_INHBLANK0);
+
+ /* Set up vertical blanking pixel region width */
+ vblank = fb_mode.upper_margin + fb_mode.lower_margin +
+ fb_mode.vsync_len;
+ hdmi_writeb(vblank, HDMI_FC_INVBLANK);
+
+ /* Set up HSYNC active edge delay width (in pixel clks) */
+ hdmi_writeb(fb_mode.right_margin >> 8, HDMI_FC_HSYNCINDELAY1);
+ hdmi_writeb(fb_mode.right_margin, HDMI_FC_HSYNCINDELAY0);
+
+ /* Set up VSYNC active edge delay (in lines) */
+ hdmi_writeb(fb_mode.lower_margin, HDMI_FC_VSYNCINDELAY);
+
+ /* Set up HSYNC active pulse width (in pixel clks) */
+ hdmi_writeb(fb_mode.hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1);
+ hdmi_writeb(fb_mode.hsync_len, HDMI_FC_HSYNCINWIDTH0);
+
+ /* Set up VSYNC active pulse width (in lines) */
+ hdmi_writeb(fb_mode.vsync_len, HDMI_FC_VSYNCINWIDTH);
+
+ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
+}
+
+static int mxc_edid_read_internal(struct mxc_hdmi *hdmi, unsigned char *edid,
+ struct mxc_edid_cfg *cfg, struct fb_info *fbi)
+{
+ int extblknum;
+ int i, j, ret;
+ unsigned char *ediddata = edid;
+ unsigned char tmpedid[EDID_LENGTH];
+
+ dev_info(&hdmi->pdev->dev, "%s\n", __func__);
+
+ if (!edid || !cfg || !fbi)
+ return -EINVAL;
+
+ /* init HDMI I2CM for read edid*/
+ hdmi_writeb(0x0, HDMI_I2CM_DIV);
+ hdmi_writeb(0x00, HDMI_I2CM_SS_SCL_HCNT_1_ADDR);
+ hdmi_writeb(0x79, HDMI_I2CM_SS_SCL_HCNT_0_ADDR);
+ hdmi_writeb(0x00, HDMI_I2CM_SS_SCL_LCNT_1_ADDR);
+ hdmi_writeb(0x91, HDMI_I2CM_SS_SCL_LCNT_0_ADDR);
+
+ hdmi_writeb(0x00, HDMI_I2CM_FS_SCL_HCNT_1_ADDR);
+ hdmi_writeb(0x0F, HDMI_I2CM_FS_SCL_HCNT_0_ADDR);
+ hdmi_writeb(0x00, HDMI_I2CM_FS_SCL_LCNT_1_ADDR);
+ hdmi_writeb(0x21, HDMI_I2CM_FS_SCL_LCNT_0_ADDR);
+
+ hdmi_writeb(0x50, HDMI_I2CM_SLAVE);
+ hdmi_writeb(0x30, HDMI_I2CM_SEGADDR);
+
+ /* Unmask EDID interrupt */
+ hdmi_writeb(HDMI_I2CM_INT_DONE_POL,
+ HDMI_I2CM_INT);
+
+ hdmi_writeb(HDMI_I2CM_CTLINT_NAC_POL |
+ HDMI_I2CM_CTLINT_ARBITRATION_POL,
+ HDMI_I2CM_CTLINT);
+
+ /* Clear the EDID data */
+ memset(edid, 0, EDID_LENGTH*4);
+ memset(cfg, 0, sizeof(struct mxc_edid_cfg));
+
+ /* Check the first three bytes of the EDID header */
+ if (!(hdmi_edid_i2c_read(hdmi, 0, 0) == 0x00) ||
+ !(hdmi_edid_i2c_read(hdmi, 1, 0) == 0xFF) ||
+ !(hdmi_edid_i2c_read(hdmi, 2, 0) == 0xFF)) {
+ dev_info(&hdmi->pdev->dev, "EDID head check failed!");
+ return -ENOENT;
+ }
+
+ for (i = 0; i < 128; i++) {
+ *ediddata = hdmi_edid_i2c_read(hdmi, i, 0);
+ ediddata++;
+ }
+
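+ /*
+ * Byte 0x7E of the base EDID block holds the number of extension
+ * blocks; 0xFF is treated as "none" (presumably an unprogrammed
+ * EEPROM).
+ */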
+ extblknum = edid[0x7E];
+ if (extblknum == 255)
+ extblknum = 0;
+
+ if (extblknum) {
+ ediddata = edid + EDID_LENGTH;
+ for (i = 0; i < 128; i++) {
+ *ediddata = hdmi_edid_i2c_read(hdmi, i, 1);
+ ediddata++;
+ }
+ }
+
+ /* edid first block parsing */
+ memset(&fbi->monspecs, 0, sizeof(fbi->monspecs));
+ fb_edid_to_monspecs(edid, &fbi->monspecs);
+
+ if (extblknum) {
+ ret = mxc_edid_parse_ext_blk(edid + EDID_LENGTH,
+ cfg, &fbi->monspecs);
+ if (ret < 0)
+ return -ENOENT;
+ }
+
+ /* Do we need to read more extension blocks? */
+ if (extblknum > 1) {
+ for (j = 2; j <= extblknum; j++) {
+ for (i = 0; i < 128; i++)
+ tmpedid[i] = hdmi_edid_i2c_read(hdmi, i, j);
+
+ /* edid ext block parsing */
+ ret = mxc_edid_parse_ext_blk(tmpedid,
+ cfg, &fbi->monspecs);
+ if (ret < 0)
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+static int mxc_hdmi_read_edid(struct mxc_hdmi *hdmi)
+{
+ int ret;
+ u8 edid_old[HDMI_EDID_LEN];
+ u8 clkdis;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ /* save old edid */
+ memcpy(edid_old, hdmi->edid, HDMI_EDID_LEN);
+
+ /* Read EDID through the built-in HDMI DDC master when HDCP is
+ * initialized, otherwise via the external I2C adapter */
+ if (!hdcp_init)
+ ret = mxc_edid_read(hdmi_i2c->adapter, hdmi_i2c->addr,
+ hdmi->edid, &hdmi->edid_cfg, hdmi->fbi);
+ else {
+
+ /* Disable HDCP clk */
+ if (hdmi->hdmi_data.hdcp_enable) {
+ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
+ clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE;
+ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
+ }
+
+ ret = mxc_edid_read_internal(hdmi, hdmi->edid,
+ &hdmi->edid_cfg, hdmi->fbi);
+
+ /* Enable HDCP clk */
+ if (hdmi->hdmi_data.hdcp_enable) {
+ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
+ clkdis &= ~HDMI_MC_CLKDIS_HDCPCLK_DISABLE;
+ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
+ }
+
+ }
+ if (ret < 0) {
+ dev_dbg(&hdmi->pdev->dev, "read failed\n");
+ return HDMI_EDID_FAIL;
+ }
+
+ /* Save edid cfg for audio driver */
+ hdmi_set_edid_cfg(&hdmi->edid_cfg);
+
+ if (!memcmp(edid_old, hdmi->edid, HDMI_EDID_LEN)) {
+ dev_info(&hdmi->pdev->dev, "same edid\n");
+ return HDMI_EDID_SAME;
+ }
+
+ if (hdmi->fbi->monspecs.modedb_len == 0) {
+ dev_info(&hdmi->pdev->dev, "No modes read from edid\n");
+ return HDMI_EDID_NO_MODES;
+ }
+
+ return HDMI_EDID_SUCCESS;
+}
+
+static void mxc_hdmi_phy_disable(struct mxc_hdmi *hdmi)
+{
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ if (!hdmi->phy_enabled)
+ return;
+
+ hdmi_disable_overflow_interrupts();
+
+ /* Setting PHY to reset status */
+ hdmi_writeb(HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
+
+ /* Power down PHY */
+ mxc_hdmi_phy_enable_tmds(0);
+ mxc_hdmi_phy_enable_power(0);
+ mxc_hdmi_phy_gen2_txpwron(0);
+ mxc_hdmi_phy_gen2_pddq(1);
+
+ hdmi->phy_enabled = false;
+ dev_dbg(&hdmi->pdev->dev, "%s - exit\n", __func__);
+}
+
+/* HDMI Initialization Step B.4 */
+static void mxc_hdmi_enable_video_path(struct mxc_hdmi *hdmi)
+{
+ u8 clkdis;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ /* control period minimum duration */
+ hdmi_writeb(12, HDMI_FC_CTRLDUR);
+ hdmi_writeb(32, HDMI_FC_EXCTRLDUR);
+ hdmi_writeb(1, HDMI_FC_EXCTRLSPAC);
+
+ /* Set to fill TMDS data channels */
+ hdmi_writeb(0x0B, HDMI_FC_CH0PREAM);
+ hdmi_writeb(0x16, HDMI_FC_CH1PREAM);
+ hdmi_writeb(0x21, HDMI_FC_CH2PREAM);
+
+ /* Enable pixel clock and tmds data path */
+ clkdis = 0x7F;
+ clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
+ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
+
+ clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
+ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
+
+ /* Enable csc path */
+ if (isColorSpaceConversion(hdmi)) {
+ clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
+ }
+}
+
+static void hdmi_enable_audio_clk(struct mxc_hdmi *hdmi)
+{
+ u8 clkdis;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
+ clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
+ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
+}
+
+/* Workaround to clear the overflow condition */
+static void mxc_hdmi_clear_overflow(struct mxc_hdmi *hdmi)
+{
+ int count;
+ u8 val;
+
+ /* TMDS software reset */
+ hdmi_writeb((u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ);
+
+ val = hdmi_readb(HDMI_FC_INVIDCONF);
+
+ if (cpu_is_imx6dl(hdmi)) {
+ hdmi_writeb(val, HDMI_FC_INVIDCONF);
+ return;
+ }
+
+ for (count = 0 ; count < 5 ; count++)
+ hdmi_writeb(val, HDMI_FC_INVIDCONF);
+}
+
+static void hdmi_enable_overflow_interrupts(void)
+{
+ pr_debug("%s\n", __func__);
+ hdmi_writeb(0, HDMI_FC_MASK2);
+ hdmi_writeb(0, HDMI_IH_MUTE_FC_STAT2);
+}
+
+static void hdmi_disable_overflow_interrupts(void)
+{
+ pr_debug("%s\n", __func__);
+ hdmi_writeb(HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
+ HDMI_IH_MUTE_FC_STAT2);
+ hdmi_writeb(0xff, HDMI_FC_MASK2);
+}
+
+static void mxc_hdmi_notify_fb(struct mxc_hdmi *hdmi)
+{
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ /* Don't notify if we aren't registered yet */
+ WARN_ON(!hdmi->fb_reg);
+
+ /* disable the phy before ipu changes mode */
+ mxc_hdmi_phy_disable(hdmi);
+
+ /*
+ * Note that fb_set_var will block. During this time,
+ * FB_EVENT_MODE_CHANGE callback will happen.
+ * So by the end of this function, mxc_hdmi_setup()
+ * will be done.
+ */
+ hdmi->fbi->var.activate |= FB_ACTIVATE_FORCE;
+ console_lock();
+ hdmi->fbi->flags |= FBINFO_MISC_USEREVENT;
+ fb_set_var(hdmi->fbi, &hdmi->fbi->var);
+ hdmi->fbi->flags &= ~FBINFO_MISC_USEREVENT;
+ console_unlock();
+
+ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
+}
+
+static void mxc_hdmi_edid_rebuild_modelist(struct mxc_hdmi *hdmi)
+{
+ int i;
+ struct fb_videomode *mode;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ console_lock();
+
+ fb_destroy_modelist(&hdmi->fbi->modelist);
+ fb_add_videomode(&vga_mode, &hdmi->fbi->modelist);
+
+ for (i = 0; i < hdmi->fbi->monspecs.modedb_len; i++) {
+ /*
+ * Filter the modes reported by the sink: interlaced modes are
+ * not currently supported, and only CEA modes are added to the
+ * modelist.
+ */
+ mode = &hdmi->fbi->monspecs.modedb[i];
+
+ if (!(mode->vmode & FB_VMODE_INTERLACED) &&
+ (mxc_edid_mode_to_vic(mode) != 0)) {
+
+ dev_dbg(&hdmi->pdev->dev, "Added mode %d:", i);
+ dev_dbg(&hdmi->pdev->dev,
+ "xres = %d, yres = %d, freq = %d, vmode = %d, flag = %d\n",
+ hdmi->fbi->monspecs.modedb[i].xres,
+ hdmi->fbi->monspecs.modedb[i].yres,
+ hdmi->fbi->monspecs.modedb[i].refresh,
+ hdmi->fbi->monspecs.modedb[i].vmode,
+ hdmi->fbi->monspecs.modedb[i].flag);
+
+ fb_add_videomode(mode, &hdmi->fbi->modelist);
+ }
+ }
+
+ fb_new_modelist(hdmi->fbi);
+
+ console_unlock();
+}
+
+static void mxc_hdmi_default_edid_cfg(struct mxc_hdmi *hdmi)
+{
+ /* Default setting HDMI working in HDMI mode */
+ hdmi->edid_cfg.hdmi_cap = true;
+}
+
+static void mxc_hdmi_default_modelist(struct mxc_hdmi *hdmi)
+{
+ u32 i;
+ const struct fb_videomode *mode;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ /* If no EDID data was read, set up a default modelist */
+ dev_info(&hdmi->pdev->dev, "create default modelist\n");
+
+ console_lock();
+
+ fb_destroy_modelist(&hdmi->fbi->modelist);
+
+ /* Add all non-interlaced CEA modes to the default modelist */
+ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
+ mode = &mxc_cea_mode[i];
+ if (!(mode->vmode & FB_VMODE_INTERLACED) && (mode->xres != 0))
+ fb_add_videomode(mode, &hdmi->fbi->modelist);
+ }
+
+ fb_new_modelist(hdmi->fbi);
+
+ console_unlock();
+}
+
+static void mxc_hdmi_set_mode_to_vga_dvi(struct mxc_hdmi *hdmi)
+{
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ hdmi_disable_overflow_interrupts();
+
+ fb_videomode_to_var(&hdmi->fbi->var, &vga_mode);
+
+ hdmi->requesting_vga_for_initialization = true;
+ mxc_hdmi_notify_fb(hdmi);
+ hdmi->requesting_vga_for_initialization = false;
+}
+
+static void mxc_hdmi_set_mode(struct mxc_hdmi *hdmi)
+{
+ const struct fb_videomode *mode;
+ struct fb_videomode m;
+ struct fb_var_screeninfo var;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ /* Set the default mode only once. */
+ if (!hdmi->dft_mode_set) {
+ fb_videomode_to_var(&var, &hdmi->default_mode);
+ hdmi->dft_mode_set = true;
+ } else
+ fb_videomode_to_var(&var, &hdmi->previous_non_vga_mode);
+
+ fb_var_to_videomode(&m, &var);
+ dump_fb_videomode(&m);
+
+ mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
+ if (!mode) {
+ pr_err("%s: could not find mode in modelist\n", __func__);
+ return;
+ }
+
+ /* If both the video mode and the HDMI/DVI working mode are
+ * unchanged, just reinitialize HDMI with the current settings */
+ if (fb_mode_is_equal(&hdmi->previous_non_vga_mode, mode) &&
+ (hdmi->edid_cfg.hdmi_cap != hdmi->hdmi_data.video_mode.mDVI)) {
+ dev_dbg(&hdmi->pdev->dev,
+ "%s: Video mode same as previous\n", __func__);
+ /* update fbi mode in case modelist is updated */
+ hdmi->fbi->mode = (struct fb_videomode *)mode;
+ fb_videomode_to_var(&hdmi->fbi->var, mode);
+ /* update hdmi setting in case EDID data updated */
+ mxc_hdmi_setup(hdmi, 0);
+ } else {
+ dev_dbg(&hdmi->pdev->dev, "%s: New video mode\n", __func__);
+ mxc_hdmi_set_mode_to_vga_dvi(hdmi);
+ fb_videomode_to_var(&hdmi->fbi->var, mode);
+ dump_fb_videomode((struct fb_videomode *)mode);
+ mxc_hdmi_notify_fb(hdmi);
+ }
+
+}
+
+static void mxc_hdmi_cable_connected(struct mxc_hdmi *hdmi)
+{
+ int edid_status;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ hdmi->cable_plugin = true;
+
+ /* HDMI Initialization Step C */
+ edid_status = mxc_hdmi_read_edid(hdmi);
+
+ /* Read EDID again if first EDID read failed */
+ if (edid_status == HDMI_EDID_NO_MODES ||
+ edid_status == HDMI_EDID_FAIL) {
+ int retry_status;
+ dev_info(&hdmi->pdev->dev, "Read EDID again\n");
+ msleep(200);
+ retry_status = mxc_hdmi_read_edid(hdmi);
+ /* If we get NO_MODES on the 1st and SAME on the 2nd attempt we
+ * want NO_MODES as final result. */
+ if (retry_status != HDMI_EDID_SAME)
+ edid_status = retry_status;
+ }
+
+ /* HDMI Initialization Steps D, E, F */
+ switch (edid_status) {
+ case HDMI_EDID_SUCCESS:
+ mxc_hdmi_edid_rebuild_modelist(hdmi);
+ break;
+
+ /* Nothing to do if EDID same */
+ case HDMI_EDID_SAME:
+ break;
+
+ case HDMI_EDID_FAIL:
+ mxc_hdmi_default_edid_cfg(hdmi);
+ /* No break here */
+ case HDMI_EDID_NO_MODES:
+ default:
+ mxc_hdmi_default_modelist(hdmi);
+ break;
+ }
+
+ /* Setting video mode */
+ mxc_hdmi_set_mode(hdmi);
+
+ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
+}
+
+static int mxc_hdmi_power_on(struct mxc_dispdrv_handle *disp,
+ struct fb_info *fbi)
+{
+ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
+ mxc_hdmi_phy_init(hdmi);
+ return 0;
+}
+
+static void mxc_hdmi_power_off(struct mxc_dispdrv_handle *disp,
+ struct fb_info *fbi)
+{
+ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
+ mxc_hdmi_phy_disable(hdmi);
+}
+
+static void mxc_hdmi_cable_disconnected(struct mxc_hdmi *hdmi)
+{
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ /* Disable All HDMI clock */
+ hdmi_writeb(0xff, HDMI_MC_CLKDIS);
+
+ mxc_hdmi_phy_disable(hdmi);
+
+ hdmi_disable_overflow_interrupts();
+
+ hdmi->cable_plugin = false;
+}
+
+static void hotplug_worker(struct work_struct *work)
+{
+ struct delayed_work *delay_work = to_delayed_work(work);
+ struct mxc_hdmi *hdmi =
+ container_of(delay_work, struct mxc_hdmi, hotplug_work);
+ u32 phy_int_stat, phy_int_pol, phy_int_mask;
+ u8 val;
+ unsigned long flags;
+ char event_string[32];
+ char *envp[] = { event_string, NULL };
+
+ phy_int_stat = hdmi->latest_intr_stat;
+ phy_int_pol = hdmi_readb(HDMI_PHY_POL0);
+
+ dev_dbg(&hdmi->pdev->dev, "phy_int_stat=0x%x, phy_int_pol=0x%x\n",
+ phy_int_stat, phy_int_pol);
+
+ /* check cable status */
+ if (phy_int_stat & HDMI_IH_PHY_STAT0_HPD) {
+ /* cable connection changes */
+ if (phy_int_pol & HDMI_PHY_HPD) {
+ /* Plugin event */
+ dev_dbg(&hdmi->pdev->dev, "EVENT=plugin\n");
+ mxc_hdmi_cable_connected(hdmi);
+
+ /* Make HPD intr active low to capture unplug event */
+ val = hdmi_readb(HDMI_PHY_POL0);
+ val &= ~HDMI_PHY_HPD;
+ hdmi_writeb(val, HDMI_PHY_POL0);
+
+ hdmi_set_cable_state(1);
+
+ sprintf(event_string, "EVENT=plugin");
+ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
+#ifdef CONFIG_MXC_HDMI_CEC
+ mxc_hdmi_cec_handle(0x80);
+#endif
+ } else if (!(phy_int_pol & HDMI_PHY_HPD)) {
+ /* Plugout event */
+ dev_dbg(&hdmi->pdev->dev, "EVENT=plugout\n");
+ hdmi_set_cable_state(0);
+ mxc_hdmi_abort_stream();
+ mxc_hdmi_cable_disconnected(hdmi);
+
+ /* Make HPD intr active high to capture plugin event */
+ val = hdmi_readb(HDMI_PHY_POL0);
+ val |= HDMI_PHY_HPD;
+ hdmi_writeb(val, HDMI_PHY_POL0);
+
+ sprintf(event_string, "EVENT=plugout");
+ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
+#ifdef CONFIG_MXC_HDMI_CEC
+ mxc_hdmi_cec_handle(0x100);
+#endif
+
+ } else
+ dev_dbg(&hdmi->pdev->dev, "EVENT=none?\n");
+ }
+
+ /* Lock here to ensure full powerdown sequence
+ * completed before next interrupt processed */
+ spin_lock_irqsave(&hdmi->irq_lock, flags);
+
+ /* Re-enable HPD interrupts */
+ phy_int_mask = hdmi_readb(HDMI_PHY_MASK0);
+ phy_int_mask &= ~HDMI_PHY_HPD;
+ hdmi_writeb(phy_int_mask, HDMI_PHY_MASK0);
+
+ /* Unmute interrupts */
+ hdmi_writeb(~HDMI_IH_MUTE_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
+
+ if (hdmi_readb(HDMI_IH_FC_STAT2) & HDMI_IH_FC_STAT2_OVERFLOW_MASK)
+ mxc_hdmi_clear_overflow(hdmi);
+
+ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
+}
+
+static void hdcp_hdp_worker(struct work_struct *work)
+{
+ struct delayed_work *delay_work = to_delayed_work(work);
+ struct mxc_hdmi *hdmi =
+ container_of(delay_work, struct mxc_hdmi, hdcp_hdp_work);
+ char event_string[32];
+ char *envp[] = { event_string, NULL };
+
+ /* HDCP interrupt */
+ sprintf(event_string, "EVENT=hdcpint");
+ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
+
+ /* Unmute interrupts in HDCP application*/
+}
+
+static irqreturn_t mxc_hdmi_hotplug(int irq, void *data)
+{
+ struct mxc_hdmi *hdmi = data;
+ u8 val, intr_stat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->irq_lock, flags);
+
+ /* Check and clean packet overflow interrupt.*/
+ if (hdmi_readb(HDMI_IH_FC_STAT2) &
+ HDMI_IH_FC_STAT2_OVERFLOW_MASK) {
+ mxc_hdmi_clear_overflow(hdmi);
+
+ dev_dbg(&hdmi->pdev->dev, "Overflow interrupt received\n");
+ /* clear irq status */
+ hdmi_writeb(HDMI_IH_FC_STAT2_OVERFLOW_MASK,
+ HDMI_IH_FC_STAT2);
+ }
+
+ /*
+ * We cannot disable the irq here, since the audio driver may
+ * have enabled it; instead, mask off the HDMI interrupts using
+ * the HDMI registers.
+ */
+ /* Capture status - used by the hotplug_worker work function */
+ intr_stat = hdmi_readb(HDMI_IH_PHY_STAT0);
+
+ if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+
+ dev_dbg(&hdmi->pdev->dev, "Hotplug interrupt received\n");
+ hdmi->latest_intr_stat = intr_stat;
+
+ /* Mute interrupts until handled */
+
+ val = hdmi_readb(HDMI_IH_MUTE_PHY_STAT0);
+ val |= HDMI_IH_MUTE_PHY_STAT0_HPD;
+ hdmi_writeb(val, HDMI_IH_MUTE_PHY_STAT0);
+
+ val = hdmi_readb(HDMI_PHY_MASK0);
+ val |= HDMI_PHY_HPD;
+ hdmi_writeb(val, HDMI_PHY_MASK0);
+
+ /* Clear Hotplug interrupts */
+ hdmi_writeb(HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+
+ schedule_delayed_work(&(hdmi->hotplug_work), msecs_to_jiffies(20));
+ }
+
+ /* Check HDCP interrupt state */
+ if (hdmi->hdmi_data.hdcp_enable) {
+ val = hdmi_readb(HDMI_A_APIINTSTAT);
+ if (val != 0) {
+ /* Mute interrupts until interrupt handled */
+ val = 0xFF;
+ hdmi_writeb(val, HDMI_A_APIINTMSK);
+ schedule_delayed_work(&(hdmi->hdcp_hdp_work), msecs_to_jiffies(50));
+ }
+ }
+
+ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void mxc_hdmi_setup(struct mxc_hdmi *hdmi, unsigned long event)
+{
+ struct fb_videomode m;
+ const struct fb_videomode *edid_mode;
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ fb_var_to_videomode(&m, &hdmi->fbi->var);
+ dump_fb_videomode(&m);
+
+ dev_dbg(&hdmi->pdev->dev, "%s - video mode changed\n", __func__);
+
+ hdmi->vic = 0;
+ if (!hdmi->requesting_vga_for_initialization) {
+ /* Save mode if this isn't the result of requesting
+ * vga default. */
+ memcpy(&hdmi->previous_non_vga_mode, &m,
+ sizeof(struct fb_videomode));
+ if (!list_empty(&hdmi->fbi->modelist)) {
+ edid_mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
+ pr_debug("edid mode ");
+ dump_fb_videomode((struct fb_videomode *)edid_mode);
+ /* update fbi mode */
+ hdmi->fbi->mode = (struct fb_videomode *)edid_mode;
+ hdmi->vic = mxc_edid_mode_to_vic(edid_mode);
+ }
+ }
+
+ hdmi_disable_overflow_interrupts();
+
+ dev_dbg(&hdmi->pdev->dev, "CEA mode used vic=%d\n", hdmi->vic);
+ if (hdmi->edid_cfg.hdmi_cap)
+ hdmi->hdmi_data.video_mode.mDVI = false;
+ else {
+ dev_dbg(&hdmi->pdev->dev, "CEA mode vic=%d work in DVI\n", hdmi->vic);
+ hdmi->hdmi_data.video_mode.mDVI = true;
+ }
+
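+ /*
+ * CEA SD modes (VIC 2/3 = 480p, 6/7 = 480i, 17/18 = 576p,
+ * 21/22 = 576i) use ITU-R BT.601 colorimetry; all other modes
+ * are treated as HD and use BT.709.
+ */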
+ if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
+ (hdmi->vic == 21) || (hdmi->vic == 22) ||
+ (hdmi->vic == 2) || (hdmi->vic == 3) ||
+ (hdmi->vic == 17) || (hdmi->vic == 18))
+ hdmi->hdmi_data.colorimetry = eITU601;
+ else
+ hdmi->hdmi_data.colorimetry = eITU709;
+
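+ /*
+ * These VICs are the 1440- and 2880-pixel-wide CEA formats that
+ * are transmitted with output pixel repetition.
+ */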
+ if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
+ (hdmi->vic == 12) || (hdmi->vic == 13) ||
+ (hdmi->vic == 14) || (hdmi->vic == 15) ||
+ (hdmi->vic == 25) || (hdmi->vic == 26) ||
+ (hdmi->vic == 27) || (hdmi->vic == 28) ||
+ (hdmi->vic == 29) || (hdmi->vic == 30) ||
+ (hdmi->vic == 35) || (hdmi->vic == 36) ||
+ (hdmi->vic == 37) || (hdmi->vic == 38))
+ hdmi->hdmi_data.video_mode.mPixelRepetitionOutput = 1;
+ else
+ hdmi->hdmi_data.video_mode.mPixelRepetitionOutput = 0;
+
+ hdmi->hdmi_data.video_mode.mPixelRepetitionInput = 0;
+
+ /* TODO: Get input format from IPU (via FB driver iface) */
+ hdmi->hdmi_data.enc_in_format = RGB;
+
+ hdmi->hdmi_data.enc_out_format = RGB;
+
+ /* YCbCr only enabled in HDMI mode */
+ if (!hdmi->hdmi_data.video_mode.mDVI &&
+ !hdmi->hdmi_data.rgb_out_enable) {
+ if (hdmi->edid_cfg.cea_ycbcr444)
+ hdmi->hdmi_data.enc_out_format = YCBCR444;
+ else if (hdmi->edid_cfg.cea_ycbcr422)
+ hdmi->hdmi_data.enc_out_format = YCBCR422_8BITS;
+ }
+
+ /* The IPU does not support deep color output */
+ hdmi->hdmi_data.enc_color_depth = 8;
+ hdmi->hdmi_data.pix_repet_factor = 0;
+ hdmi->hdmi_data.video_mode.mDataEnablePolarity = true;
+
+ /* HDMI Initialization Step B.1 */
+ hdmi_av_composer(hdmi);
+
+ /* HDMI Initialization Step B.2 */
+ mxc_hdmi_phy_init(hdmi);
+
+ /* HDMI Initialization Step B.3 */
+ mxc_hdmi_enable_video_path(hdmi);
+
+ /* not for DVI mode */
+ if (hdmi->hdmi_data.video_mode.mDVI)
+ dev_dbg(&hdmi->pdev->dev, "%s DVI mode\n", __func__);
+ else {
+ dev_dbg(&hdmi->pdev->dev, "%s CEA mode\n", __func__);
+
+ /* HDMI Initialization Step E - Configure audio */
+ hdmi_clk_regenerator_update_pixel_clock(hdmi->fbi->var.pixclock);
+ hdmi_enable_audio_clk(hdmi);
+
+ /* HDMI Initialization Step F - Configure AVI InfoFrame */
+ hdmi_config_AVI(hdmi);
+ }
+
+ hdmi_video_packetize(hdmi);
+ hdmi_video_csc(hdmi);
+ hdmi_video_sample(hdmi);
+
+ mxc_hdmi_clear_overflow(hdmi);
+
+ dev_dbg(&hdmi->pdev->dev, "%s exit\n\n", __func__);
+
+}
+
+/* Wait until we are registered to enable interrupts */
+static void mxc_hdmi_fb_registered(struct mxc_hdmi *hdmi)
+{
+ unsigned long flags;
+
+ if (hdmi->fb_reg)
+ return;
+
+ spin_lock_irqsave(&hdmi->irq_lock, flags);
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ hdmi_writeb(HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
+ HDMI_PHY_I2CM_INT_ADDR);
+
+ hdmi_writeb(HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
+ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
+ HDMI_PHY_I2CM_CTLINT_ADDR);
+
+ /* enable cable hot plug irq */
+ hdmi_writeb((u8)~HDMI_PHY_HPD, HDMI_PHY_MASK0);
+
+ /* Clear Hotplug interrupts */
+ hdmi_writeb(HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+
+ /* Unmute interrupts */
+ hdmi_writeb(~HDMI_IH_MUTE_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
+
+ hdmi->fb_reg = true;
+
+ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
+
+}
+
+static int mxc_hdmi_fb_event(struct notifier_block *nb,
+ unsigned long val, void *v)
+{
+ struct fb_event *event = v;
+ struct mxc_hdmi *hdmi = container_of(nb, struct mxc_hdmi, nb);
+
+ if (strcmp(event->info->fix.id, hdmi->fbi->fix.id))
+ return 0;
+
+ switch (val) {
+ case FB_EVENT_FB_REGISTERED:
+ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_FB_REGISTERED\n");
+ mxc_hdmi_fb_registered(hdmi);
+ hdmi_set_registered(1);
+ break;
+
+ case FB_EVENT_FB_UNREGISTERED:
+ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_FB_UNREGISTERED\n");
+ hdmi->fb_reg = false;
+ hdmi_set_registered(0);
+ break;
+
+ case FB_EVENT_MODE_CHANGE:
+ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_MODE_CHANGE\n");
+ if (hdmi->fb_reg)
+ mxc_hdmi_setup(hdmi, val);
+ break;
+
+ case FB_EVENT_BLANK:
+ if ((*((int *)event->data) == FB_BLANK_UNBLANK) &&
+ (*((int *)event->data) != hdmi->blank)) {
+ dev_dbg(&hdmi->pdev->dev,
+ "event=FB_EVENT_BLANK - UNBLANK\n");
+
+ hdmi->blank = *((int *)event->data);
+
+ if (hdmi->fb_reg && hdmi->cable_plugin)
+ mxc_hdmi_setup(hdmi, val);
+ hdmi_set_blank_state(1);
+
+ } else if (*((int *)event->data) != hdmi->blank) {
+ dev_dbg(&hdmi->pdev->dev,
+ "event=FB_EVENT_BLANK - BLANK\n");
+ hdmi_set_blank_state(0);
+ mxc_hdmi_abort_stream();
+
+ mxc_hdmi_phy_disable(hdmi);
+
+ hdmi->blank = *((int *)event->data);
+ } else
+			dev_dbg(&hdmi->pdev->dev,
+				"FB BLANK state not changed\n");
+
+ break;
+
+ case FB_EVENT_SUSPEND:
+ dev_dbg(&hdmi->pdev->dev,
+ "event=FB_EVENT_SUSPEND\n");
+
+ if (hdmi->blank == FB_BLANK_UNBLANK) {
+ mxc_hdmi_phy_disable(hdmi);
+ clk_disable(hdmi->hdmi_iahb_clk);
+ clk_disable(hdmi->hdmi_isfr_clk);
+ clk_disable(hdmi->mipi_core_clk);
+ }
+ break;
+
+ case FB_EVENT_RESUME:
+ dev_dbg(&hdmi->pdev->dev,
+ "event=FB_EVENT_RESUME\n");
+
+ if (hdmi->blank == FB_BLANK_UNBLANK) {
+ clk_enable(hdmi->mipi_core_clk);
+ clk_enable(hdmi->hdmi_iahb_clk);
+ clk_enable(hdmi->hdmi_isfr_clk);
+ mxc_hdmi_phy_init(hdmi);
+ }
+ break;
+
+ }
+ return 0;
+}
+
+static void hdmi_init_route(struct mxc_hdmi *hdmi)
+{
+ uint32_t hdmi_mux_setting, reg;
+ int ipu_id, disp_id;
+
+ ipu_id = mxc_hdmi_ipu_id;
+ disp_id = mxc_hdmi_disp_id;
+
+ if ((ipu_id > 1) || (ipu_id < 0)) {
+ pr_err("Invalid IPU select for HDMI: %d. Set to 0\n", ipu_id);
+ ipu_id = 0;
+ }
+
+ if ((disp_id > 1) || (disp_id < 0)) {
+ pr_err("Invalid DI select for HDMI: %d. Set to 0\n", disp_id);
+ disp_id = 0;
+ }
+
+ reg = readl(hdmi->gpr_hdmi_base);
+
+ /* Configure the connection between IPU1/2 and HDMI */
+ hdmi_mux_setting = 2*ipu_id + disp_id;
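+	/* i.e. IPU1/DI0 -> 0, IPU1/DI1 -> 1, IPU2/DI0 -> 2, IPU2/DI1 -> 3 */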
+
+ /* GPR3, bits 2-3 = HDMI_MUX_CTL */
+ reg &= ~0xd;
+ reg |= hdmi_mux_setting << 2;
+
+ writel(reg, hdmi->gpr_hdmi_base);
+
+ /* Set HDMI event as SDMA event2 for HDMI audio */
+ reg = readl(hdmi->gpr_sdma_base);
+ reg |= 0x1;
+ writel(reg, hdmi->gpr_sdma_base);
+}
+
+static void hdmi_hdcp_get_property(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+	/* Check whether HDCP is enabled in the device tree. */
+ hdcp_init = of_property_read_bool(np, "fsl,hdcp");
+ if (hdcp_init)
+ dev_dbg(&pdev->dev, "hdcp enable\n");
+ else
+ dev_dbg(&pdev->dev, "hdcp disable\n");
+}
+
+static void hdmi_get_of_property(struct mxc_hdmi *hdmi)
+{
+ struct platform_device *pdev = hdmi->pdev;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(imx_hdmi_dt_ids, &pdev->dev);
+ int ret;
+ u32 phy_reg_vlev = 0, phy_reg_cksymtx = 0;
+
+ if (of_id) {
+ pdev->id_entry = of_id->data;
+ hdmi->cpu_type = pdev->id_entry->driver_data;
+ }
+
+	/* The HDMI PHY register vlev and cksymtx properties are optional.
+	 * They allow a specific board to pass the HCT electrical tests.
+	 * Default values are set in the HDMI PHY config function
+	 * if they are not defined in the device tree.
+	 */
+ ret = of_property_read_u32(np, "fsl,phy_reg_vlev", &phy_reg_vlev);
+ if (ret)
+ dev_dbg(&pdev->dev, "No board specific HDMI PHY vlev\n");
+
+ ret = of_property_read_u32(np, "fsl,phy_reg_cksymtx", &phy_reg_cksymtx);
+ if (ret)
+ dev_dbg(&pdev->dev, "No board specific HDMI PHY cksymtx\n");
+
+ /* Specific phy config */
+ hdmi->phy_config.reg_cksymtx = phy_reg_cksymtx;
+ hdmi->phy_config.reg_vlev = phy_reg_vlev;
+
+}
+
+/* HDMI Initialization Step A */
+static int mxc_hdmi_disp_init(struct mxc_dispdrv_handle *disp,
+ struct mxc_dispdrv_setting *setting)
+{
+ int ret = 0;
+ u32 i;
+ const struct fb_videomode *mode;
+ struct fb_videomode m;
+ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
+ int irq = platform_get_irq(hdmi->pdev, 0);
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+	/* Only allow HDMI disp init to run once */
+ if (hdmi_inited) {
+ dev_err(&hdmi->pdev->dev,
+			"Error: only one HDMI output is supported now\n");
+ return -1;
+ }
+
+ hdmi_get_of_property(hdmi);
+
+ if (irq < 0)
+ return -ENODEV;
+
+ /* Setting HDMI default to blank state */
+ hdmi->blank = FB_BLANK_POWERDOWN;
+
+ ret = ipu_di_to_crtc(&hdmi->pdev->dev, mxc_hdmi_ipu_id,
+ mxc_hdmi_disp_id, &setting->crtc);
+ if (ret < 0)
+ return ret;
+
+ setting->if_fmt = IPU_PIX_FMT_RGB24;
+
+ hdmi->dft_mode_str = setting->dft_mode_str;
+ hdmi->default_bpp = setting->default_bpp;
+ dev_dbg(&hdmi->pdev->dev, "%s - default mode %s bpp=%d\n",
+ __func__, hdmi->dft_mode_str, hdmi->default_bpp);
+
+ hdmi->fbi = setting->fbi;
+
+ hdmi_init_route(hdmi);
+
+ hdmi->mipi_core_clk = clk_get(&hdmi->pdev->dev, "mipi_core");
+ if (IS_ERR(hdmi->mipi_core_clk)) {
+ ret = PTR_ERR(hdmi->mipi_core_clk);
+ dev_err(&hdmi->pdev->dev,
+ "Unable to get mipi core clk: %d\n", ret);
+ goto egetclk;
+ }
+
+ ret = clk_prepare_enable(hdmi->mipi_core_clk);
+ if (ret < 0) {
+ dev_err(&hdmi->pdev->dev,
+ "Cannot enable mipi core clock: %d\n", ret);
+ goto erate;
+ }
+
+ hdmi->hdmi_isfr_clk = clk_get(&hdmi->pdev->dev, "hdmi_isfr");
+ if (IS_ERR(hdmi->hdmi_isfr_clk)) {
+ ret = PTR_ERR(hdmi->hdmi_isfr_clk);
+ dev_err(&hdmi->pdev->dev,
+ "Unable to get HDMI clk: %d\n", ret);
+ goto egetclk1;
+ }
+
+ ret = clk_prepare_enable(hdmi->hdmi_isfr_clk);
+ if (ret < 0) {
+ dev_err(&hdmi->pdev->dev,
+ "Cannot enable HDMI isfr clock: %d\n", ret);
+ goto erate1;
+ }
+
+ hdmi->hdmi_iahb_clk = clk_get(&hdmi->pdev->dev, "hdmi_iahb");
+ if (IS_ERR(hdmi->hdmi_iahb_clk)) {
+ ret = PTR_ERR(hdmi->hdmi_iahb_clk);
+ dev_err(&hdmi->pdev->dev,
+ "Unable to get HDMI clk: %d\n", ret);
+ goto egetclk2;
+ }
+
+ ret = clk_prepare_enable(hdmi->hdmi_iahb_clk);
+ if (ret < 0) {
+ dev_err(&hdmi->pdev->dev,
+ "Cannot enable HDMI iahb clock: %d\n", ret);
+ goto erate2;
+ }
+
+ dev_dbg(&hdmi->pdev->dev, "Enabled HDMI clocks\n");
+
+ /* Init DDC pins for HDCP */
+ if (hdcp_init) {
+ hdmi->pinctrl = devm_pinctrl_get_select_default(&hdmi->pdev->dev);
+ if (IS_ERR(hdmi->pinctrl)) {
+ dev_err(&hdmi->pdev->dev, "can't get/select DDC pinctrl\n");
+ goto erate2;
+ }
+ }
+
+ /* Product and revision IDs */
+ dev_info(&hdmi->pdev->dev,
+ "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
+ hdmi_readb(HDMI_DESIGN_ID),
+ hdmi_readb(HDMI_REVISION_ID),
+ hdmi_readb(HDMI_PRODUCT_ID0),
+ hdmi_readb(HDMI_PRODUCT_ID1));
+
+ /* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
+ * N and cts values before enabling phy */
+ hdmi_init_clk_regenerator();
+
+ INIT_LIST_HEAD(&hdmi->fbi->modelist);
+
+ spin_lock_init(&hdmi->irq_lock);
+
+	/* Set the default mode and modelist at disp init time. */
+ fb_find_mode(&hdmi->fbi->var, hdmi->fbi,
+ hdmi->dft_mode_str, NULL, 0, NULL,
+ hdmi->default_bpp);
+
+ console_lock();
+
+ fb_destroy_modelist(&hdmi->fbi->modelist);
+
+	/* Add all non-interlaced CEA modes to the default modelist */
+ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
+ mode = &mxc_cea_mode[i];
+ if (!(mode->vmode & FB_VMODE_INTERLACED) && (mode->xres != 0))
+ fb_add_videomode(mode, &hdmi->fbi->modelist);
+ }
+
+ console_unlock();
+
+ /* Find a nearest mode in default modelist */
+ fb_var_to_videomode(&m, &hdmi->fbi->var);
+ dump_fb_videomode(&m);
+
+ hdmi->dft_mode_set = false;
+ /* Save default video mode */
+ memcpy(&hdmi->default_mode, &m, sizeof(struct fb_videomode));
+
+ mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
+ if (!mode) {
+ pr_err("%s: could not find mode in modelist\n", __func__);
+ return -1;
+ }
+
+ fb_videomode_to_var(&hdmi->fbi->var, mode);
+
+ /* update fbi mode */
+ hdmi->fbi->mode = (struct fb_videomode *)mode;
+
+	/* By default, HDMI works in HDMI (not DVI) mode */
+ hdmi->edid_cfg.hdmi_cap = true;
+
+ INIT_DELAYED_WORK(&hdmi->hotplug_work, hotplug_worker);
+ INIT_DELAYED_WORK(&hdmi->hdcp_hdp_work, hdcp_hdp_worker);
+
+ /* Configure registers related to HDMI interrupt
+ * generation before registering IRQ. */
+ hdmi_writeb(HDMI_PHY_HPD, HDMI_PHY_POL0);
+
+ /* Clear Hotplug interrupts */
+ hdmi_writeb(HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+
+ hdmi->nb.notifier_call = mxc_hdmi_fb_event;
+ ret = fb_register_client(&hdmi->nb);
+ if (ret < 0)
+ goto efbclient;
+
+ memset(&hdmi->hdmi_data, 0, sizeof(struct hdmi_data_info));
+
+ /* Default HDMI working in RGB mode */
+ hdmi->hdmi_data.rgb_out_enable = true;
+
+ ret = devm_request_irq(&hdmi->pdev->dev, irq, mxc_hdmi_hotplug, IRQF_SHARED,
+ dev_name(&hdmi->pdev->dev), hdmi);
+ if (ret < 0) {
+ dev_err(&hdmi->pdev->dev,
+ "Unable to request irq: %d\n", ret);
+ goto ereqirq;
+ }
+
+ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_fb_name);
+ if (ret < 0)
+ dev_warn(&hdmi->pdev->dev,
+			"could not create sys node for fb name\n");
+ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_cable_state);
+ if (ret < 0)
+ dev_warn(&hdmi->pdev->dev,
+			"could not create sys node for cable state\n");
+ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_edid);
+ if (ret < 0)
+ dev_warn(&hdmi->pdev->dev,
+			"could not create sys node for edid\n");
+
+ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_rgb_out_enable);
+ if (ret < 0)
+ dev_warn(&hdmi->pdev->dev,
+			"could not create sys node for rgb out enable\n");
+
+ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_hdcp_enable);
+ if (ret < 0)
+ dev_warn(&hdmi->pdev->dev,
+			"could not create sys node for hdcp enable\n");
+
+ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
+
+ hdmi_inited = true;
+
+ return ret;
+
+efbclient:
+ free_irq(irq, hdmi);
+ereqirq:
+ clk_disable_unprepare(hdmi->hdmi_iahb_clk);
+erate2:
+ clk_put(hdmi->hdmi_iahb_clk);
+egetclk2:
+ clk_disable_unprepare(hdmi->hdmi_isfr_clk);
+erate1:
+ clk_put(hdmi->hdmi_isfr_clk);
+egetclk1:
+ clk_disable_unprepare(hdmi->mipi_core_clk);
+erate:
+ clk_put(hdmi->mipi_core_clk);
+egetclk:
+ dev_dbg(&hdmi->pdev->dev, "%s error exit\n", __func__);
+
+ return ret;
+}
+
+static void mxc_hdmi_disp_deinit(struct mxc_dispdrv_handle *disp)
+{
+ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
+
+ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
+
+ fb_unregister_client(&hdmi->nb);
+
+ clk_disable_unprepare(hdmi->hdmi_isfr_clk);
+ clk_put(hdmi->hdmi_isfr_clk);
+ clk_disable_unprepare(hdmi->hdmi_iahb_clk);
+ clk_put(hdmi->hdmi_iahb_clk);
+ clk_disable_unprepare(hdmi->mipi_core_clk);
+ clk_put(hdmi->mipi_core_clk);
+
+ platform_device_unregister(hdmi->pdev);
+
+ hdmi_inited = false;
+}
+
+static struct mxc_dispdrv_driver mxc_hdmi_drv = {
+ .name = DISPDRV_HDMI,
+ .init = mxc_hdmi_disp_init,
+ .deinit = mxc_hdmi_disp_deinit,
+ .enable = mxc_hdmi_power_on,
+ .disable = mxc_hdmi_power_off,
+};
+
+
+static int mxc_hdmi_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static long mxc_hdmi_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int __user *argp = (void __user *)arg;
+ int ret = 0;
+
+ switch (cmd) {
+ case HDMI_IOC_GET_RESOURCE:
+ ret = copy_to_user(argp, &g_hdmi->hdmi_data,
+ sizeof(g_hdmi->hdmi_data)) ? -EFAULT : 0;
+ break;
+ case HDMI_IOC_GET_CPU_TYPE:
+ *argp = g_hdmi->cpu_type;
+ break;
+ default:
+		pr_debug("Unsupported cmd %d\n", cmd);
+ break;
+ }
+ return ret;
+}
+
+static int mxc_hdmi_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations mxc_hdmi_fops = {
+ .owner = THIS_MODULE,
+ .open = mxc_hdmi_open,
+ .release = mxc_hdmi_release,
+ .unlocked_ioctl = mxc_hdmi_ioctl,
+};
+
+
+static int mxc_hdmi_probe(struct platform_device *pdev)
+{
+ struct mxc_hdmi *hdmi;
+ struct device *temp_class;
+ struct resource *res;
+ int ret = 0;
+
+	/* Check that the I2C driver is loaded and available, and
+	 * whether the HDCP function is enabled in the device tree */
+ hdmi_hdcp_get_property(pdev);
+ if (!hdmi_i2c && !hdcp_init)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ hdmi = devm_kzalloc(&pdev->dev,
+ sizeof(struct mxc_hdmi),
+ GFP_KERNEL);
+ if (!hdmi) {
+ dev_err(&pdev->dev, "Cannot allocate device data\n");
+ ret = -ENOMEM;
+ goto ealloc;
+ }
+ g_hdmi = hdmi;
+
+ hdmi_major = register_chrdev(hdmi_major, "mxc_hdmi", &mxc_hdmi_fops);
+ if (hdmi_major < 0) {
+ printk(KERN_ERR "HDMI: unable to get a major for HDMI\n");
+ ret = -EBUSY;
+ goto ealloc;
+ }
+
+ hdmi_class = class_create(THIS_MODULE, "mxc_hdmi");
+ if (IS_ERR(hdmi_class)) {
+ ret = PTR_ERR(hdmi_class);
+ goto err_out_chrdev;
+ }
+
+ temp_class = device_create(hdmi_class, NULL, MKDEV(hdmi_major, 0),
+ NULL, "mxc_hdmi");
+ if (IS_ERR(temp_class)) {
+ ret = PTR_ERR(temp_class);
+ goto err_out_class;
+ }
+
+ hdmi->pdev = pdev;
+
+ hdmi->core_pdev = platform_device_alloc("mxc_hdmi_core", -1);
+ if (!hdmi->core_pdev) {
+ pr_err("%s failed platform_device_alloc for hdmi core\n",
+ __func__);
+ ret = -ENOMEM;
+ goto ecore;
+ }
+
+ hdmi->gpr_base = ioremap(res->start, resource_size(res));
+ if (!hdmi->gpr_base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto eiomap;
+ }
+
+ hdmi->gpr_hdmi_base = hdmi->gpr_base + 3;
+ hdmi->gpr_sdma_base = hdmi->gpr_base;
+
+ hdmi_inited = false;
+
+ hdmi->disp_mxc_hdmi = mxc_dispdrv_register(&mxc_hdmi_drv);
+ if (IS_ERR(hdmi->disp_mxc_hdmi)) {
+ dev_err(&pdev->dev, "Failed to register dispdrv - 0x%x\n",
+ (int)hdmi->disp_mxc_hdmi);
+ ret = (int)hdmi->disp_mxc_hdmi;
+ goto edispdrv;
+ }
+ mxc_dispdrv_setdata(hdmi->disp_mxc_hdmi, hdmi);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+edispdrv:
+ iounmap(hdmi->gpr_base);
+eiomap:
+ platform_device_put(hdmi->core_pdev);
+ecore:
+ kfree(hdmi);
+err_out_class:
+ device_destroy(hdmi_class, MKDEV(hdmi_major, 0));
+ class_destroy(hdmi_class);
+err_out_chrdev:
+ unregister_chrdev(hdmi_major, "mxc_hdmi");
+ealloc:
+ return ret;
+}
+
+static int mxc_hdmi_remove(struct platform_device *pdev)
+{
+ struct mxc_hdmi *hdmi = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ fb_unregister_client(&hdmi->nb);
+
+ mxc_dispdrv_puthandle(hdmi->disp_mxc_hdmi);
+ mxc_dispdrv_unregister(hdmi->disp_mxc_hdmi);
+ iounmap(hdmi->gpr_base);
+ /* No new work will be scheduled, wait for running ISR */
+ free_irq(irq, hdmi);
+ kfree(hdmi);
+ g_hdmi = NULL;
+
+ return 0;
+}
+
+static struct platform_driver mxc_hdmi_driver = {
+ .probe = mxc_hdmi_probe,
+ .remove = mxc_hdmi_remove,
+ .driver = {
+ .name = "mxc_hdmi",
+ .of_match_table = imx_hdmi_dt_ids,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mxc_hdmi_init(void)
+{
+ return platform_driver_register(&mxc_hdmi_driver);
+}
+module_init(mxc_hdmi_init);
+
+static void __exit mxc_hdmi_exit(void)
+{
+ if (hdmi_major > 0) {
+ device_destroy(hdmi_class, MKDEV(hdmi_major, 0));
+ class_destroy(hdmi_class);
+ unregister_chrdev(hdmi_major, "mxc_hdmi");
+ hdmi_major = 0;
+ }
+
+ platform_driver_unregister(&mxc_hdmi_driver);
+}
+module_exit(mxc_hdmi_exit);
+
+static int mxc_hdmi_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
+ return -ENODEV;
+
+ hdmi_i2c = client;
+
+ return 0;
+}
+
+static int mxc_hdmi_i2c_remove(struct i2c_client *client)
+{
+ hdmi_i2c = NULL;
+ return 0;
+}
+
+static const struct of_device_id imx_hdmi_i2c_match[] = {
+ { .compatible = "fsl,imx6-hdmi-i2c", },
+ { /* sentinel */ }
+};
+
+static const struct i2c_device_id mxc_hdmi_i2c_id[] = {
+ { "mxc_hdmi_i2c", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, mxc_hdmi_i2c_id);
+
+static struct i2c_driver mxc_hdmi_i2c_driver = {
+ .driver = {
+ .name = "mxc_hdmi_i2c",
+ .of_match_table = imx_hdmi_i2c_match,
+ },
+ .probe = mxc_hdmi_i2c_probe,
+ .remove = mxc_hdmi_i2c_remove,
+ .id_table = mxc_hdmi_i2c_id,
+};
+
+static int __init mxc_hdmi_i2c_init(void)
+{
+ return i2c_add_driver(&mxc_hdmi_i2c_driver);
+}
+
+static void __exit mxc_hdmi_i2c_exit(void)
+{
+ i2c_del_driver(&mxc_hdmi_i2c_driver);
+}
+
+module_init(mxc_hdmi_i2c_init);
+module_exit(mxc_hdmi_i2c_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
diff --git a/drivers/video/fbdev/mxc/mxc_ipuv3_fb.c b/drivers/video/fbdev/mxc/mxc_ipuv3_fb.c
new file mode 100644
index 000000000000..a72450c16c4c
--- /dev/null
+++ b/drivers/video/fbdev/mxc/mxc_ipuv3_fb.c
@@ -0,0 +1,3563 @@
+/*
+ * Copyright 2004-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
+ */
+
+/*!
+ * @file mxc_ipuv3_fb.c
+ *
+ * @brief MXC Frame buffer driver for SDC
+ *
+ * @ingroup Framebuffer
+ */
+
+/*!
+ * Include files
+ */
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/fsl_devices.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/ipu.h>
+#include <linux/ipu-v3.h>
+#include <linux/ipu-v3-pre.h>
+#include <linux/ipu-v3-prg.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mxcfb.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+
+#include "mxc_dispdrv.h"
+
+/*
+ * Driver name
+ */
+#define MXCFB_NAME "mxc_sdc_fb"
+
+/* Display port number */
+#define MXCFB_PORT_NUM 2
+/*!
+ * Structure containing the MXC specific framebuffer information.
+ */
+struct mxcfb_info {
+ int default_bpp;
+ int cur_blank;
+ int next_blank;
+ ipu_channel_t ipu_ch;
+ int ipu_id;
+ int ipu_di;
+ int pre_num;
+ u32 ipu_di_pix_fmt;
+ bool ipu_int_clk;
+ bool overlay;
+ bool alpha_chan_en;
+ bool late_init;
+ bool first_set_par;
+ bool resolve;
+ bool prefetch;
+ bool on_the_fly;
+ uint32_t final_pfmt;
+ unsigned long gpu_sec_buf_off;
+ dma_addr_t store_addr;
+ dma_addr_t alpha_phy_addr0;
+ dma_addr_t alpha_phy_addr1;
+ void *alpha_virt_addr0;
+ void *alpha_virt_addr1;
+ uint32_t alpha_mem_len;
+ uint32_t ipu_ch_irq;
+ uint32_t ipu_ch_nf_irq;
+ uint32_t ipu_alp_ch_irq;
+ uint32_t cur_ipu_buf;
+ uint32_t cur_ipu_alpha_buf;
+
+ u32 pseudo_palette[16];
+
+ bool mode_found;
+ struct completion flip_complete;
+ struct completion alpha_flip_complete;
+ struct completion vsync_complete;
+
+ void *ipu;
+ struct fb_info *ovfbi;
+
+ struct mxc_dispdrv_handle *dispdrv;
+
+ struct fb_var_screeninfo cur_var;
+ uint32_t cur_ipu_pfmt;
+ uint32_t cur_fb_pfmt;
+ bool cur_prefetch;
+};
+
+struct mxcfb_pfmt {
+ u32 fb_pix_fmt;
+ int bpp;
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+};
+
+struct mxcfb_tile_block {
+ u32 fb_pix_fmt;
+ int bw; /* in pixel */
+ int bh; /* in pixel */
+};
+
+#define NA (~0x0UL)
+static const struct mxcfb_pfmt mxcfb_pfmts[] = {
+ /* pixel bpp red green blue transp */
+ {IPU_PIX_FMT_RGB565, 16, {11, 5, 0}, { 5, 6, 0}, { 0, 5, 0}, { 0, 0, 0} },
+ {IPU_PIX_FMT_BGRA4444, 16, { 8, 4, 0}, { 4, 4, 0}, { 0, 4, 0}, { 12, 4, 0} },
+ {IPU_PIX_FMT_BGRA5551, 16, {10, 5, 0}, { 5, 5, 0}, { 0, 5, 0}, { 15, 1, 0} },
+ {IPU_PIX_FMT_RGB24, 24, { 0, 8, 0}, { 8, 8, 0}, {16, 8, 0}, { 0, 0, 0} },
+ {IPU_PIX_FMT_BGR24, 24, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0}, { 0, 0, 0} },
+ {IPU_PIX_FMT_RGB32, 32, { 0, 8, 0}, { 8, 8, 0}, {16, 8, 0}, {24, 8, 0} },
+ {IPU_PIX_FMT_BGR32, 32, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0}, {24, 8, 0} },
+ {IPU_PIX_FMT_ABGR32, 32, {24, 8, 0}, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0} },
+ /* pixel bpp red green blue transp */
+ {IPU_PIX_FMT_YUV420P2, 12, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_YUV420P, 12, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_YVU420P, 12, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_NV12, 12, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {PRE_PIX_FMT_NV21, 12, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_NV16, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {PRE_PIX_FMT_NV61, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_YUV422P, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_YVU422P, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_UYVY, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_YUYV, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_YUV444, 24, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_YUV444P, 24, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_AYUV, 32, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ /* pixel bpp red green blue transp */
+ {IPU_PIX_FMT_GPU32_SB_ST, 32, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_GPU32_SB_SRT, 32, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_GPU32_ST, 32, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_GPU32_SRT, 32, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_GPU16_SB_ST, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_GPU16_SB_SRT, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_GPU16_ST, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+ {IPU_PIX_FMT_GPU16_SRT, 16, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA}, {NA, NA, NA} },
+};
+
+/* Tile fb alignment */
+static const struct mxcfb_tile_block tas[] = {
+ {IPU_PIX_FMT_GPU32_SB_ST, 16, 8},
+ {IPU_PIX_FMT_GPU32_SB_SRT, 64, 128},
+ {IPU_PIX_FMT_GPU32_ST, 16, 4},
+ {IPU_PIX_FMT_GPU32_SRT, 64, 64},
+ {IPU_PIX_FMT_GPU16_SB_ST, 16, 8},
+ {IPU_PIX_FMT_GPU16_SB_SRT, 64, 128},
+ {IPU_PIX_FMT_GPU16_ST, 16, 4},
+ {IPU_PIX_FMT_GPU16_SRT, 64, 64},
+};
+
+/* Tile block sizes that can be resolved */
+static const struct mxcfb_tile_block trs[] = {
+ /* pixel w h */
+ {IPU_PIX_FMT_GPU32_SB_ST, 16, 4},
+ {IPU_PIX_FMT_GPU32_SB_SRT, 16, 4},
+ {IPU_PIX_FMT_GPU32_ST, 16, 4},
+ {IPU_PIX_FMT_GPU32_SRT, 16, 4},
+ {IPU_PIX_FMT_GPU16_SB_ST, 16, 4},
+ {IPU_PIX_FMT_GPU16_SB_SRT, 16, 4},
+ {IPU_PIX_FMT_GPU16_ST, 16, 4},
+ {IPU_PIX_FMT_GPU16_SRT, 16, 4},
+};
+
+struct mxcfb_alloc_list {
+ struct list_head list;
+ dma_addr_t phy_addr;
+ void *cpu_addr;
+ u32 size;
+};
+
+enum {
+ BOTH_ON,
+ SRC_ON,
+ TGT_ON,
+ BOTH_OFF
+};
+
+static bool g_dp_in_use[2];
+LIST_HEAD(fb_alloc_list);
+
+/* Return default standard(RGB) pixel format */
+static uint32_t bpp_to_pixfmt(int bpp)
+{
+ uint32_t pixfmt = 0;
+
+ switch (bpp) {
+ case 24:
+ pixfmt = IPU_PIX_FMT_BGR24;
+ break;
+ case 32:
+ pixfmt = IPU_PIX_FMT_BGR32;
+ break;
+ case 16:
+ pixfmt = IPU_PIX_FMT_RGB565;
+ break;
+ }
+ return pixfmt;
+}
+
+static inline int bitfield_is_equal(struct fb_bitfield f1,
+ struct fb_bitfield f2)
+{
+ return !memcmp(&f1, &f2, sizeof(f1));
+}
+
+static int pixfmt_to_var(uint32_t pixfmt, struct fb_var_screeninfo *var)
+{
+ int i, ret = -1;
+
+ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
+ if (pixfmt == mxcfb_pfmts[i].fb_pix_fmt) {
+ var->red = mxcfb_pfmts[i].red;
+ var->green = mxcfb_pfmts[i].green;
+ var->blue = mxcfb_pfmts[i].blue;
+ var->transp = mxcfb_pfmts[i].transp;
+ var->bits_per_pixel = mxcfb_pfmts[i].bpp;
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int bpp_to_var(int bpp, struct fb_var_screeninfo *var)
+{
+ uint32_t pixfmt = 0;
+
+ if (var->nonstd)
+ return -1;
+
+ pixfmt = bpp_to_pixfmt(bpp);
+ if (pixfmt)
+ return pixfmt_to_var(pixfmt, var);
+ else
+ return -1;
+}
+
+static int check_var_pixfmt(struct fb_var_screeninfo *var)
+{
+ int i, ret = -1;
+
+ if (var->nonstd) {
+ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
+ if (mxcfb_pfmts[i].fb_pix_fmt == var->nonstd) {
+ var->bits_per_pixel = mxcfb_pfmts[i].bpp;
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
+ if (bitfield_is_equal(var->red, mxcfb_pfmts[i].red) &&
+ bitfield_is_equal(var->green, mxcfb_pfmts[i].green) &&
+ bitfield_is_equal(var->blue, mxcfb_pfmts[i].blue) &&
+ bitfield_is_equal(var->transp, mxcfb_pfmts[i].transp) &&
+ var->bits_per_pixel == mxcfb_pfmts[i].bpp) {
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static uint32_t fb_to_store_pixfmt(uint32_t fb_pixfmt)
+{
+ switch (fb_pixfmt) {
+ case IPU_PIX_FMT_RGB32:
+ case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_ABGR32:
+ case IPU_PIX_FMT_RGB24:
+ case IPU_PIX_FMT_BGR24:
+ case IPU_PIX_FMT_RGB565:
+ case IPU_PIX_FMT_BGRA4444:
+ case IPU_PIX_FMT_BGRA5551:
+ case IPU_PIX_FMT_UYVY:
+ case IPU_PIX_FMT_YUYV:
+ case IPU_PIX_FMT_YUV444:
+ case IPU_PIX_FMT_AYUV:
+ return fb_pixfmt;
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YVU422P:
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_NV12:
+ case PRE_PIX_FMT_NV21:
+ case IPU_PIX_FMT_NV16:
+ case PRE_PIX_FMT_NV61:
+ case IPU_PIX_FMT_YUV420P:
+ return IPU_PIX_FMT_UYVY;
+ case IPU_PIX_FMT_YUV444P:
+ return IPU_PIX_FMT_AYUV;
+ default:
+ return 0;
+ }
+}
+
+static uint32_t fbi_to_pixfmt(struct fb_info *fbi, bool original_fb)
+{
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+ int i;
+ uint32_t pixfmt = 0;
+
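+	/* When the PRE prefetch engine is in use and the IPU-side format is
+	 * requested (original_fb == false), return the PRE store output format
+	 * instead of the raw fb format; GPU tile formats fall through to the
+	 * bitfield lookup below. */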
+ if (fbi->var.nonstd) {
+ if (mxc_fbi->prefetch && !original_fb) {
+ if (ipu_pixel_format_is_gpu_tile(fbi->var.nonstd))
+ goto next;
+
+ return fb_to_store_pixfmt(fbi->var.nonstd);
+ } else {
+ return fbi->var.nonstd;
+ }
+ }
+
+next:
+ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
+ if (bitfield_is_equal(fbi->var.red, mxcfb_pfmts[i].red) &&
+ bitfield_is_equal(fbi->var.green, mxcfb_pfmts[i].green) &&
+ bitfield_is_equal(fbi->var.blue, mxcfb_pfmts[i].blue) &&
+ bitfield_is_equal(fbi->var.transp, mxcfb_pfmts[i].transp)) {
+ pixfmt = mxcfb_pfmts[i].fb_pix_fmt;
+ break;
+ }
+ }
+
+ if (pixfmt == 0)
+ dev_err(fbi->device, "cannot get pixel format\n");
+
+ return pixfmt;
+}
+
+static void fmt_to_tile_alignment(uint32_t fmt, int *bw, int *bh)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tas); i++) {
+ if (tas[i].fb_pix_fmt == fmt) {
+ *bw = tas[i].bw;
+ *bh = tas[i].bh;
+ }
+ }
+
+ BUG_ON(!(*bw) || !(*bh));
+}
+
+static void fmt_to_tile_block(uint32_t fmt, int *bw, int *bh)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(trs); i++) {
+ if (trs[i].fb_pix_fmt == fmt) {
+ *bw = trs[i].bw;
+ *bh = trs[i].bh;
+ }
+ }
+
+ BUG_ON(!(*bw) || !(*bh));
+}
+
+static struct fb_info *found_registered_fb(ipu_channel_t ipu_ch, int ipu_id)
+{
+ int i;
+ struct mxcfb_info *mxc_fbi;
+ struct fb_info *fbi = NULL;
+
+ for (i = 0; i < num_registered_fb; i++) {
+ mxc_fbi =
+ ((struct mxcfb_info *)(registered_fb[i]->par));
+
+ if ((mxc_fbi->ipu_ch == ipu_ch) &&
+ (mxc_fbi->ipu_id == ipu_id)) {
+ fbi = registered_fb[i];
+ break;
+ }
+ }
+ return fbi;
+}
+
+static irqreturn_t mxcfb_irq_handler(int irq, void *dev_id);
+static irqreturn_t mxcfb_nf_irq_handler(int irq, void *dev_id);
+static int mxcfb_blank(int blank, struct fb_info *info);
+static int mxcfb_map_video_memory(struct fb_info *fbi);
+static int mxcfb_unmap_video_memory(struct fb_info *fbi);
+
+/*
+ * Set fixed framebuffer parameters based on variable settings.
+ *
+ * @param info framebuffer information pointer
+ */
+static int mxcfb_set_fix(struct fb_info *info)
+{
+ struct fb_fix_screeninfo *fix = &info->fix;
+ struct fb_var_screeninfo *var = &info->var;
+
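+	/* line_length is in bytes: virtual x resolution times bytes per pixel */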
+ fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
+
+ fix->type = FB_TYPE_PACKED_PIXELS;
+ fix->accel = FB_ACCEL_NONE;
+ fix->visual = FB_VISUAL_TRUECOLOR;
+ fix->xpanstep = 1;
+ fix->ywrapstep = 1;
+ fix->ypanstep = 1;
+
+ return 0;
+}
+
+static int _setup_disp_channel1(struct fb_info *fbi)
+{
+ ipu_channel_params_t params;
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+
+ memset(&params, 0, sizeof(params));
+
+ if (mxc_fbi->ipu_ch == MEM_DC_SYNC) {
+ params.mem_dc_sync.di = mxc_fbi->ipu_di;
+ if (fbi->var.vmode & FB_VMODE_INTERLACED)
+ params.mem_dc_sync.interlaced = true;
+ params.mem_dc_sync.out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
+ params.mem_dc_sync.in_pixel_fmt = mxc_fbi->on_the_fly ?
+ mxc_fbi->final_pfmt :
+ fbi_to_pixfmt(fbi, false);
+ } else {
+ params.mem_dp_bg_sync.di = mxc_fbi->ipu_di;
+ if (fbi->var.vmode & FB_VMODE_INTERLACED)
+ params.mem_dp_bg_sync.interlaced = true;
+ params.mem_dp_bg_sync.out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
+ params.mem_dp_bg_sync.in_pixel_fmt = mxc_fbi->on_the_fly ?
+ mxc_fbi->final_pfmt :
+ fbi_to_pixfmt(fbi, false);
+ if (mxc_fbi->alpha_chan_en)
+ params.mem_dp_bg_sync.alpha_chan_en = true;
+ }
+ ipu_init_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, &params);
+
+ return 0;
+}
+
+static int _setup_disp_channel2(struct fb_info *fbi)
+{
+ int retval = 0;
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+ int fb_stride, ipu_stride, bw = 0, bh = 0;
+ unsigned long base, ipu_base;
+ unsigned int fr_xoff, fr_yoff, fr_w, fr_h;
+ unsigned int prg_width;
+ struct ipu_pre_context pre;
+ bool post_pre_disable = false;
+
+ switch (fbi_to_pixfmt(fbi, true)) {
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_NV12:
+ case PRE_PIX_FMT_NV21:
+ case IPU_PIX_FMT_NV16:
+ case PRE_PIX_FMT_NV61:
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YVU422P:
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YUV444P:
+ fb_stride = fbi->var.xres_virtual;
+ break;
+ default:
+ fb_stride = fbi->fix.line_length;
+ }
+
+ base = fbi->fix.smem_start;
+ fr_xoff = fbi->var.xoffset;
+ fr_w = fbi->var.xres_virtual;
+ if (!(fbi->var.vmode & FB_VMODE_YWRAP)) {
+ dev_dbg(fbi->device, "Y wrap disabled\n");
+ fr_yoff = fbi->var.yoffset % fbi->var.yres;
+ fr_h = fbi->var.yres;
+ base += fbi->fix.line_length * fbi->var.yres *
+ (fbi->var.yoffset / fbi->var.yres);
+ if (ipu_pixel_format_is_split_gpu_tile(fbi->var.nonstd))
+ base += (mxc_fbi->gpu_sec_buf_off -
+ fbi->fix.line_length * fbi->var.yres / 2) *
+ (fbi->var.yoffset / fbi->var.yres);
+ } else {
+ dev_dbg(fbi->device, "Y wrap enabled\n");
+ fr_yoff = fbi->var.yoffset;
+ fr_h = fbi->var.yres_virtual;
+ }
+
+ /* pixel block alignment for resolving cases */
+ if (mxc_fbi->resolve) {
+ fmt_to_tile_block(fbi->var.nonstd, &bw, &bh);
+ } else {
+ base += fr_yoff * fb_stride + fr_xoff *
+ bytes_per_pixel(fbi_to_pixfmt(fbi, true));
+ }
+
+ if (!mxc_fbi->on_the_fly)
+ mxc_fbi->cur_ipu_buf = 2;
+ init_completion(&mxc_fbi->flip_complete);
+ /*
+ * We don't need to wait for vsync at the first time
+ * we do pan display after fb is initialized, as IPU will
+ * switch to the newly selected buffer automatically,
+ * so we call complete() for both mxc_fbi->flip_complete
+ * and mxc_fbi->alpha_flip_complete.
+ */
+ complete(&mxc_fbi->flip_complete);
+ if (mxc_fbi->alpha_chan_en) {
+ mxc_fbi->cur_ipu_alpha_buf = 1;
+ init_completion(&mxc_fbi->alpha_flip_complete);
+ complete(&mxc_fbi->alpha_flip_complete);
+ }
+
+ if (mxc_fbi->prefetch) {
+ struct ipu_prg_config prg;
+ struct fb_var_screeninfo from_var, to_var;
+
+ if (mxc_fbi->pre_num < 0) {
+ mxc_fbi->pre_num = ipu_pre_alloc(mxc_fbi->ipu_id,
+ mxc_fbi->ipu_ch);
+ if (mxc_fbi->pre_num < 0) {
+ dev_dbg(fbi->device, "failed to alloc PRE\n");
+ mxc_fbi->prefetch = mxc_fbi->cur_prefetch;
+ mxc_fbi->resolve = false;
+ if (!mxc_fbi->on_the_fly)
+ mxc_fbi->cur_blank = FB_BLANK_POWERDOWN;
+ return mxc_fbi->pre_num;
+ }
+ }
+ pre.repeat = true;
+ pre.vflip = fbi->var.rotate ? true : false;
+ pre.handshake_en = true;
+ pre.hsk_abort_en = true;
+ pre.hsk_line_num = 0;
+ pre.sdw_update = false;
+ pre.cur_buf = base;
+ pre.next_buf = pre.cur_buf;
+ if (fbi->var.vmode & FB_VMODE_INTERLACED) {
+ pre.interlaced = 2;
+ if (mxc_fbi->resolve) {
+ pre.field_inverse = fbi->var.rotate;
+ pre.interlace_offset = 0;
+ } else {
+ pre.field_inverse = 0;
+ if (fbi->var.rotate) {
+ pre.interlace_offset = ~(fbi->var.xres_virtual *
+ bytes_per_pixel(fbi_to_pixfmt(fbi, true))) + 1;
+ pre.cur_buf += fbi->var.xres_virtual * bytes_per_pixel(fbi_to_pixfmt(fbi, true));
+ pre.next_buf = pre.cur_buf;
+ } else {
+ pre.interlace_offset = fbi->var.xres_virtual *
+ bytes_per_pixel(fbi_to_pixfmt(fbi, true));
+ }
+ }
+ } else {
+ pre.interlaced = 0;
+ pre.interlace_offset = 0;
+ }
+ pre.prefetch_mode = mxc_fbi->resolve ? 1 : 0;
+ pre.tile_fmt = mxc_fbi->resolve ? fbi->var.nonstd : 0;
+ pre.read_burst = mxc_fbi->resolve ? 0x4 : 0x3;
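+		/* For 3-byte (24-bit) pixel formats, use the widest PRE input
+		 * bpp (64/32/16/8) that evenly divides both the visible and
+		 * virtual line widths in bytes. */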
+ if (fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_RGB24 ||
+ fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_BGR24 ||
+ fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_YUV444) {
+ if ((fbi->var.xres * 3) % 8 == 0 &&
+ (fbi->var.xres_virtual * 3) % 8 == 0)
+ pre.prefetch_input_bpp = 64;
+ else if ((fbi->var.xres * 3) % 4 == 0 &&
+ (fbi->var.xres_virtual * 3) % 4 == 0)
+ pre.prefetch_input_bpp = 32;
+ else if ((fbi->var.xres * 3) % 2 == 0 &&
+ (fbi->var.xres_virtual * 3) % 2 == 0)
+ pre.prefetch_input_bpp = 16;
+ else
+ pre.prefetch_input_bpp = 8;
+ } else {
+ pre.prefetch_input_bpp =
+ 8 * bytes_per_pixel(fbi_to_pixfmt(fbi, true));
+ }
+ pre.prefetch_input_pixel_fmt = mxc_fbi->resolve ?
+ 0x1 : (fbi->var.nonstd ? fbi->var.nonstd : 0);
+ pre.shift_bypass = (mxc_fbi->on_the_fly &&
+ mxc_fbi->final_pfmt != fbi_to_pixfmt(fbi, false)) ?
+ false : true;
+ pixfmt_to_var(fbi_to_pixfmt(fbi, false), &from_var);
+ pixfmt_to_var(mxc_fbi->final_pfmt, &to_var);
+ if (mxc_fbi->on_the_fly &&
+ (format_to_colorspace(fbi_to_pixfmt(fbi, true)) == RGB) &&
+ (bytes_per_pixel(fbi_to_pixfmt(fbi, true)) == 4)) {
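+			/* Pack the per-component shift offsets and widths for the
+			 * PRE shift stage; this conversion path is only taken for
+			 * 32-bit RGB formats when reconfiguring on the fly. */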
+ pre.prefetch_shift_offset = (from_var.red.offset << to_var.red.offset) |
+ (from_var.green.offset << to_var.green.offset) |
+ (from_var.blue.offset << to_var.blue.offset) |
+ (from_var.transp.offset << to_var.transp.offset);
+ pre.prefetch_shift_width = (to_var.red.length << (to_var.red.offset/2)) |
+ (to_var.green.length << (to_var.green.offset/2)) |
+ (to_var.blue.length << (to_var.blue.offset/2)) |
+ (to_var.transp.length << (to_var.transp.offset/2));
+ } else {
+ pre.prefetch_shift_offset = 0;
+ pre.prefetch_shift_width = 0;
+ }
+ pre.tpr_coor_offset_en = mxc_fbi->resolve ? true : false;
+ pre.prefetch_output_size.left = mxc_fbi->resolve ? (fr_xoff & ~(bw - 1)) : 0;
+ pre.prefetch_output_size.top = mxc_fbi->resolve ? (fr_yoff & ~(bh - 1)) : 0;
+ if (fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_RGB24 ||
+ fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_BGR24 ||
+ fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_YUV444) {
+ pre.prefetch_output_size.width = (fbi->var.xres * 3) /
+ (pre.prefetch_input_bpp / 8);
+ pre.store_output_bpp = pre.prefetch_input_bpp;
+ } else {
+ pre.prefetch_output_size.width = fbi->var.xres;
+ pre.store_output_bpp = 8 *
+ bytes_per_pixel(fbi_to_pixfmt(fbi, false));
+ }
+ pre.prefetch_output_size.height = fbi->var.yres;
+ pre.prefetch_input_active_width = pre.prefetch_output_size.width;
+ if (fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_RGB24 ||
+ fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_BGR24 ||
+ fbi_to_pixfmt(fbi, true) == IPU_PIX_FMT_YUV444)
+ pre.prefetch_input_width = (fbi->var.xres_virtual * 3) /
+ (pre.prefetch_input_bpp / 8);
+ else
+ pre.prefetch_input_width = fbi->var.xres_virtual;
+ pre.prefetch_input_height = fbi->var.yres;
+ prg_width = pre.prefetch_output_size.width;
+ if (!(pre.prefetch_input_active_width % 32))
+ pre.block_size = 0;
+ else if (!(pre.prefetch_input_active_width % 16))
+ pre.block_size = 1;
+ else
+ pre.block_size = 0;
+ if (mxc_fbi->resolve) {
+ int bs = pre.block_size ? 16 : 32;
+ pre.prefetch_input_active_width += fr_xoff % bw;
+ if (((fr_xoff % bw) + pre.prefetch_input_active_width) % bs)
+ pre.prefetch_input_active_width =
+ ALIGN(pre.prefetch_input_active_width, bs);
+ pre.prefetch_output_size.width =
+ pre.prefetch_input_active_width;
+ prg_width = pre.prefetch_output_size.width;
+ pre.prefetch_input_height += fr_yoff % bh;
+ if (((fr_yoff % bh) + fbi->var.yres) % 4) {
+ pre.prefetch_input_height =
+ (fbi->var.vmode & FB_VMODE_INTERLACED) ?
+ ALIGN(pre.prefetch_input_height, 8) :
+ ALIGN(pre.prefetch_input_height, 4);
+ } else {
+ if (fbi->var.vmode & FB_VMODE_INTERLACED)
+ pre.prefetch_input_height =
+ ALIGN(pre.prefetch_input_height, 8);
+ }
+ pre.prefetch_output_size.height = pre.prefetch_input_height;
+ }
+
+ /* store output pitch 8-byte aligned */
+ while ((pre.store_output_bpp * prg_width) % 64)
+ prg_width++;
+
+ pre.store_pitch = (pre.store_output_bpp * prg_width) / 8;
+
+ pre.store_en = true;
+ pre.write_burst = 0x3;
+ ipu_get_channel_offset(fbi_to_pixfmt(fbi, true),
+ fbi->var.xres,
+ fr_h,
+ fr_w,
+ 0, 0,
+ fr_yoff,
+ fr_xoff,
+ &pre.sec_buf_off,
+ &pre.trd_buf_off);
+ if (mxc_fbi->resolve)
+ pre.sec_buf_off = mxc_fbi->gpu_sec_buf_off;
+
+ ipu_pre_config(mxc_fbi->pre_num, &pre);
+ ipu_stride = pre.store_pitch;
+ ipu_base = pre.store_addr;
+ mxc_fbi->store_addr = ipu_base;
+
+ retval = ipu_pre_sdw_update(mxc_fbi->pre_num);
+ if (retval < 0) {
+ dev_err(fbi->device,
+ "failed to update PRE shadow reg %d\n", retval);
+ return retval;
+ }
+
+ if (!mxc_fbi->on_the_fly || !mxc_fbi->cur_prefetch) {
+ prg.id = mxc_fbi->ipu_id;
+ prg.pre_num = mxc_fbi->pre_num;
+ prg.ipu_ch = mxc_fbi->ipu_ch;
+ prg.so = (fbi->var.vmode & FB_VMODE_INTERLACED) ?
+ PRG_SO_INTERLACE : PRG_SO_PROGRESSIVE;
+ prg.vflip = fbi->var.rotate ? true : false;
+ prg.block_mode = mxc_fbi->resolve ? PRG_BLOCK_MODE : PRG_SCAN_MODE;
+ prg.stride = (fbi->var.vmode & FB_VMODE_INTERLACED) ?
+ ipu_stride * 2 : ipu_stride;
+ prg.ilo = (fbi->var.vmode & FB_VMODE_INTERLACED) ?
+ ipu_stride : 0;
+ prg.height = mxc_fbi->resolve ?
+ pre.prefetch_output_size.height : fbi->var.yres;
+ prg.ipu_height = fbi->var.yres;
+ prg.crop_line = mxc_fbi->resolve ?
+ ((fbi->var.vmode & FB_VMODE_INTERLACED) ? (fr_yoff % bh) / 2 : fr_yoff % bh) : 0;
+ prg.baddr = pre.store_addr;
+ prg.offset = mxc_fbi->resolve ? (prg.crop_line * prg.stride +
+ (fr_xoff % bw) *
+ bytes_per_pixel(fbi_to_pixfmt(fbi, false))) : 0;
+ ipu_base += prg.offset;
+ if (ipu_base % 8) {
+ dev_err(fbi->device,
+					"IPU base address is not 8-byte aligned\n");
+ return -EINVAL;
+ }
+ mxc_fbi->store_addr = ipu_base;
+
+ if (!mxc_fbi->on_the_fly) {
+ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
+ fbi_to_pixfmt(fbi, false),
+ fbi->var.xres, fbi->var.yres,
+ ipu_stride,
+ fbi->var.rotate,
+ ipu_base,
+ ipu_base,
+ fbi->var.accel_flags &
+ FB_ACCEL_DOUBLE_FLAG ? 0 : ipu_base,
+ 0, 0);
+ if (retval) {
+ dev_err(fbi->device,
+ "ipu_init_channel_buffer error %d\n", retval);
+ return retval;
+ }
+ }
+
+ retval = ipu_prg_config(&prg);
+ if (retval < 0) {
+ dev_err(fbi->device,
+ "failed to configure PRG %d\n", retval);
+ return retval;
+ }
+
+ retval = ipu_pre_enable(mxc_fbi->pre_num);
+ if (retval < 0) {
+ ipu_prg_disable(mxc_fbi->ipu_id, mxc_fbi->pre_num);
+ dev_err(fbi->device,
+ "failed to enable PRE %d\n", retval);
+ return retval;
+ }
+
+ retval = ipu_prg_wait_buf_ready(mxc_fbi->ipu_id,
+ mxc_fbi->pre_num,
+ pre.hsk_line_num,
+ pre.prefetch_output_size.height);
+ if (retval < 0) {
+ ipu_prg_disable(mxc_fbi->ipu_id, mxc_fbi->pre_num);
+ ipu_pre_disable(mxc_fbi->pre_num);
+ ipu_pre_free(&mxc_fbi->pre_num);
+			dev_err(fbi->device, "failed to wait for PRG ready %d\n", retval);
+ return retval;
+ }
+ }
+ } else {
+ ipu_stride = fb_stride;
+ ipu_base = base;
+ if (mxc_fbi->on_the_fly)
+ post_pre_disable = true;
+ }
+
+ if (mxc_fbi->on_the_fly && ((mxc_fbi->cur_prefetch && !mxc_fbi->prefetch) ||
+ (!mxc_fbi->cur_prefetch && mxc_fbi->prefetch))) {
+ int htotal = fbi->var.xres + fbi->var.right_margin +
+ fbi->var.hsync_len + fbi->var.left_margin;
+ int vtotal = fbi->var.yres + fbi->var.lower_margin +
+ fbi->var.vsync_len + fbi->var.upper_margin;
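+		/* Budget roughly two frame periods (in ms) for the buffer
+		 * switch; each polling iteration below waits 1 ms. */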
+		int timeout = ((htotal * vtotal) / PICOS2KHZ(fbi->var.pixclock)) * 2;
+ int cur_buf = 0;
+
+ BUG_ON(timeout <= 0);
+
+ ++mxc_fbi->cur_ipu_buf;
+ mxc_fbi->cur_ipu_buf %= 3;
+ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER,
+ mxc_fbi->cur_ipu_buf,
+ ipu_base) == 0) {
+ if (!mxc_fbi->prefetch)
+ ipu_update_channel_offset(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER,
+ fbi_to_pixfmt(fbi, true),
+ fr_w,
+ fr_h,
+ fr_w,
+ 0, 0,
+ fr_yoff,
+ fr_xoff);
+ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER, mxc_fbi->cur_ipu_buf);
+ for (; timeout > 0; timeout--) {
+ cur_buf = ipu_get_cur_buffer_idx(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER);
+ if (cur_buf == mxc_fbi->cur_ipu_buf)
+ break;
+
+ udelay(1000);
+ }
+ if (!timeout)
+ dev_err(fbi->device, "Timeout for switch to buf %d "
+ "to address=0x%08lX, current buf %d, "
+ "buf0 ready %d, buf1 ready %d, buf2 ready "
+ "%d\n", mxc_fbi->cur_ipu_buf, ipu_base,
+ ipu_get_cur_buffer_idx(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER),
+ ipu_check_buffer_ready(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER, 0),
+ ipu_check_buffer_ready(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER, 1),
+ ipu_check_buffer_ready(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER, 2));
+ }
+ } else if (!mxc_fbi->on_the_fly && !mxc_fbi->prefetch) {
+ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
+ mxc_fbi->on_the_fly ? mxc_fbi->final_pfmt :
+ fbi_to_pixfmt(fbi, false),
+ fbi->var.xres, fbi->var.yres,
+ ipu_stride,
+ fbi->var.rotate,
+ ipu_base,
+ ipu_base,
+ fbi->var.accel_flags &
+ FB_ACCEL_DOUBLE_FLAG ? 0 : ipu_base,
+ 0, 0);
+ if (retval) {
+ dev_err(fbi->device,
+ "ipu_init_channel_buffer error %d\n", retval);
+ return retval;
+ }
+ /* update u/v offset */
+ if (!mxc_fbi->prefetch)
+ ipu_update_channel_offset(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER,
+ fbi_to_pixfmt(fbi, true),
+ fr_w,
+ fr_h,
+ fr_w,
+ 0, 0,
+ fr_yoff,
+ fr_xoff);
+ }
+
+ if (post_pre_disable) {
+ ipu_prg_disable(mxc_fbi->ipu_id, mxc_fbi->pre_num);
+ ipu_pre_disable(mxc_fbi->pre_num);
+ ipu_pre_free(&mxc_fbi->pre_num);
+ }
+
+ if (mxc_fbi->alpha_chan_en) {
+ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ IPU_ALPHA_IN_BUFFER,
+ IPU_PIX_FMT_GENERIC,
+ fbi->var.xres, fbi->var.yres,
+ fbi->var.xres,
+ fbi->var.rotate,
+ mxc_fbi->alpha_phy_addr1,
+ mxc_fbi->alpha_phy_addr0,
+ 0,
+ 0, 0);
+ if (retval) {
+ dev_err(fbi->device,
+ "ipu_init_channel_buffer error %d\n", retval);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+static bool mxcfb_need_to_set_par(struct fb_info *fbi)
+{
+ struct mxcfb_info *mxc_fbi = fbi->par;
+
+ if ((fbi->var.activate & FB_ACTIVATE_FORCE) &&
+ (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW)
+ return true;
+
+ /*
+ * Ignore xoffset and yoffset update,
+ * because pan display handles this case.
+ */
+ mxc_fbi->cur_var.xoffset = fbi->var.xoffset;
+ mxc_fbi->cur_var.yoffset = fbi->var.yoffset;
+
+ return !!memcmp(&mxc_fbi->cur_var, &fbi->var,
+ sizeof(struct fb_var_screeninfo));
+}
+
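+/*
+ * Decide whether the new video mode can be applied without tearing down and
+ * re-initializing the IPU display channel ("on the fly"). This is only
+ * possible on the PRE prefetch path and when the pixel format and geometry
+ * remain compatible with the current configuration.
+ */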
+static bool mxcfb_can_set_par_on_the_fly(struct fb_info *fbi,
+ uint32_t *final_pfmt)
+{
+ struct mxcfb_info *mxc_fbi = fbi->par;
+ struct fb_var_screeninfo cur_var = mxc_fbi->cur_var;
+ uint32_t cur_pfmt = mxc_fbi->cur_ipu_pfmt;
+ uint32_t new_pfmt = fbi_to_pixfmt(fbi, false);
+ uint32_t new_fb_pfmt = fbi_to_pixfmt(fbi, true);
+ int cur_bpp, new_bpp, cur_bw, cur_bh, new_bw, new_bh;
+ unsigned int mem_len;
+ ipu_color_space_t cur_space, new_space;
+
+ cur_space = format_to_colorspace(cur_pfmt);
+ new_space = format_to_colorspace(new_pfmt);
+
+ cur_bpp = bytes_per_pixel(cur_pfmt);
+ new_bpp = bytes_per_pixel(new_pfmt);
+
+ if (mxc_fbi->first_set_par || mxc_fbi->cur_blank != FB_BLANK_UNBLANK)
+ return false;
+
+ if (!mxc_fbi->prefetch && !mxc_fbi->cur_prefetch)
+ return false;
+
+ if (!mxc_fbi->prefetch && cur_pfmt != new_pfmt)
+ return false;
+
+ if (cur_space == RGB && (cur_bpp == 2 || cur_bpp == 3) &&
+ cur_pfmt != new_pfmt)
+ return false;
+
+ if (!(mxc_fbi->cur_prefetch && mxc_fbi->prefetch) &&
+ ((cur_var.xres_virtual != fbi->var.xres_virtual) ||
+ (cur_var.xres != cur_var.xres_virtual) ||
+ (fbi->var.xres != fbi->var.xres_virtual)))
+ return false;
+
+ if (cur_space != new_space ||
+ (new_space == RGB && cur_bpp != new_bpp))
+ return false;
+
+ if (new_space == YCbCr)
+ return false;
+
+ mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
+ if (mxc_fbi->resolve && mxc_fbi->gpu_sec_buf_off) {
+ if (fbi->var.vmode & FB_VMODE_YWRAP)
+ mem_len = mxc_fbi->gpu_sec_buf_off + mem_len / 2;
+ else
+ mem_len = mxc_fbi->gpu_sec_buf_off *
+ (fbi->var.yres_virtual / fbi->var.yres) + mem_len / 2;
+ }
+ if (mem_len > fbi->fix.smem_len)
+ return false;
+
+ if (mxc_fbi->resolve && ipu_pixel_format_is_gpu_tile(mxc_fbi->cur_fb_pfmt)) {
+ fmt_to_tile_block(mxc_fbi->cur_fb_pfmt, &cur_bw, &cur_bh);
+ fmt_to_tile_block(new_fb_pfmt, &new_bw, &new_bh);
+
+ if (cur_bw != new_bw || cur_bh != new_bh ||
+ cur_var.xoffset % cur_bw != fbi->var.xoffset % new_bw ||
+ cur_var.yoffset % cur_bh != fbi->var.yoffset % new_bh)
+ return false;
+ } else if (mxc_fbi->resolve && mxc_fbi->cur_prefetch) {
+ fmt_to_tile_block(new_fb_pfmt, &new_bw, &new_bh);
+ if (fbi->var.xoffset % new_bw || fbi->var.yoffset % new_bh ||
+ fbi->var.xres % 16 || fbi->var.yres %
+ (fbi->var.vmode & FB_VMODE_INTERLACED ? 8 : 4))
+ return false;
+ } else if (mxc_fbi->prefetch && ipu_pixel_format_is_gpu_tile(mxc_fbi->cur_fb_pfmt)) {
+ fmt_to_tile_block(mxc_fbi->cur_fb_pfmt, &cur_bw, &cur_bh);
+ if (cur_var.xoffset % cur_bw || cur_var.yoffset % cur_bh ||
+ cur_var.xres % 16 || cur_var.yres %
+ (cur_var.vmode & FB_VMODE_INTERLACED ? 8 : 4))
+ return false;
+ }
+
+ cur_var.xres_virtual = fbi->var.xres_virtual;
+ cur_var.yres_virtual = fbi->var.yres_virtual;
+ cur_var.xoffset = fbi->var.xoffset;
+ cur_var.yoffset = fbi->var.yoffset;
+ cur_var.red = fbi->var.red;
+ cur_var.green = fbi->var.green;
+ cur_var.blue = fbi->var.blue;
+ cur_var.transp = fbi->var.transp;
+ cur_var.nonstd = fbi->var.nonstd;
+ if (memcmp(&cur_var, &fbi->var,
+ sizeof(struct fb_var_screeninfo)))
+ return false;
+
+ *final_pfmt = cur_pfmt;
+
+ return true;
+}
+
+static void mxcfb_check_resolve(struct fb_info *fbi)
+{
+ struct mxcfb_info *mxc_fbi = fbi->par;
+
+ switch (fbi->var.nonstd) {
+ case IPU_PIX_FMT_GPU32_ST:
+ case IPU_PIX_FMT_GPU32_SRT:
+ case IPU_PIX_FMT_GPU16_ST:
+ case IPU_PIX_FMT_GPU16_SRT:
+ mxc_fbi->gpu_sec_buf_off = 0;
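+		/* fall through - the split-buffer formats below share the same
+		 * prefetch/resolve setup */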
+ case IPU_PIX_FMT_GPU32_SB_ST:
+ case IPU_PIX_FMT_GPU32_SB_SRT:
+ case IPU_PIX_FMT_GPU16_SB_ST:
+ case IPU_PIX_FMT_GPU16_SB_SRT:
+ mxc_fbi->prefetch = true;
+ mxc_fbi->resolve = true;
+ break;
+ default:
+ mxc_fbi->resolve = false;
+ }
+}
+
+static void mxcfb_check_yuv(struct fb_info *fbi)
+{
+ struct mxcfb_info *mxc_fbi = fbi->par;
+
+ if (fbi->var.vmode & FB_VMODE_INTERLACED) {
+ if (ipu_pixel_format_is_multiplanar_yuv(fbi_to_pixfmt(fbi, true)))
+ mxc_fbi->prefetch = false;
+ } else {
+ if (fbi->var.nonstd == PRE_PIX_FMT_NV21 ||
+ fbi->var.nonstd == PRE_PIX_FMT_NV61)
+ mxc_fbi->prefetch = true;
+ }
+}
+
+/*
+ * Set framebuffer parameters and change the operating mode.
+ *
+ * @param info framebuffer information pointer
+ */
+static int mxcfb_set_par(struct fb_info *fbi)
+{
+ int retval = 0;
+ u32 mem_len, alpha_mem_len;
+ ipu_di_signal_cfg_t sig_cfg;
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+ uint32_t final_pfmt = 0;
+ int16_t ov_pos_x = 0, ov_pos_y = 0;
+ int ov_pos_ret = 0;
+ struct mxcfb_info *mxc_fbi_fg = NULL;
+ bool ovfbi_enable = false, on_the_fly;
+
+ if (ipu_ch_param_bad_alpha_pos(fbi_to_pixfmt(fbi, true)) &&
+ mxc_fbi->alpha_chan_en) {
+ dev_err(fbi->device, "Bad pixel format for "
+ "graphics plane fb\n");
+ return -EINVAL;
+ }
+
+ if (mxc_fbi->ovfbi)
+ mxc_fbi_fg = (struct mxcfb_info *)mxc_fbi->ovfbi->par;
+
+ if (mxc_fbi->ovfbi && mxc_fbi_fg)
+ if (mxc_fbi_fg->next_blank == FB_BLANK_UNBLANK)
+ ovfbi_enable = true;
+
+ if (!mxcfb_need_to_set_par(fbi))
+ return 0;
+
+ dev_dbg(fbi->device, "Reconfiguring framebuffer\n");
+
+ if (fbi->var.xres == 0 || fbi->var.yres == 0)
+ return 0;
+
+ mxcfb_set_fix(fbi);
+
+ mxcfb_check_resolve(fbi);
+
+ mxcfb_check_yuv(fbi);
+
+ on_the_fly = mxcfb_can_set_par_on_the_fly(fbi, &final_pfmt);
+ mxc_fbi->on_the_fly = on_the_fly;
+ mxc_fbi->final_pfmt = final_pfmt;
+
+ if (on_the_fly)
+ dev_dbg(fbi->device, "Reconfiguring framebuffer on the fly\n");
+
+ if (ovfbi_enable) {
+ ov_pos_ret = ipu_disp_get_window_pos(
+ mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch,
+ &ov_pos_x, &ov_pos_y);
+ if (ov_pos_ret < 0)
+ dev_err(fbi->device, "Get overlay pos failed, dispdrv:%s.\n",
+ mxc_fbi->dispdrv->drv->name);
+
+ if (!on_the_fly) {
+ ipu_clear_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_irq);
+ ipu_disable_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_irq);
+ ipu_clear_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_nf_irq);
+ ipu_disable_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_nf_irq);
+ ipu_disable_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch, true);
+ ipu_uninit_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
+ if (mxc_fbi_fg->cur_prefetch) {
+ ipu_prg_disable(mxc_fbi_fg->ipu_id, mxc_fbi_fg->pre_num);
+ ipu_pre_disable(mxc_fbi_fg->pre_num);
+ ipu_pre_free(&mxc_fbi_fg->pre_num);
+ }
+ }
+ }
+
+ if (!on_the_fly) {
+ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
+ ipu_disable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
+ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
+ ipu_disable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
+ ipu_disable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, true);
+ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
+ if (mxc_fbi->cur_prefetch) {
+ ipu_prg_disable(mxc_fbi->ipu_id, mxc_fbi->pre_num);
+ ipu_pre_disable(mxc_fbi->pre_num);
+ ipu_pre_free(&mxc_fbi->pre_num);
+ }
+ }
+
+ /*
+	 * Disable the IPU hsp clock if it is enabled for an
+	 * additional time in the IPU common driver.
+ */
+ if (mxc_fbi->first_set_par && mxc_fbi->late_init)
+ ipu_disable_hsp_clk(mxc_fbi->ipu);
+
+ mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
+ if (mxc_fbi->resolve && mxc_fbi->gpu_sec_buf_off) {
+ if (fbi->var.vmode & FB_VMODE_YWRAP)
+ mem_len = mxc_fbi->gpu_sec_buf_off + mem_len / 2;
+ else
+ mem_len = mxc_fbi->gpu_sec_buf_off *
+ (fbi->var.yres_virtual / fbi->var.yres) + mem_len / 2;
+ }
+ if (!fbi->fix.smem_start || (mem_len > fbi->fix.smem_len)) {
+ if (fbi->fix.smem_start)
+ mxcfb_unmap_video_memory(fbi);
+
+ if (mxcfb_map_video_memory(fbi) < 0)
+ return -ENOMEM;
+ }
+
+ if (mxc_fbi->first_set_par) {
+ /*
+		 * Clear the screen in case the U-Boot fb pixel format is not
+		 * the same as the kernel fb pixel format.
+ */
+ if (mxc_fbi->late_init)
+ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
+
+ mxc_fbi->first_set_par = false;
+ }
+
+ if (mxc_fbi->alpha_chan_en) {
+ alpha_mem_len = fbi->var.xres * fbi->var.yres;
+ if ((!mxc_fbi->alpha_phy_addr0 && !mxc_fbi->alpha_phy_addr1) ||
+ (alpha_mem_len > mxc_fbi->alpha_mem_len)) {
+ if (mxc_fbi->alpha_phy_addr0)
+ dma_free_coherent(fbi->device,
+ mxc_fbi->alpha_mem_len,
+ mxc_fbi->alpha_virt_addr0,
+ mxc_fbi->alpha_phy_addr0);
+ if (mxc_fbi->alpha_phy_addr1)
+ dma_free_coherent(fbi->device,
+ mxc_fbi->alpha_mem_len,
+ mxc_fbi->alpha_virt_addr1,
+ mxc_fbi->alpha_phy_addr1);
+
+ mxc_fbi->alpha_virt_addr0 =
+ dma_alloc_coherent(fbi->device,
+ alpha_mem_len,
+ &mxc_fbi->alpha_phy_addr0,
+ GFP_DMA | GFP_KERNEL);
+
+ mxc_fbi->alpha_virt_addr1 =
+ dma_alloc_coherent(fbi->device,
+ alpha_mem_len,
+ &mxc_fbi->alpha_phy_addr1,
+ GFP_DMA | GFP_KERNEL);
+ if (mxc_fbi->alpha_virt_addr0 == NULL ||
+ mxc_fbi->alpha_virt_addr1 == NULL) {
+ dev_err(fbi->device, "mxcfb: dma alloc for"
+ " alpha buffer failed.\n");
+ if (mxc_fbi->alpha_virt_addr0)
+ dma_free_coherent(fbi->device,
+ mxc_fbi->alpha_mem_len,
+ mxc_fbi->alpha_virt_addr0,
+ mxc_fbi->alpha_phy_addr0);
+ if (mxc_fbi->alpha_virt_addr1)
+ dma_free_coherent(fbi->device,
+ mxc_fbi->alpha_mem_len,
+ mxc_fbi->alpha_virt_addr1,
+ mxc_fbi->alpha_phy_addr1);
+ return -ENOMEM;
+ }
+ mxc_fbi->alpha_mem_len = alpha_mem_len;
+ }
+ }
+
+ if (mxc_fbi->next_blank != FB_BLANK_UNBLANK)
+ return retval;
+
+ if (!on_the_fly && mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->setup) {
+ retval = mxc_fbi->dispdrv->drv->setup(mxc_fbi->dispdrv, fbi);
+ if (retval < 0) {
+ dev_err(fbi->device, "setup error, dispdrv:%s.\n",
+ mxc_fbi->dispdrv->drv->name);
+ return -EINVAL;
+ }
+ }
+
+ if (!on_the_fly) {
+ _setup_disp_channel1(fbi);
+ if (ovfbi_enable)
+ _setup_disp_channel1(mxc_fbi->ovfbi);
+ }
+
+ if (!mxc_fbi->overlay && !on_the_fly) {
+ uint32_t out_pixel_fmt;
+
+ memset(&sig_cfg, 0, sizeof(sig_cfg));
+ if (fbi->var.vmode & FB_VMODE_INTERLACED)
+ sig_cfg.interlaced = true;
+ out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
+ if (fbi->var.vmode & FB_VMODE_ODD_FLD_FIRST) /* PAL */
+ sig_cfg.odd_field_first = true;
+ if (mxc_fbi->ipu_int_clk)
+ sig_cfg.int_clk = true;
+ if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
+ sig_cfg.Hsync_pol = true;
+ if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
+ sig_cfg.Vsync_pol = true;
+ if (!(fbi->var.sync & FB_SYNC_CLK_LAT_FALL))
+ sig_cfg.clk_pol = true;
+ if (fbi->var.sync & FB_SYNC_DATA_INVERT)
+ sig_cfg.data_pol = true;
+ if (!(fbi->var.sync & FB_SYNC_OE_LOW_ACT))
+ sig_cfg.enable_pol = true;
+ if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN)
+ sig_cfg.clkidle_en = true;
+
+		dev_dbg(fbi->device, "pixclock = %u Hz\n",
+ (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
+
+ if (ipu_init_sync_panel(mxc_fbi->ipu, mxc_fbi->ipu_di,
+ (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
+ fbi->var.xres, fbi->var.yres,
+ out_pixel_fmt,
+ fbi->var.left_margin,
+ fbi->var.hsync_len,
+ fbi->var.right_margin,
+ fbi->var.upper_margin,
+ fbi->var.vsync_len,
+ fbi->var.lower_margin,
+ 0, sig_cfg) != 0) {
+ dev_err(fbi->device,
+ "mxcfb: Error initializing panel.\n");
+ return -EINVAL;
+ }
+
+ fbi->mode =
+ (struct fb_videomode *)fb_match_mode(&fbi->var,
+ &fbi->modelist);
+
+ ipu_disp_set_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch, 0, 0);
+ }
+
+ retval = _setup_disp_channel2(fbi);
+ if (retval) {
+ if (!on_the_fly)
+ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
+ return retval;
+ }
+
+ if (ovfbi_enable && !on_the_fly) {
+ if (ov_pos_ret >= 0)
+ ipu_disp_set_window_pos(
+ mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch,
+ ov_pos_x, ov_pos_y);
+ retval = _setup_disp_channel2(mxc_fbi->ovfbi);
+ if (retval) {
+ ipu_uninit_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
+ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
+ return retval;
+ }
+ }
+
+ if (!on_the_fly) {
+ ipu_enable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
+ if (ovfbi_enable)
+ ipu_enable_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
+ }
+
+ if (!on_the_fly && mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->enable) {
+ retval = mxc_fbi->dispdrv->drv->enable(mxc_fbi->dispdrv, fbi);
+ if (retval < 0) {
+ dev_err(fbi->device, "enable error, dispdrv:%s.\n",
+ mxc_fbi->dispdrv->drv->name);
+ return -EINVAL;
+ }
+ }
+
+ mxc_fbi->cur_var = fbi->var;
+ mxc_fbi->cur_ipu_pfmt = on_the_fly ? mxc_fbi->final_pfmt :
+ fbi_to_pixfmt(fbi, false);
+ mxc_fbi->cur_fb_pfmt = fbi_to_pixfmt(fbi, true);
+ mxc_fbi->cur_prefetch = mxc_fbi->prefetch;
+
+ return retval;
+}
+
+static int _swap_channels(struct fb_info *fbi_from,
+ struct fb_info *fbi_to, bool both_on)
+{
+ int retval, tmp;
+ ipu_channel_t old_ch;
+ struct fb_info *ovfbi;
+ struct mxcfb_info *mxc_fbi_from = (struct mxcfb_info *)fbi_from->par;
+ struct mxcfb_info *mxc_fbi_to = (struct mxcfb_info *)fbi_to->par;
+
+ if (both_on) {
+ ipu_disable_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch, true);
+ ipu_uninit_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch);
+ }
+
+ /* switch the mxc fbi parameters */
+ old_ch = mxc_fbi_from->ipu_ch;
+ mxc_fbi_from->ipu_ch = mxc_fbi_to->ipu_ch;
+ mxc_fbi_to->ipu_ch = old_ch;
+ tmp = mxc_fbi_from->ipu_ch_irq;
+ mxc_fbi_from->ipu_ch_irq = mxc_fbi_to->ipu_ch_irq;
+ mxc_fbi_to->ipu_ch_irq = tmp;
+ tmp = mxc_fbi_from->ipu_ch_nf_irq;
+ mxc_fbi_from->ipu_ch_nf_irq = mxc_fbi_to->ipu_ch_nf_irq;
+ mxc_fbi_to->ipu_ch_nf_irq = tmp;
+ ovfbi = mxc_fbi_from->ovfbi;
+ mxc_fbi_from->ovfbi = mxc_fbi_to->ovfbi;
+ mxc_fbi_to->ovfbi = ovfbi;
+
+ _setup_disp_channel1(fbi_from);
+ retval = _setup_disp_channel2(fbi_from);
+ if (retval)
+ return retval;
+
+ /* switch between dp and dc, disable old idmac, enable new idmac */
+ retval = ipu_swap_channel(mxc_fbi_from->ipu, old_ch, mxc_fbi_from->ipu_ch);
+ ipu_uninit_channel(mxc_fbi_from->ipu, old_ch);
+
+ if (both_on) {
+ _setup_disp_channel1(fbi_to);
+ retval = _setup_disp_channel2(fbi_to);
+ if (retval)
+ return retval;
+ ipu_enable_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch);
+ }
+
+ return retval;
+}
+
+static int swap_channels(struct fb_info *fbi_from)
+{
+ int i;
+ int swap_mode;
+ ipu_channel_t ch_to;
+ struct mxcfb_info *mxc_fbi_from = (struct mxcfb_info *)fbi_from->par;
+ struct fb_info *fbi_to = NULL;
+ struct mxcfb_info *mxc_fbi_to;
+
+ /* what's the target channel? */
+ if (mxc_fbi_from->ipu_ch == MEM_BG_SYNC)
+ ch_to = MEM_DC_SYNC;
+ else
+ ch_to = MEM_BG_SYNC;
+
+ fbi_to = found_registered_fb(ch_to, mxc_fbi_from->ipu_id);
+ if (!fbi_to)
+ return -1;
+ mxc_fbi_to = (struct mxcfb_info *)fbi_to->par;
+
+ ipu_clear_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq);
+ ipu_clear_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq);
+ ipu_free_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq, fbi_from);
+ ipu_free_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq, fbi_to);
+ ipu_clear_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq);
+ ipu_clear_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq);
+ ipu_free_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq, fbi_from);
+ ipu_free_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq, fbi_to);
+
+ if (mxc_fbi_from->cur_blank == FB_BLANK_UNBLANK) {
+ if (mxc_fbi_to->cur_blank == FB_BLANK_UNBLANK)
+ swap_mode = BOTH_ON;
+ else
+ swap_mode = SRC_ON;
+ } else {
+ if (mxc_fbi_to->cur_blank == FB_BLANK_UNBLANK)
+ swap_mode = TGT_ON;
+ else
+ swap_mode = BOTH_OFF;
+ }
+
+ switch (swap_mode) {
+ case BOTH_ON:
+ /* disable target->switch src->enable target */
+ _swap_channels(fbi_from, fbi_to, true);
+ break;
+ case SRC_ON:
+ /* just switch src */
+ _swap_channels(fbi_from, fbi_to, false);
+ break;
+ case TGT_ON:
+ /* just switch target */
+ _swap_channels(fbi_to, fbi_from, false);
+ break;
+ case BOTH_OFF:
+		/* switch directly; nothing more needs to be done */
+ mxc_fbi_to->ipu_ch = mxc_fbi_from->ipu_ch;
+ mxc_fbi_from->ipu_ch = ch_to;
+ i = mxc_fbi_from->ipu_ch_irq;
+ mxc_fbi_from->ipu_ch_irq = mxc_fbi_to->ipu_ch_irq;
+ mxc_fbi_to->ipu_ch_irq = i;
+ i = mxc_fbi_from->ipu_ch_nf_irq;
+ mxc_fbi_from->ipu_ch_nf_irq = mxc_fbi_to->ipu_ch_nf_irq;
+ mxc_fbi_to->ipu_ch_nf_irq = i;
+ break;
+ default:
+ break;
+ }
+
+ if (ipu_request_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq,
+ mxcfb_irq_handler, IPU_IRQF_ONESHOT,
+ MXCFB_NAME, fbi_from) != 0) {
+ dev_err(fbi_from->device, "Error registering irq %d\n",
+ mxc_fbi_from->ipu_ch_irq);
+ return -EBUSY;
+ }
+ ipu_disable_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq);
+ if (ipu_request_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq,
+ mxcfb_irq_handler, IPU_IRQF_ONESHOT,
+ MXCFB_NAME, fbi_to) != 0) {
+ dev_err(fbi_to->device, "Error registering irq %d\n",
+ mxc_fbi_to->ipu_ch_irq);
+ return -EBUSY;
+ }
+ ipu_disable_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq);
+ if (ipu_request_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq,
+ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT,
+ MXCFB_NAME, fbi_from) != 0) {
+ dev_err(fbi_from->device, "Error registering irq %d\n",
+ mxc_fbi_from->ipu_ch_nf_irq);
+ return -EBUSY;
+ }
+ ipu_disable_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq);
+ if (ipu_request_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq,
+ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT,
+ MXCFB_NAME, fbi_to) != 0) {
+ dev_err(fbi_to->device, "Error registering irq %d\n",
+ mxc_fbi_to->ipu_ch_nf_irq);
+ return -EBUSY;
+ }
+ ipu_disable_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq);
+
+ return 0;
+}
+
+/*
+ * Check framebuffer variable parameters and adjust to valid values.
+ *
+ * @param var framebuffer variable parameters
+ *
+ * @param info framebuffer information pointer
+ */
+static int mxcfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ u32 vtotal;
+ u32 htotal;
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par;
+ struct fb_info tmp_fbi;
+ unsigned int fr_xoff, fr_yoff, fr_w, fr_h, line_length;
+ unsigned long base = 0;
+ int ret, bw = 0, bh = 0;
+ bool triple_buffer = false;
+
+ if (var->xres == 0 || var->yres == 0)
+ return 0;
+
+	/* fg must not be bigger than bg */
+ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
+ struct fb_info *fbi_tmp;
+ int bg_xres = 0, bg_yres = 0;
+ int16_t pos_x, pos_y;
+
+ bg_xres = var->xres;
+ bg_yres = var->yres;
+
+ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
+ if (fbi_tmp) {
+ bg_xres = fbi_tmp->var.xres;
+ bg_yres = fbi_tmp->var.yres;
+ }
+
+ ipu_disp_get_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch, &pos_x, &pos_y);
+
+ if ((var->xres + pos_x) > bg_xres)
+ var->xres = bg_xres - pos_x;
+ if ((var->yres + pos_y) > bg_yres)
+ var->yres = bg_yres - pos_y;
+
+		/* inherit the timing of the background fb, if one is registered */
+		if (fbi_tmp) {
+			if (fbi_tmp->var.vmode & FB_VMODE_INTERLACED)
+				var->vmode |= FB_VMODE_INTERLACED;
+			else
+				var->vmode &= ~FB_VMODE_INTERLACED;
+
+			var->pixclock = fbi_tmp->var.pixclock;
+			var->right_margin = fbi_tmp->var.right_margin;
+			var->hsync_len = fbi_tmp->var.hsync_len;
+			var->left_margin = fbi_tmp->var.left_margin +
+				fbi_tmp->var.xres - var->xres;
+			var->upper_margin = fbi_tmp->var.upper_margin;
+			var->vsync_len = fbi_tmp->var.vsync_len;
+			var->lower_margin = fbi_tmp->var.lower_margin +
+				fbi_tmp->var.yres - var->yres;
+		}
+ }
+
+ if (var->rotate > IPU_ROTATE_VERT_FLIP)
+ var->rotate = IPU_ROTATE_NONE;
+
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+
+ if (var->yres_virtual < var->yres) {
+ var->yres_virtual = var->yres * 3;
+ triple_buffer = true;
+ }
+
+ if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
+ (var->bits_per_pixel != 16) && (var->bits_per_pixel != 12) &&
+ (var->bits_per_pixel != 8))
+ var->bits_per_pixel = 16;
+
+ if (check_var_pixfmt(var)) {
+ /* Fall back to default */
+ ret = bpp_to_var(var->bits_per_pixel, var);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ipu_pixel_format_is_gpu_tile(var->nonstd)) {
+ fmt_to_tile_alignment(var->nonstd, &bw, &bh);
+ var->xres_virtual = ALIGN(var->xres_virtual, bw);
+ if (triple_buffer)
+ var->yres_virtual = 3 * ALIGN(var->yres, bh);
+ else
+ var->yres_virtual = ALIGN(var->yres_virtual, bh);
+ }
+
+ line_length = var->xres_virtual * var->bits_per_pixel / 8;
+ fr_xoff = var->xoffset;
+ fr_w = var->xres_virtual;
+ if (!(var->vmode & FB_VMODE_YWRAP)) {
+ fr_yoff = var->yoffset % var->yres;
+ fr_h = var->yres;
+ base = line_length * var->yres *
+ (var->yoffset / var->yres);
+ if (ipu_pixel_format_is_split_gpu_tile(var->nonstd))
+ base += (mxc_fbi->gpu_sec_buf_off -
+ line_length * var->yres / 2) *
+ (var->yoffset / var->yres);
+ } else {
+ fr_yoff = var->yoffset;
+ fr_h = var->yres_virtual;
+ }
+
+ tmp_fbi.var = *var;
+ tmp_fbi.par = mxc_fbi;
+ if (ipu_pixel_format_is_gpu_tile(var->nonstd)) {
+ unsigned int crop_line, prg_width = var->xres, offset;
+ int ipu_stride, prg_stride, bs;
+ bool tmp_prefetch = mxc_fbi->prefetch;
+
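+		/*
+		 * Choose the block size used to align the PRG scan line: 32
+		 * pixels when xres is a multiple of 32, 16 when it is only a
+		 * multiple of 16, otherwise default to 32 and widen prg_width
+		 * to a multiple of it below.
+		 */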
+ if (!(var->xres % 32))
+ bs = 32;
+ else if (!(var->xres % 16))
+ bs = 16;
+ else
+ bs = 32;
+
+ prg_width += fr_xoff % bw;
+ if (((fr_xoff % bw) + prg_width) % bs)
+ prg_width = ALIGN(prg_width, bs);
+
+ mxc_fbi->prefetch = true;
+ ipu_stride = prg_width *
+ bytes_per_pixel(fbi_to_pixfmt(&tmp_fbi, false));
+
+ if (var->vmode & FB_VMODE_INTERLACED) {
+ if ((fr_yoff % bh) % 2) {
+ dev_err(info->device,
+ "wrong crop value in interlaced mode\n");
+ return -EINVAL;
+ }
+ crop_line = (fr_yoff % bh) / 2;
+ prg_stride = ipu_stride * 2;
+ } else {
+ crop_line = fr_yoff % bh;
+ prg_stride = ipu_stride;
+ }
+
+ offset = crop_line * prg_stride +
+ (fr_xoff % bw) *
+ bytes_per_pixel(fbi_to_pixfmt(&tmp_fbi, false));
+ mxc_fbi->prefetch = tmp_prefetch;
+ if (offset % 8) {
+ dev_err(info->device,
+				"IPU base address is not 8-byte aligned\n");
+ return -EINVAL;
+ }
+ } else {
+ unsigned int uoff = 0, voff = 0;
+ int fb_stride;
+
+ switch (fbi_to_pixfmt(&tmp_fbi, true)) {
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_NV12:
+ case PRE_PIX_FMT_NV21:
+ case IPU_PIX_FMT_NV16:
+ case PRE_PIX_FMT_NV61:
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YVU422P:
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YUV444P:
+ fb_stride = var->xres_virtual;
+ break;
+ default:
+ fb_stride = line_length;
+ }
+ base += fr_yoff * fb_stride +
+ fr_xoff * var->bits_per_pixel / 8;
+
+ ipu_get_channel_offset(fbi_to_pixfmt(&tmp_fbi, true),
+ var->xres,
+ fr_h,
+ fr_w,
+ 0, 0,
+ fr_yoff,
+ fr_xoff,
+ &uoff,
+ &voff);
+ if (base % 8 || uoff % 8 || voff % 8) {
+ dev_err(info->device,
+				"IPU base address is not 8-byte aligned\n");
+ return -EINVAL;
+ }
+ }
+
+ if (var->pixclock < 1000) {
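+		/*
+		 * Treat a pixclock below 1000 ps as "unset" and derive one for
+		 * a 60 Hz refresh: pixel clock in kHz = htotal * vtotal * 60 /
+		 * 1000 (the "* 6 / 100" below); KHZ2PICOS() then converts the
+		 * kHz value into the picosecond period the fb core expects.
+		 */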
+ htotal = var->xres + var->right_margin + var->hsync_len +
+ var->left_margin;
+ vtotal = var->yres + var->lower_margin + var->vsync_len +
+ var->upper_margin;
+ var->pixclock = (vtotal * htotal * 6UL) / 100UL;
+ var->pixclock = KHZ2PICOS(var->pixclock);
+ dev_dbg(info->device,
+ "pixclock set for 60Hz refresh = %u ps\n",
+ var->pixclock);
+ }
+
+ var->height = -1;
+ var->width = -1;
+ var->grayscale = 0;
+
+ return 0;
+}
+
+static inline u_int _chan_to_field(u_int chan, struct fb_bitfield *bf)
+{
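+	/*
+	 * Scale the 16-bit colour component down to bf->length bits and shift
+	 * it into its position within the packed pseudo-palette word.
+	 */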
+ chan &= 0xffff;
+ chan >>= 16 - bf->length;
+ return chan << bf->offset;
+}
+
+static int mxcfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int trans, struct fb_info *fbi)
+{
+ unsigned int val;
+ int ret = 1;
+
+ /*
+ * If greyscale is true, then we convert the RGB value
+ * to greyscale no matter what visual we are using.
+ */
+ if (fbi->var.grayscale)
+ red = green = blue = (19595 * red + 38470 * green +
+ 7471 * blue) >> 16;
+ switch (fbi->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ /*
+ * 16-bit True Colour. We encode the RGB value
+ * according to the RGB bitfield information.
+ */
+ if (regno < 16) {
+ u32 *pal = fbi->pseudo_palette;
+
+ val = _chan_to_field(red, &fbi->var.red);
+ val |= _chan_to_field(green, &fbi->var.green);
+ val |= _chan_to_field(blue, &fbi->var.blue);
+
+ pal[regno] = val;
+ ret = 0;
+ }
+ break;
+
+ case FB_VISUAL_STATIC_PSEUDOCOLOR:
+ case FB_VISUAL_PSEUDOCOLOR:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Function to handle custom ioctls for MXC framebuffer.
+ *
+ * @param inode inode struct
+ *
+ * @param file file struct
+ *
+ * @param cmd Ioctl command to handle
+ *
+ * @param arg User pointer to command arguments
+ *
+ * @param fbi framebuffer information pointer
+ */
+static int mxcfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ int __user *argp = (void __user *)arg;
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+
+ switch (cmd) {
+ case MXCFB_SET_GBL_ALPHA:
+ {
+ struct mxcfb_gbl_alpha ga;
+
+ if (copy_from_user(&ga, (void *)arg, sizeof(ga))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (ipu_disp_set_global_alpha(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ (bool)ga.enable,
+ ga.alpha)) {
+ retval = -EINVAL;
+ break;
+ }
+
+		if (ga.enable) {
+			mxc_fbi->alpha_chan_en = false;
+			dev_dbg(fbi->device,
+				"Set global alpha of %s to %d\n",
+				fbi->fix.id, ga.alpha);
+		}
+ break;
+ }
+ case MXCFB_SET_LOC_ALPHA:
+ {
+ struct mxcfb_loc_alpha la;
+ bool bad_pixfmt =
+ ipu_ch_param_bad_alpha_pos(fbi_to_pixfmt(fbi, true));
+
+ if (copy_from_user(&la, (void *)arg, sizeof(la))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (la.enable && !la.alpha_in_pixel) {
+ struct fb_info *fbi_tmp;
+ ipu_channel_t ipu_ch;
+
+ if (bad_pixfmt) {
+ dev_err(fbi->device, "Bad pixel format "
+ "for graphics plane fb\n");
+ retval = -EINVAL;
+ break;
+ }
+
+ mxc_fbi->alpha_chan_en = true;
+
+ if (mxc_fbi->ipu_ch == MEM_FG_SYNC)
+ ipu_ch = MEM_BG_SYNC;
+ else if (mxc_fbi->ipu_ch == MEM_BG_SYNC)
+ ipu_ch = MEM_FG_SYNC;
+ else {
+ retval = -EINVAL;
+ break;
+ }
+
+ fbi_tmp = found_registered_fb(ipu_ch, mxc_fbi->ipu_id);
+ if (fbi_tmp)
+ ((struct mxcfb_info *)(fbi_tmp->par))->alpha_chan_en = false;
+ } else
+ mxc_fbi->alpha_chan_en = false;
+
+ if (ipu_disp_set_global_alpha(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ !(bool)la.enable, 0)) {
+ retval = -EINVAL;
+ break;
+ }
+
+ fbi->var.activate = (fbi->var.activate & ~FB_ACTIVATE_MASK) |
+ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+ mxcfb_set_par(fbi);
+
+ la.alpha_phy_addr0 = mxc_fbi->alpha_phy_addr0;
+ la.alpha_phy_addr1 = mxc_fbi->alpha_phy_addr1;
+ if (copy_to_user((void *)arg, &la, sizeof(la))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (la.enable)
+ dev_dbg(fbi->device,
+ "Enable DP local alpha for %s\n",
+ fbi->fix.id);
+ break;
+ }
+ case MXCFB_SET_LOC_ALP_BUF:
+ {
+ unsigned long base;
+ uint32_t ipu_alp_ch_irq;
+
+ if (!(((mxc_fbi->ipu_ch == MEM_FG_SYNC) ||
+ (mxc_fbi->ipu_ch == MEM_BG_SYNC)) &&
+ (mxc_fbi->alpha_chan_en))) {
+ dev_err(fbi->device,
+ "Should use background or overlay "
+ "framebuffer to set the alpha buffer "
+ "number\n");
+ return -EINVAL;
+ }
+
+ if (get_user(base, argp))
+ return -EFAULT;
+
+ if (base != mxc_fbi->alpha_phy_addr0 &&
+ base != mxc_fbi->alpha_phy_addr1) {
+ dev_err(fbi->device,
+ "Wrong alpha buffer physical address "
+ "%lu\n", base);
+ return -EINVAL;
+ }
+
+ if (mxc_fbi->ipu_ch == MEM_FG_SYNC)
+ ipu_alp_ch_irq = IPU_IRQ_FG_ALPHA_SYNC_EOF;
+ else
+ ipu_alp_ch_irq = IPU_IRQ_BG_ALPHA_SYNC_EOF;
+
+ retval = wait_for_completion_timeout(
+ &mxc_fbi->alpha_flip_complete, HZ/2);
+ if (retval == 0) {
+ dev_err(fbi->device, "timeout when waiting for alpha flip irq\n");
+ retval = -ETIMEDOUT;
+ break;
+ }
+
+ mxc_fbi->cur_ipu_alpha_buf =
+ !mxc_fbi->cur_ipu_alpha_buf;
+ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_ALPHA_IN_BUFFER,
+ mxc_fbi->
+ cur_ipu_alpha_buf,
+ base) == 0) {
+ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_ALPHA_IN_BUFFER,
+ mxc_fbi->cur_ipu_alpha_buf);
+ ipu_clear_irq(mxc_fbi->ipu, ipu_alp_ch_irq);
+ ipu_enable_irq(mxc_fbi->ipu, ipu_alp_ch_irq);
+ } else {
+ dev_err(fbi->device,
+ "Error updating %s SDC alpha buf %d "
+ "to address=0x%08lX\n",
+ fbi->fix.id,
+ mxc_fbi->cur_ipu_alpha_buf, base);
+ }
+ break;
+ }
+ case MXCFB_SET_CLR_KEY:
+ {
+ struct mxcfb_color_key key;
+ if (copy_from_user(&key, (void *)arg, sizeof(key))) {
+ retval = -EFAULT;
+ break;
+ }
+ retval = ipu_disp_set_color_key(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ key.enable,
+ key.color_key);
+ dev_dbg(fbi->device, "Set color key to 0x%08X\n",
+ key.color_key);
+ break;
+ }
+ case MXCFB_SET_GAMMA:
+ {
+ struct mxcfb_gamma gamma;
+ if (copy_from_user(&gamma, (void *)arg, sizeof(gamma))) {
+ retval = -EFAULT;
+ break;
+ }
+ retval = ipu_disp_set_gamma_correction(mxc_fbi->ipu,
+ mxc_fbi->ipu_ch,
+ gamma.enable,
+ gamma.constk,
+ gamma.slopek);
+ break;
+ }
+ case MXCFB_SET_GPU_SPLIT_FMT:
+ {
+ struct mxcfb_gpu_split_fmt fmt;
+
+ if (copy_from_user(&fmt, (void *)arg, sizeof(fmt))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (fmt.var.nonstd != IPU_PIX_FMT_GPU32_SB_ST &&
+ fmt.var.nonstd != IPU_PIX_FMT_GPU32_SB_SRT &&
+ fmt.var.nonstd != IPU_PIX_FMT_GPU16_SB_ST &&
+ fmt.var.nonstd != IPU_PIX_FMT_GPU16_SB_SRT) {
+ retval = -EINVAL;
+ break;
+ }
+
+ mxc_fbi->gpu_sec_buf_off = fmt.offset;
+ fmt.var.activate = (fbi->var.activate & ~FB_ACTIVATE_MASK) |
+ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+ console_lock();
+ fbi->flags |= FBINFO_MISC_USEREVENT;
+ retval = fb_set_var(fbi, &fmt.var);
+ fbi->flags &= ~FBINFO_MISC_USEREVENT;
+ console_unlock();
+ break;
+ }
+ case MXCFB_SET_PREFETCH:
+ {
+ int enable;
+
+ if (copy_from_user(&enable, (void *)arg, sizeof(enable))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (!enable) {
+ if (ipu_pixel_format_is_gpu_tile(fbi_to_pixfmt(fbi, true))) {
+ dev_err(fbi->device, "Cannot disable prefetch in "
+ "resolving mode\n");
+ retval = -EINVAL;
+ break;
+ }
+ if (ipu_pixel_format_is_pre_yuv(fbi_to_pixfmt(fbi, true))) {
+ dev_err(fbi->device, "Cannot disable prefetch when "
+ "PRE gets NV61 or NV21\n");
+ retval = -EINVAL;
+ break;
+ }
+ } else {
+ if ((fbi->var.vmode & FB_VMODE_INTERLACED) &&
+ ipu_pixel_format_is_multiplanar_yuv(fbi_to_pixfmt(fbi, true))) {
+ dev_err(fbi->device, "Cannot enable prefetch when "
+ "PRE gets multiplanar YUV frames\n");
+ retval = -EINVAL;
+ break;
+ }
+ }
+
+ if (mxc_fbi->cur_prefetch == !!enable)
+ break;
+
+ retval = mxcfb_check_var(&fbi->var, fbi);
+ if (retval)
+ break;
+
+ mxc_fbi->prefetch = !!enable;
+
+ fbi->var.activate = (fbi->var.activate & ~FB_ACTIVATE_MASK) |
+ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+ retval = mxcfb_set_par(fbi);
+ break;
+ }
+ case MXCFB_GET_PREFETCH:
+ {
+ struct mxcfb_info *mxc_fbi =
+ (struct mxcfb_info *)fbi->par;
+
+ if (put_user(mxc_fbi->cur_prefetch, argp))
+ return -EFAULT;
+ break;
+ }
+ case MXCFB_WAIT_FOR_VSYNC:
+ {
+ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
+			/* BG must be powered on */
+ struct mxcfb_info *bg_mxcfbi = NULL;
+ struct fb_info *fbi_tmp;
+
+ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
+ if (fbi_tmp)
+ bg_mxcfbi = ((struct mxcfb_info *)(fbi_tmp->par));
+
+ if (!bg_mxcfbi) {
+ retval = -EINVAL;
+ break;
+ }
+ if (bg_mxcfbi->cur_blank != FB_BLANK_UNBLANK) {
+ retval = -EINVAL;
+ break;
+ }
+ }
+ if (mxc_fbi->cur_blank != FB_BLANK_UNBLANK) {
+ retval = -EINVAL;
+ break;
+ }
+
+ init_completion(&mxc_fbi->vsync_complete);
+ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
+ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
+ retval = wait_for_completion_interruptible_timeout(
+ &mxc_fbi->vsync_complete, 1 * HZ);
+ if (retval == 0) {
+ dev_err(fbi->device,
+ "MXCFB_WAIT_FOR_VSYNC: timeout %d\n",
+ retval);
+ retval = -ETIME;
+ } else if (retval > 0) {
+ retval = 0;
+ }
+ break;
+ }
+ case FBIO_ALLOC:
+ {
+ int size;
+ struct mxcfb_alloc_list *mem;
+
+		if (get_user(size, argp))
+			return -EFAULT;
+
+		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+		if (mem == NULL)
+			return -ENOMEM;
+
+ mem->size = PAGE_ALIGN(size);
+
+		mem->cpu_addr = dma_alloc_coherent(fbi->device, mem->size,
+ &mem->phy_addr,
+ GFP_KERNEL);
+ if (mem->cpu_addr == NULL) {
+ kfree(mem);
+ return -ENOMEM;
+ }
+
+ list_add(&mem->list, &fb_alloc_list);
+
+ dev_dbg(fbi->device, "allocated %d bytes @ 0x%08X\n",
+ mem->size, mem->phy_addr);
+
+ if (put_user(mem->phy_addr, argp))
+ return -EFAULT;
+
+ break;
+ }
+ case FBIO_FREE:
+ {
+ unsigned long offset;
+ struct mxcfb_alloc_list *mem;
+
+ if (get_user(offset, argp))
+ return -EFAULT;
+
+ retval = -EINVAL;
+ list_for_each_entry(mem, &fb_alloc_list, list) {
+ if (mem->phy_addr == offset) {
+ list_del(&mem->list);
+ dma_free_coherent(fbi->device,
+ mem->size,
+ mem->cpu_addr,
+ mem->phy_addr);
+ kfree(mem);
+ retval = 0;
+ break;
+ }
+ }
+
+ break;
+ }
+ case MXCFB_SET_OVERLAY_POS:
+ {
+ struct mxcfb_pos pos;
+ struct fb_info *bg_fbi = NULL;
+ struct mxcfb_info *bg_mxcfbi = NULL;
+
+ if (mxc_fbi->ipu_ch != MEM_FG_SYNC) {
+ dev_err(fbi->device, "Should use the overlay "
+ "framebuffer to set the position of "
+ "the overlay window\n");
+ retval = -EINVAL;
+ break;
+ }
+
+ if (copy_from_user(&pos, (void *)arg, sizeof(pos))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ bg_fbi = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
+ if (bg_fbi)
+ bg_mxcfbi = ((struct mxcfb_info *)(bg_fbi->par));
+
+ if (bg_fbi == NULL) {
+ dev_err(fbi->device, "Cannot find the "
+ "background framebuffer\n");
+ retval = -ENOENT;
+ break;
+ }
+
+		/* if the fb is unblanked, check that the position fits the display */
+ if (mxc_fbi->cur_blank == FB_BLANK_UNBLANK) {
+ if (fbi->var.xres + pos.x > bg_fbi->var.xres) {
+ if (bg_fbi->var.xres < fbi->var.xres)
+ pos.x = 0;
+ else
+ pos.x = bg_fbi->var.xres - fbi->var.xres;
+ }
+ if (fbi->var.yres + pos.y > bg_fbi->var.yres) {
+ if (bg_fbi->var.yres < fbi->var.yres)
+ pos.y = 0;
+ else
+ pos.y = bg_fbi->var.yres - fbi->var.yres;
+ }
+ }
+
+ retval = ipu_disp_set_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ pos.x, pos.y);
+
+ if (copy_to_user((void *)arg, &pos, sizeof(pos))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case MXCFB_GET_FB_IPU_CHAN:
+ {
+ struct mxcfb_info *mxc_fbi =
+ (struct mxcfb_info *)fbi->par;
+
+ if (put_user(mxc_fbi->ipu_ch, argp))
+ return -EFAULT;
+ break;
+ }
+ case MXCFB_GET_DIFMT:
+ {
+ struct mxcfb_info *mxc_fbi =
+ (struct mxcfb_info *)fbi->par;
+
+ if (put_user(mxc_fbi->ipu_di_pix_fmt, argp))
+ return -EFAULT;
+ break;
+ }
+ case MXCFB_GET_FB_IPU_DI:
+ {
+ struct mxcfb_info *mxc_fbi =
+ (struct mxcfb_info *)fbi->par;
+
+ if (put_user(mxc_fbi->ipu_di, argp))
+ return -EFAULT;
+ break;
+ }
+ case MXCFB_GET_FB_BLANK:
+ {
+ struct mxcfb_info *mxc_fbi =
+ (struct mxcfb_info *)fbi->par;
+
+ if (put_user(mxc_fbi->cur_blank, argp))
+ return -EFAULT;
+ break;
+ }
+ case MXCFB_SET_DIFMT:
+ {
+ struct mxcfb_info *mxc_fbi =
+ (struct mxcfb_info *)fbi->par;
+
+ if (get_user(mxc_fbi->ipu_di_pix_fmt, argp))
+ return -EFAULT;
+
+ break;
+ }
+ case MXCFB_CSC_UPDATE:
+ {
+ struct mxcfb_csc_matrix csc;
+
+ if (copy_from_user(&csc, (void *) arg, sizeof(csc)))
+ return -EFAULT;
+
+ if ((mxc_fbi->ipu_ch != MEM_FG_SYNC) &&
+ (mxc_fbi->ipu_ch != MEM_BG_SYNC) &&
+ (mxc_fbi->ipu_ch != MEM_BG_ASYNC0))
+ return -EFAULT;
+ ipu_set_csc_coefficients(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ csc.param);
+ break;
+ }
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+/*
+ * mxcfb_blank():
+ * Blank the display.
+ */
+static int mxcfb_blank(int blank, struct fb_info *info)
+{
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par;
+ int ret = 0;
+
+ dev_dbg(info->device, "blank = %d\n", blank);
+
+ if (mxc_fbi->cur_blank == blank)
+ return 0;
+
+ mxc_fbi->next_blank = blank;
+
+ switch (blank) {
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->disable)
+ mxc_fbi->dispdrv->drv->disable(mxc_fbi->dispdrv, info);
+ ipu_disable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, true);
+ if (mxc_fbi->ipu_di >= 0)
+ ipu_uninit_sync_panel(mxc_fbi->ipu, mxc_fbi->ipu_di);
+ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
+ if (mxc_fbi->cur_prefetch) {
+ ipu_prg_disable(mxc_fbi->ipu_id, mxc_fbi->pre_num);
+ ipu_pre_disable(mxc_fbi->pre_num);
+ ipu_pre_free(&mxc_fbi->pre_num);
+ }
+ break;
+ case FB_BLANK_UNBLANK:
+ info->var.activate = (info->var.activate & ~FB_ACTIVATE_MASK) |
+ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+ ret = mxcfb_set_par(info);
+ break;
+ }
+ if (!ret)
+ mxc_fbi->cur_blank = blank;
+ return ret;
+}
+
+/*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ *
+ * @param var Variable screen buffer information
+ * @param info Framebuffer information pointer
+ */
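+/*
+ * A hypothetical user-space caller pans by bumping yoffset and issuing the
+ * standard FBIOPAN_DISPLAY ioctl, for example:
+ *
+ *	struct fb_var_screeninfo var;
+ *
+ *	ioctl(fd, FBIOGET_VSCREENINFO, &var);
+ *	var.yoffset = var.yres;			(flip to the second buffer)
+ *	ioctl(fd, FBIOPAN_DISPLAY, &var);
+ */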
+static int
+mxcfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par,
+ *mxc_graphic_fbi = NULL;
+ u_int y_bottom;
+ unsigned int fr_xoff, fr_yoff, fr_w, fr_h;
+ unsigned int x_crop = 0, y_crop = 0;
+ unsigned long base, ipu_base = 0, active_alpha_phy_addr = 0;
+ bool loc_alpha_en = false;
+ int fb_stride;
+ int bw = 0, bh = 0;
+ int i;
+ int ret;
+
+ /* no pan display during fb blank */
+ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
+ struct mxcfb_info *bg_mxcfbi = NULL;
+ struct fb_info *fbi_tmp;
+
+ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
+ if (fbi_tmp)
+ bg_mxcfbi = ((struct mxcfb_info *)(fbi_tmp->par));
+ if (!bg_mxcfbi)
+ return -EINVAL;
+ if (bg_mxcfbi->cur_blank != FB_BLANK_UNBLANK)
+ return -EINVAL;
+ }
+ if (mxc_fbi->cur_blank != FB_BLANK_UNBLANK)
+ return -EINVAL;
+
+ if (mxc_fbi->resolve) {
+ fmt_to_tile_block(info->var.nonstd, &bw, &bh);
+
+ if (mxc_fbi->cur_var.xoffset % bw != var->xoffset % bw ||
+ mxc_fbi->cur_var.yoffset % bh != var->yoffset % bh) {
+ dev_err(info->device, "do not support panning "
+ "with tile crop settings changed\n");
+ return -EINVAL;
+ }
+ }
+
+ y_bottom = var->yoffset;
+
+ if (y_bottom > info->var.yres_virtual)
+ return -EINVAL;
+
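+	/*
+	 * For the planar YUV formats the line stride used in the base address
+	 * calculation is xres_virtual bytes (the Y plane stores one byte per
+	 * pixel); packed formats use fix.line_length instead.
+	 */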
+ switch (fbi_to_pixfmt(info, true)) {
+ case IPU_PIX_FMT_YUV420P2:
+ case IPU_PIX_FMT_YVU420P:
+ case IPU_PIX_FMT_NV12:
+ case PRE_PIX_FMT_NV21:
+ case IPU_PIX_FMT_NV16:
+ case PRE_PIX_FMT_NV61:
+ case IPU_PIX_FMT_YUV422P:
+ case IPU_PIX_FMT_YVU422P:
+ case IPU_PIX_FMT_YUV420P:
+ case IPU_PIX_FMT_YUV444P:
+ fb_stride = info->var.xres_virtual;
+ break;
+ default:
+ fb_stride = info->fix.line_length;
+ }
+
+ base = info->fix.smem_start;
+ fr_xoff = var->xoffset;
+ fr_w = info->var.xres_virtual;
+ if (!(var->vmode & FB_VMODE_YWRAP)) {
+ dev_dbg(info->device, "Y wrap disabled\n");
+ fr_yoff = var->yoffset % info->var.yres;
+ fr_h = info->var.yres;
+ base += info->fix.line_length * info->var.yres *
+ (var->yoffset / info->var.yres);
+ if (ipu_pixel_format_is_split_gpu_tile(var->nonstd))
+ base += (mxc_fbi->gpu_sec_buf_off -
+ info->fix.line_length * info->var.yres / 2) *
+ (var->yoffset / info->var.yres);
+ } else {
+ dev_dbg(info->device, "Y wrap enabled\n");
+ fr_yoff = var->yoffset;
+ fr_h = info->var.yres_virtual;
+ }
+ if (!mxc_fbi->resolve) {
+ base += fr_yoff * fb_stride + fr_xoff *
+ bytes_per_pixel(fbi_to_pixfmt(info, true));
+
+ if (mxc_fbi->cur_prefetch && (info->var.vmode & FB_VMODE_INTERLACED))
+ base += info->var.rotate ?
+ fr_w * bytes_per_pixel(fbi_to_pixfmt(info, true)) : 0;
+ } else {
+ x_crop = fr_xoff & ~(bw - 1);
+ y_crop = fr_yoff & ~(bh - 1);
+ }
+
+ if (mxc_fbi->cur_prefetch) {
+ unsigned int sec_buf_off = 0, trd_buf_off = 0;
+ ipu_get_channel_offset(fbi_to_pixfmt(info, true),
+ info->var.xres,
+ fr_h,
+ fr_w,
+ 0, 0,
+ fr_yoff,
+ fr_xoff,
+ &sec_buf_off,
+ &trd_buf_off);
+ if (mxc_fbi->resolve)
+ sec_buf_off = mxc_fbi->gpu_sec_buf_off;
+ ipu_pre_set_fb_buffer(mxc_fbi->pre_num, base,
+ x_crop, y_crop,
+ sec_buf_off, trd_buf_off);
+ } else {
+ ipu_base = base;
+ }
+
+ /* Check if DP local alpha is enabled and find the graphic fb */
+ if (mxc_fbi->ipu_ch == MEM_BG_SYNC || mxc_fbi->ipu_ch == MEM_FG_SYNC) {
+ for (i = 0; i < num_registered_fb; i++) {
+ char bg_id[] = "DISP3 BG";
+ char fg_id[] = "DISP3 FG";
+ char *idstr = registered_fb[i]->fix.id;
+ bg_id[4] += mxc_fbi->ipu_id;
+ fg_id[4] += mxc_fbi->ipu_id;
+ if ((strcmp(idstr, bg_id) == 0 ||
+ strcmp(idstr, fg_id) == 0) &&
+ ((struct mxcfb_info *)
+ (registered_fb[i]->par))->alpha_chan_en) {
+ loc_alpha_en = true;
+ mxc_graphic_fbi = (struct mxcfb_info *)
+ (registered_fb[i]->par);
+ active_alpha_phy_addr =
+ mxc_fbi->cur_ipu_alpha_buf ?
+ mxc_graphic_fbi->alpha_phy_addr1 :
+ mxc_graphic_fbi->alpha_phy_addr0;
+ dev_dbg(info->device, "Updating SDC alpha "
+ "buf %d address=0x%08lX\n",
+ !mxc_fbi->cur_ipu_alpha_buf,
+ active_alpha_phy_addr);
+ break;
+ }
+ }
+ }
+
+ ret = wait_for_completion_timeout(&mxc_fbi->flip_complete, HZ/2);
+ if (ret == 0) {
+ dev_err(info->device, "timeout when waiting for flip irq\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!mxc_fbi->cur_prefetch) {
+ ++mxc_fbi->cur_ipu_buf;
+ mxc_fbi->cur_ipu_buf %= 3;
+ dev_dbg(info->device, "Updating SDC %s buf %d address=0x%08lX\n",
+ info->fix.id, mxc_fbi->cur_ipu_buf, base);
+ }
+ mxc_fbi->cur_ipu_alpha_buf = !mxc_fbi->cur_ipu_alpha_buf;
+
+ if (mxc_fbi->cur_prefetch)
+ goto next;
+
+ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
+ mxc_fbi->cur_ipu_buf, ipu_base) == 0) {
+next:
+ /* Update the DP local alpha buffer only for graphic plane */
+ if (loc_alpha_en && mxc_graphic_fbi == mxc_fbi &&
+ ipu_update_channel_buffer(mxc_graphic_fbi->ipu, mxc_graphic_fbi->ipu_ch,
+ IPU_ALPHA_IN_BUFFER,
+ mxc_fbi->cur_ipu_alpha_buf,
+ active_alpha_phy_addr) == 0) {
+ ipu_select_buffer(mxc_graphic_fbi->ipu, mxc_graphic_fbi->ipu_ch,
+ IPU_ALPHA_IN_BUFFER,
+ mxc_fbi->cur_ipu_alpha_buf);
+ }
+
+ /* update u/v offset */
+ if (!mxc_fbi->cur_prefetch) {
+ ipu_update_channel_offset(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER,
+ fbi_to_pixfmt(info, true),
+ fr_w,
+ fr_h,
+ fr_w,
+ 0, 0,
+ fr_yoff,
+ fr_xoff);
+
+ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER, mxc_fbi->cur_ipu_buf);
+ }
+ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
+ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
+ } else {
+ dev_err(info->device,
+ "Error updating SDC buf %d to address=0x%08lX, "
+ "current buf %d, buf0 ready %d, buf1 ready %d, "
+ "buf2 ready %d\n", mxc_fbi->cur_ipu_buf, base,
+ ipu_get_cur_buffer_idx(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER),
+ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER, 0),
+ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER, 1),
+ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
+ IPU_INPUT_BUFFER, 2));
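+		/*
+		 * Stepping the buffer index forward twice more (mod 3) undoes
+		 * the increment done above and restores the previous index.
+		 */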
+ if (!mxc_fbi->cur_prefetch) {
+ ++mxc_fbi->cur_ipu_buf;
+ mxc_fbi->cur_ipu_buf %= 3;
+ ++mxc_fbi->cur_ipu_buf;
+ mxc_fbi->cur_ipu_buf %= 3;
+ }
+ mxc_fbi->cur_ipu_alpha_buf = !mxc_fbi->cur_ipu_alpha_buf;
+ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
+ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
+ return -EBUSY;
+ }
+
+ dev_dbg(info->device, "Update complete\n");
+
+ info->var.yoffset = var->yoffset;
+ mxc_fbi->cur_var.xoffset = var->xoffset;
+ mxc_fbi->cur_var.yoffset = var->yoffset;
+
+ return 0;
+}
+
+/*
+ * Function to handle custom mmap for MXC framebuffer.
+ *
+ * @param fbi framebuffer information pointer
+ *
+ * @param vma Pointer to vm_area_struct
+ */
+static int mxcfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
+{
+ bool found = false;
+ u32 len;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct mxcfb_alloc_list *mem;
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+
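+	/*
+	 * Three regions may be mapped: the frame buffer itself, one of the
+	 * two local alpha buffers, or a buffer handed out by FBIO_ALLOC.
+	 */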
+ if (offset < fbi->fix.smem_len) {
+ /* mapping framebuffer memory */
+ len = fbi->fix.smem_len - offset;
+ vma->vm_pgoff = (fbi->fix.smem_start + offset) >> PAGE_SHIFT;
+ } else if ((vma->vm_pgoff ==
+ (mxc_fbi->alpha_phy_addr0 >> PAGE_SHIFT)) ||
+ (vma->vm_pgoff ==
+ (mxc_fbi->alpha_phy_addr1 >> PAGE_SHIFT))) {
+ len = mxc_fbi->alpha_mem_len;
+ } else {
+ list_for_each_entry(mem, &fb_alloc_list, list) {
+ if (offset == mem->phy_addr) {
+ found = true;
+ len = mem->size;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+ }
+
+ len = PAGE_ALIGN(len);
+ if (vma->vm_end - vma->vm_start > len)
+ return -EINVAL;
+
+	/* map the buffer as write-combined (bufferable, non-cacheable) */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ vma->vm_flags |= VM_IO;
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+ dev_dbg(fbi->device, "mmap remap_pfn_range failed\n");
+ return -ENOBUFS;
+ }
+
+ return 0;
+}
+
+/*!
+ * This structure contains the pointers to the control functions that are
+ * invoked by the core framebuffer driver to perform operations like
+ * blitting, rectangle filling, copy regions and cursor definition.
+ */
+static struct fb_ops mxcfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_set_par = mxcfb_set_par,
+ .fb_check_var = mxcfb_check_var,
+ .fb_setcolreg = mxcfb_setcolreg,
+ .fb_pan_display = mxcfb_pan_display,
+ .fb_ioctl = mxcfb_ioctl,
+ .fb_mmap = mxcfb_mmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_blank = mxcfb_blank,
+};
+
+static irqreturn_t mxcfb_irq_handler(int irq, void *dev_id)
+{
+ struct fb_info *fbi = dev_id;
+ struct mxcfb_info *mxc_fbi = fbi->par;
+
+ complete(&mxc_fbi->flip_complete);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mxcfb_nf_irq_handler(int irq, void *dev_id)
+{
+ struct fb_info *fbi = dev_id;
+ struct mxcfb_info *mxc_fbi = fbi->par;
+
+ complete(&mxc_fbi->vsync_complete);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mxcfb_alpha_irq_handler(int irq, void *dev_id)
+{
+ struct fb_info *fbi = dev_id;
+ struct mxcfb_info *mxc_fbi = fbi->par;
+
+ complete(&mxc_fbi->alpha_flip_complete);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Suspends the framebuffer and blanks the screen. Power management support
+ */
+static int mxcfb_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct fb_info *fbi = platform_get_drvdata(pdev);
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+ int saved_blank;
+#ifdef CONFIG_FB_MXC_LOW_PWR_DISPLAY
+ void *fbmem;
+#endif
+
+ if (mxc_fbi->ovfbi) {
+ struct mxcfb_info *mxc_fbi_fg =
+ (struct mxcfb_info *)mxc_fbi->ovfbi->par;
+
+ console_lock();
+ fb_set_suspend(mxc_fbi->ovfbi, 1);
+ saved_blank = mxc_fbi_fg->cur_blank;
+ mxcfb_blank(FB_BLANK_POWERDOWN, mxc_fbi->ovfbi);
+ mxc_fbi_fg->next_blank = saved_blank;
+ console_unlock();
+ }
+
+ console_lock();
+ fb_set_suspend(fbi, 1);
+ saved_blank = mxc_fbi->cur_blank;
+ mxcfb_blank(FB_BLANK_POWERDOWN, fbi);
+ mxc_fbi->next_blank = saved_blank;
+ console_unlock();
+
+ return 0;
+}
+
+/*
+ * Resumes the framebuffer and unblanks the screen. Power management support
+ */
+static int mxcfb_resume(struct platform_device *pdev)
+{
+ struct fb_info *fbi = platform_get_drvdata(pdev);
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+
+ console_lock();
+ mxcfb_blank(mxc_fbi->next_blank, fbi);
+ fb_set_suspend(fbi, 0);
+ console_unlock();
+
+ if (mxc_fbi->ovfbi) {
+ struct mxcfb_info *mxc_fbi_fg =
+ (struct mxcfb_info *)mxc_fbi->ovfbi->par;
+ console_lock();
+ mxcfb_blank(mxc_fbi_fg->next_blank, mxc_fbi->ovfbi);
+ fb_set_suspend(mxc_fbi->ovfbi, 0);
+ console_unlock();
+ }
+
+ return 0;
+}
+
+/*
+ * Main framebuffer functions
+ */
+
+/*!
+ * Allocates the DRAM memory for the frame buffer. This buffer is remapped
+ * into a non-cached, non-buffered, memory region to allow palette and pixel
+ * writes to occur without flushing the cache. Once this area is remapped,
+ * all virtual memory access to the video memory should occur at the new region.
+ *
+ * @param fbi framebuffer information pointer
+ *
+ * @return Error code indicating success or failure
+ */
+static int mxcfb_map_video_memory(struct fb_info *fbi)
+{
+ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
+
+ if (fbi->fix.smem_len < fbi->var.yres_virtual * fbi->fix.line_length)
+ fbi->fix.smem_len = fbi->var.yres_virtual *
+ fbi->fix.line_length;
+
+ if (mxc_fbi->resolve && mxc_fbi->gpu_sec_buf_off) {
+ if (fbi->var.vmode & FB_VMODE_YWRAP)
+ fbi->fix.smem_len = mxc_fbi->gpu_sec_buf_off +
+ fbi->fix.smem_len / 2;
+ else
+ fbi->fix.smem_len = mxc_fbi->gpu_sec_buf_off *
+ (fbi->var.yres_virtual / fbi->var.yres) +
+ fbi->fix.smem_len / 2;
+ }
+
+ fbi->screen_base = dma_alloc_writecombine(fbi->device,
+ fbi->fix.smem_len,
+ (dma_addr_t *)&fbi->fix.smem_start,
+ GFP_DMA | GFP_KERNEL);
+	if (!fbi->screen_base) {
+ dev_err(fbi->device, "Unable to allocate framebuffer memory\n");
+ fbi->fix.smem_len = 0;
+ fbi->fix.smem_start = 0;
+ return -EBUSY;
+ }
+
+ dev_dbg(fbi->device, "allocated fb @ paddr=0x%08X, size=%d.\n",
+ (uint32_t) fbi->fix.smem_start, fbi->fix.smem_len);
+
+ fbi->screen_size = fbi->fix.smem_len;
+
+ /* Clear the screen */
+ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
+
+ return 0;
+}
+
+/*!
+ * De-allocates the DRAM memory for the frame buffer.
+ *
+ * @param fbi framebuffer information pointer
+ *
+ * @return Error code indicating success or failure
+ */
+static int mxcfb_unmap_video_memory(struct fb_info *fbi)
+{
+ dma_free_writecombine(fbi->device, fbi->fix.smem_len,
+ fbi->screen_base, fbi->fix.smem_start);
+ fbi->screen_base = 0;
+ fbi->fix.smem_start = 0;
+ fbi->fix.smem_len = 0;
+ return 0;
+}
+
+/*!
+ * Initializes the framebuffer information pointer. After allocating
+ * sufficient memory for the framebuffer structure, the fields are
+ * filled with custom information passed in from the configurable
+ * structures. This includes information such as bits per pixel,
+ * color maps, screen width/height and RGBA offsets.
+ *
+ * @return Framebuffer structure initialized with our information
+ */
+static struct fb_info *mxcfb_init_fbinfo(struct device *dev, struct fb_ops *ops)
+{
+ struct fb_info *fbi;
+ struct mxcfb_info *mxcfbi;
+ struct ipuv3_fb_platform_data *plat_data = dev->platform_data;
+
+ /*
+ * Allocate sufficient memory for the fb structure
+ */
+ fbi = framebuffer_alloc(sizeof(struct mxcfb_info), dev);
+ if (!fbi)
+ return NULL;
+
+ mxcfbi = (struct mxcfb_info *)fbi->par;
+
+ fbi->var.activate = FB_ACTIVATE_NOW;
+
+ bpp_to_var(plat_data->default_bpp, &fbi->var);
+
+ fbi->fbops = ops;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->pseudo_palette = mxcfbi->pseudo_palette;
+
+ /*
+ * Allocate colormap
+ */
+ fb_alloc_cmap(&fbi->cmap, 16, 0);
+
+ return fbi;
+}
+
+static ssize_t show_disp_chan(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
+
+ if (mxcfbi->ipu_ch == MEM_BG_SYNC)
+ return sprintf(buf, "2-layer-fb-bg\n");
+ else if (mxcfbi->ipu_ch == MEM_FG_SYNC)
+ return sprintf(buf, "2-layer-fb-fg\n");
+ else if (mxcfbi->ipu_ch == MEM_DC_SYNC)
+ return sprintf(buf, "1-layer-fb\n");
+ else
+ return sprintf(buf, "err: no display chan\n");
+}
+
+static ssize_t swap_disp_chan(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
+ struct mxcfb_info *fg_mxcfbi = NULL;
+
+ console_lock();
+	/* the swap only happens between DP-BG and DC, while DP-FG is disabled */
+ if (((mxcfbi->ipu_ch == MEM_BG_SYNC) &&
+ (strstr(buf, "1-layer-fb") != NULL)) ||
+ ((mxcfbi->ipu_ch == MEM_DC_SYNC) &&
+ (strstr(buf, "2-layer-fb-bg") != NULL))) {
+ struct fb_info *fbi_fg;
+
+ fbi_fg = found_registered_fb(MEM_FG_SYNC, mxcfbi->ipu_id);
+ if (fbi_fg)
+ fg_mxcfbi = (struct mxcfb_info *)fbi_fg->par;
+
+ if (!fg_mxcfbi ||
+ fg_mxcfbi->cur_blank == FB_BLANK_UNBLANK) {
+ dev_err(dev,
+				"Cannot switch while fb2 (fb-fg) is on.\n");
+ console_unlock();
+ return count;
+ }
+
+ if (swap_channels(info) < 0)
+ dev_err(dev, "Swap display channel failed.\n");
+ }
+
+ console_unlock();
+ return count;
+}
+static DEVICE_ATTR(fsl_disp_property, S_IWUSR | S_IRUGO,
+ show_disp_chan, swap_disp_chan);
+
+static ssize_t show_disp_dev(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
+
+ if (mxcfbi->ipu_ch == MEM_FG_SYNC)
+ return sprintf(buf, "overlay\n");
+ else
+ return sprintf(buf, "%s\n", mxcfbi->dispdrv->drv->name);
+}
+static DEVICE_ATTR(fsl_disp_dev_property, S_IRUGO, show_disp_dev, NULL);
+
+static int mxcfb_get_crtc(struct device *dev, struct mxcfb_info *mxcfbi,
+ enum crtc crtc)
+{
+ int i = 0;
+
+ for (; i < ARRAY_SIZE(ipu_di_crtc_maps); i++)
+ if (ipu_di_crtc_maps[i].crtc == crtc) {
+ mxcfbi->ipu_id = ipu_di_crtc_maps[i].ipu_id;
+ mxcfbi->ipu_di = ipu_di_crtc_maps[i].ipu_di;
+ return 0;
+ }
+
+ dev_err(dev, "failed to get valid crtc\n");
+ return -EINVAL;
+}
+
+static int mxcfb_dispdrv_init(struct platform_device *pdev,
+ struct fb_info *fbi)
+{
+ struct ipuv3_fb_platform_data *plat_data = pdev->dev.platform_data;
+ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
+ struct mxc_dispdrv_setting setting;
+ char disp_dev[32], *default_dev = "lcd";
+ int ret = 0;
+
+ setting.if_fmt = plat_data->interface_pix_fmt;
+ setting.dft_mode_str = plat_data->mode_str;
+ setting.default_bpp = plat_data->default_bpp;
+ if (!setting.default_bpp)
+ setting.default_bpp = 16;
+ setting.fbi = fbi;
+ if (!strlen(plat_data->disp_dev)) {
+ memcpy(disp_dev, default_dev, strlen(default_dev));
+ disp_dev[strlen(default_dev)] = '\0';
+ } else {
+ memcpy(disp_dev, plat_data->disp_dev,
+ strlen(plat_data->disp_dev));
+ disp_dev[strlen(plat_data->disp_dev)] = '\0';
+ }
+
+ mxcfbi->dispdrv = mxc_dispdrv_gethandle(disp_dev, &setting);
+ if (IS_ERR(mxcfbi->dispdrv)) {
+ ret = PTR_ERR(mxcfbi->dispdrv);
+ dev_err(&pdev->dev, "NO mxc display driver found!\n");
+ return ret;
+ } else {
+ /* fix-up */
+ mxcfbi->ipu_di_pix_fmt = setting.if_fmt;
+ mxcfbi->default_bpp = setting.default_bpp;
+
+ ret = mxcfb_get_crtc(&pdev->dev, mxcfbi, setting.crtc);
+ if (ret)
+ return ret;
+
+ dev_dbg(&pdev->dev, "di_pixfmt:0x%x, bpp:0x%x, di:%d, ipu:%d\n",
+ setting.if_fmt, setting.default_bpp,
+ mxcfbi->ipu_di, mxcfbi->ipu_id);
+ }
+
+ dev_info(&pdev->dev, "registered mxc display driver %s\n", disp_dev);
+
+ return ret;
+}
+
+/*
+ * Parse user-specified options (`video=mxcfb0:')
+ * example:
+ * video=mxcfb0:dev=lcd,800x480M-16@55,if=RGB565,bpp=16,noaccel
+ * video=mxcfb0:dev=lcd,800x480M-16@55,if=RGB565,fbpix=RGB565
+ */
+static int mxcfb_option_setup(struct platform_device *pdev, struct fb_info *fbi)
+{
+ struct ipuv3_fb_platform_data *pdata = pdev->dev.platform_data;
+ char *options, *opt, *fb_mode_str = NULL;
+ char name[] = "mxcfb0";
+ uint32_t fb_pix_fmt = 0;
+
+ name[5] += pdev->id;
+ if (fb_get_options(name, &options)) {
+ dev_err(&pdev->dev, "Can't get fb option for %s!\n", name);
+ return -ENODEV;
+ }
+
+ if (!options || !*options)
+ return 0;
+
+ while ((opt = strsep(&options, ",")) != NULL) {
+ if (!*opt)
+ continue;
+
+ if (!strncmp(opt, "dev=", 4)) {
+ memcpy(pdata->disp_dev, opt + 4, strlen(opt) - 4);
+ pdata->disp_dev[strlen(opt) - 4] = '\0';
+ } else if (!strncmp(opt, "if=", 3)) {
+ if (!strncmp(opt+3, "RGB24", 5))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB24;
+ else if (!strncmp(opt+3, "BGR24", 5))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_BGR24;
+ else if (!strncmp(opt+3, "GBR24", 5))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_GBR24;
+ else if (!strncmp(opt+3, "RGB565", 6))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB565;
+ else if (!strncmp(opt+3, "RGB666", 6))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB666;
+ else if (!strncmp(opt+3, "YUV444", 6))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_YUV444;
+ else if (!strncmp(opt+3, "LVDS666", 7))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_LVDS666;
+ else if (!strncmp(opt+3, "YUYV16", 6))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_YUYV;
+ else if (!strncmp(opt+3, "UYVY16", 6))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_UYVY;
+ else if (!strncmp(opt+3, "YVYU16", 6))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_YVYU;
+ else if (!strncmp(opt+3, "VYUY16", 6))
+ pdata->interface_pix_fmt = IPU_PIX_FMT_VYUY;
+ } else if (!strncmp(opt, "fbpix=", 6)) {
+ if (!strncmp(opt+6, "RGB24", 5))
+ fb_pix_fmt = IPU_PIX_FMT_RGB24;
+ else if (!strncmp(opt+6, "BGR24", 5))
+ fb_pix_fmt = IPU_PIX_FMT_BGR24;
+ else if (!strncmp(opt+6, "RGB32", 5))
+ fb_pix_fmt = IPU_PIX_FMT_RGB32;
+ else if (!strncmp(opt+6, "BGR32", 5))
+ fb_pix_fmt = IPU_PIX_FMT_BGR32;
+ else if (!strncmp(opt+6, "ABGR32", 6))
+ fb_pix_fmt = IPU_PIX_FMT_ABGR32;
+ else if (!strncmp(opt+6, "RGB565", 6))
+ fb_pix_fmt = IPU_PIX_FMT_RGB565;
+ else if (!strncmp(opt+6, "BGRA4444", 8))
+ fb_pix_fmt = IPU_PIX_FMT_BGRA4444;
+ else if (!strncmp(opt+6, "BGRA5551", 8))
+ fb_pix_fmt = IPU_PIX_FMT_BGRA5551;
+
+ if (fb_pix_fmt) {
+ pixfmt_to_var(fb_pix_fmt, &fbi->var);
+ pdata->default_bpp =
+ fbi->var.bits_per_pixel;
+ }
+ } else if (!strncmp(opt, "int_clk", 7)) {
+ pdata->int_clk = true;
+ continue;
+ } else if (!strncmp(opt, "bpp=", 4)) {
+			/* bpp setting cannot overwrite fbpix setting */
+ if (fb_pix_fmt)
+ continue;
+
+ pdata->default_bpp =
+ simple_strtoul(opt + 4, NULL, 0);
+
+ fb_pix_fmt = bpp_to_pixfmt(pdata->default_bpp);
+ if (fb_pix_fmt)
+ pixfmt_to_var(fb_pix_fmt, &fbi->var);
+ } else
+ fb_mode_str = opt;
+ }
+
+ if (fb_mode_str)
+ pdata->mode_str = fb_mode_str;
+
+ return 0;
+}
+
+static int mxcfb_register(struct fb_info *fbi)
+{
+ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
+ struct fb_videomode m;
+ int ret = 0;
+ char bg0_id[] = "DISP3 BG";
+ char bg1_id[] = "DISP3 BG - DI1";
+ char fg_id[] = "DISP3 FG";
+
+ if (mxcfbi->ipu_di == 0) {
+ bg0_id[4] += mxcfbi->ipu_id;
+ strcpy(fbi->fix.id, bg0_id);
+ } else if (mxcfbi->ipu_di == 1) {
+ bg1_id[4] += mxcfbi->ipu_id;
+ strcpy(fbi->fix.id, bg1_id);
+ } else { /* Overlay */
+ fg_id[4] += mxcfbi->ipu_id;
+ strcpy(fbi->fix.id, fg_id);
+ }
+
+ mxcfb_check_var(&fbi->var, fbi);
+
+ mxcfb_set_fix(fbi);
+
+	/* Add the first mode to the fbi modelist. */
+ if (!fbi->modelist.next || !fbi->modelist.prev)
+ INIT_LIST_HEAD(&fbi->modelist);
+ fb_var_to_videomode(&m, &fbi->var);
+ fb_add_videomode(&m, &fbi->modelist);
+
+ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq,
+ mxcfb_irq_handler, IPU_IRQF_ONESHOT, MXCFB_NAME, fbi) != 0) {
+ dev_err(fbi->device, "Error registering EOF irq handler.\n");
+ ret = -EBUSY;
+ goto err0;
+ }
+ ipu_disable_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq);
+ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq,
+ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT, MXCFB_NAME, fbi) != 0) {
+ dev_err(fbi->device, "Error registering NFACK irq handler.\n");
+ ret = -EBUSY;
+ goto err1;
+ }
+ ipu_disable_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq);
+
+ if (mxcfbi->ipu_alp_ch_irq != -1)
+ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq,
+ mxcfb_alpha_irq_handler, IPU_IRQF_ONESHOT,
+ MXCFB_NAME, fbi) != 0) {
+ dev_err(fbi->device, "Error registering alpha irq "
+ "handler.\n");
+ ret = -EBUSY;
+ goto err2;
+ }
+
+ if (!mxcfbi->late_init) {
+ fbi->var.activate |= FB_ACTIVATE_FORCE;
+ console_lock();
+ fbi->flags |= FBINFO_MISC_USEREVENT;
+ ret = fb_set_var(fbi, &fbi->var);
+ fbi->flags &= ~FBINFO_MISC_USEREVENT;
+ console_unlock();
+ if (ret < 0) {
+ dev_err(fbi->device, "Error fb_set_var ret:%d\n", ret);
+ goto err3;
+ }
+
+ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
+ console_lock();
+ ret = fb_blank(fbi, FB_BLANK_UNBLANK);
+ console_unlock();
+ if (ret < 0) {
+ dev_err(fbi->device,
+ "Error fb_blank ret:%d\n", ret);
+ goto err4;
+ }
+ }
+ } else {
+		/*
+		 * Set up the channel again even though the bootloader
+		 * has already done this, so that set_par() can stop the
+		 * channel cleanly and re-initialize it.
+		 */
+ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
+ console_lock();
+ _setup_disp_channel1(fbi);
+ ipu_enable_channel(mxcfbi->ipu, mxcfbi->ipu_ch);
+ console_unlock();
+ }
+ }
+
+ ret = register_framebuffer(fbi);
+ if (ret < 0)
+ goto err5;
+
+ return ret;
+err5:
+ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
+ console_lock();
+ if (!mxcfbi->late_init)
+ fb_blank(fbi, FB_BLANK_POWERDOWN);
+ else {
+ ipu_disable_channel(mxcfbi->ipu, mxcfbi->ipu_ch,
+ true);
+ ipu_uninit_channel(mxcfbi->ipu, mxcfbi->ipu_ch);
+ }
+ console_unlock();
+ }
+err4:
+err3:
+ if (mxcfbi->ipu_alp_ch_irq != -1)
+ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq, fbi);
+err2:
+ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq, fbi);
+err1:
+ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq, fbi);
+err0:
+ return ret;
+}
+
+static void mxcfb_unregister(struct fb_info *fbi)
+{
+ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
+
+ if (mxcfbi->ipu_alp_ch_irq != -1)
+ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq, fbi);
+ if (mxcfbi->ipu_ch_irq)
+ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq, fbi);
+ if (mxcfbi->ipu_ch_nf_irq)
+ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq, fbi);
+
+ unregister_framebuffer(fbi);
+}
+
+static int mxcfb_setup_overlay(struct platform_device *pdev,
+ struct fb_info *fbi_bg, struct resource *res)
+{
+ struct fb_info *ovfbi;
+ struct mxcfb_info *mxcfbi_bg = (struct mxcfb_info *)fbi_bg->par;
+ struct mxcfb_info *mxcfbi_fg;
+ int ret = 0;
+
+ ovfbi = mxcfb_init_fbinfo(&pdev->dev, &mxcfb_ops);
+ if (!ovfbi) {
+ ret = -ENOMEM;
+ goto init_ovfbinfo_failed;
+ }
+ mxcfbi_fg = (struct mxcfb_info *)ovfbi->par;
+
+ mxcfbi_fg->ipu = ipu_get_soc(mxcfbi_bg->ipu_id);
+ if (IS_ERR(mxcfbi_fg->ipu)) {
+ ret = -ENODEV;
+ goto get_ipu_failed;
+ }
+ mxcfbi_fg->ipu_id = mxcfbi_bg->ipu_id;
+ mxcfbi_fg->ipu_ch_irq = IPU_IRQ_FG_SYNC_EOF;
+ mxcfbi_fg->ipu_ch_nf_irq = IPU_IRQ_FG_SYNC_NFACK;
+ mxcfbi_fg->ipu_alp_ch_irq = IPU_IRQ_FG_ALPHA_SYNC_EOF;
+ mxcfbi_fg->ipu_ch = MEM_FG_SYNC;
+ mxcfbi_fg->ipu_di = -1;
+ mxcfbi_fg->ipu_di_pix_fmt = mxcfbi_bg->ipu_di_pix_fmt;
+ mxcfbi_fg->overlay = true;
+ mxcfbi_fg->cur_blank = mxcfbi_fg->next_blank = FB_BLANK_POWERDOWN;
+ mxcfbi_fg->prefetch = false;
+ mxcfbi_fg->resolve = false;
+ mxcfbi_fg->pre_num = -1;
+
+ /* Need dummy values until real panel is configured */
+ ovfbi->var.xres = 240;
+ ovfbi->var.yres = 320;
+
+ if (res && res->start && res->end) {
+ ovfbi->fix.smem_len = res->end - res->start + 1;
+ ovfbi->fix.smem_start = res->start;
+ ovfbi->screen_base = ioremap(
+ ovfbi->fix.smem_start,
+ ovfbi->fix.smem_len);
+ }
+
+ ret = mxcfb_register(ovfbi);
+ if (ret < 0)
+ goto register_ov_failed;
+
+ mxcfbi_bg->ovfbi = ovfbi;
+
+ return ret;
+
+register_ov_failed:
+get_ipu_failed:
+ fb_dealloc_cmap(&ovfbi->cmap);
+ framebuffer_release(ovfbi);
+init_ovfbinfo_failed:
+ return ret;
+}
+
+static void mxcfb_unsetup_overlay(struct fb_info *fbi_bg)
+{
+ struct mxcfb_info *mxcfbi_bg = (struct mxcfb_info *)fbi_bg->par;
+ struct fb_info *ovfbi = mxcfbi_bg->ovfbi;
+
+ mxcfb_unregister(ovfbi);
+
+	fb_dealloc_cmap(&ovfbi->cmap);
+ framebuffer_release(ovfbi);
+}
+
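+/* Track which IPU/DI combinations are already claimed by an fb instance. */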
+static bool ipu_usage[2][2];
+static int ipu_test_set_usage(int ipu, int di)
+{
+ if (ipu_usage[ipu][di])
+ return -EBUSY;
+ else
+ ipu_usage[ipu][di] = true;
+ return 0;
+}
+
+static void ipu_clear_usage(int ipu, int di)
+{
+ ipu_usage[ipu][di] = false;
+}
+
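+/*
+ * Illustrative device tree node consumed by this function (the property
+ * names are the ones read below; the values are only an example):
+ *
+ *	fb0 {
+ *		compatible = "fsl,mxc_sdc_fb";
+ *		disp_dev = "ldb";
+ *		interface_pix_fmt = "RGB666";
+ *		mode_str = "LDB-XGA";
+ *		default_bpp = <16>;
+ *		int_clk = <0>;
+ *		late_init = <0>;
+ *	};
+ */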
+static int mxcfb_get_of_property(struct platform_device *pdev,
+ struct ipuv3_fb_platform_data *plat_data)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const char *disp_dev;
+ const char *mode_str = NULL;
+ const char *pixfmt;
+ int err;
+ int len;
+ u32 bpp, int_clk;
+ u32 late_init;
+
+ err = of_property_read_string(np, "disp_dev", &disp_dev);
+ if (err < 0) {
+ dev_dbg(&pdev->dev, "get of property disp_dev fail\n");
+ return err;
+ }
+ err = of_property_read_string(np, "mode_str", &mode_str);
+ if (err < 0)
+ dev_dbg(&pdev->dev, "get of property mode_str fail\n");
+ err = of_property_read_string(np, "interface_pix_fmt", &pixfmt);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property pix fmt fail\n");
+ return err;
+ }
+ err = of_property_read_u32(np, "default_bpp", &bpp);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property bpp fail\n");
+ return err;
+ }
+ err = of_property_read_u32(np, "int_clk", &int_clk);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property int_clk fail\n");
+ return err;
+ }
+ err = of_property_read_u32(np, "late_init", &late_init);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property late_init fail\n");
+ return err;
+ }
+
+ plat_data->prefetch = of_property_read_bool(np, "prefetch");
+
+ if (!strncmp(pixfmt, "RGB24", 5))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB24;
+ else if (!strncmp(pixfmt, "BGR24", 5))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_BGR24;
+ else if (!strncmp(pixfmt, "GBR24", 5))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_GBR24;
+ else if (!strncmp(pixfmt, "RGB565", 6))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB565;
+ else if (!strncmp(pixfmt, "RGB666", 6))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB666;
+ else if (!strncmp(pixfmt, "YUV444", 6))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_YUV444;
+ else if (!strncmp(pixfmt, "LVDS666", 7))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_LVDS666;
+ else if (!strncmp(pixfmt, "YUYV16", 6))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_YUYV;
+ else if (!strncmp(pixfmt, "UYVY16", 6))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_UYVY;
+ else if (!strncmp(pixfmt, "YVYU16", 6))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_YVYU;
+ else if (!strncmp(pixfmt, "VYUY16", 6))
+ plat_data->interface_pix_fmt = IPU_PIX_FMT_VYUY;
+ else {
+ dev_err(&pdev->dev, "err interface_pix_fmt!\n");
+ return -ENOENT;
+ }
+
+ len = min(sizeof(plat_data->disp_dev) - 1, strlen(disp_dev));
+ memcpy(plat_data->disp_dev, disp_dev, len);
+ plat_data->disp_dev[len] = '\0';
+ plat_data->mode_str = (char *)mode_str;
+ plat_data->default_bpp = bpp;
+ plat_data->int_clk = (bool)int_clk;
+ plat_data->late_init = (bool)late_init;
+ return err;
+}
+
+/*!
+ * Probe routine for the framebuffer driver. It is called during the
+ * driver binding process. The following functions are performed in
+ * this routine: Framebuffer initialization, Memory allocation and
+ * mapping, Framebuffer registration, IPU initialization.
+ *
+ * @return Appropriate error code to the kernel common code
+ */
+static int mxcfb_probe(struct platform_device *pdev)
+{
+ struct ipuv3_fb_platform_data *plat_data;
+ struct fb_info *fbi;
+ struct mxcfb_info *mxcfbi;
+ struct resource *res;
+ int ret = 0;
+
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "mxcfb");
+ if (pdev->id < 0) {
+ dev_err(&pdev->dev, "can not get alias id\n");
+ return pdev->id;
+ }
+
+ plat_data = devm_kzalloc(&pdev->dev, sizeof(struct
+ ipuv3_fb_platform_data), GFP_KERNEL);
+ if (!plat_data)
+ return -ENOMEM;
+ pdev->dev.platform_data = plat_data;
+
+ ret = mxcfb_get_of_property(pdev, plat_data);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "get mxcfb of property fail\n");
+ return ret;
+ }
+
+ /* Initialize FB structures */
+ fbi = mxcfb_init_fbinfo(&pdev->dev, &mxcfb_ops);
+ if (!fbi) {
+ ret = -ENOMEM;
+ goto init_fbinfo_failed;
+ }
+
+ ret = mxcfb_option_setup(pdev, fbi);
+ if (ret)
+ goto get_fb_option_failed;
+
+ mxcfbi = (struct mxcfb_info *)fbi->par;
+ mxcfbi->ipu_int_clk = plat_data->int_clk;
+ mxcfbi->late_init = plat_data->late_init;
+ mxcfbi->first_set_par = true;
+ mxcfbi->prefetch = plat_data->prefetch;
+ mxcfbi->pre_num = -1;
+
+ ret = mxcfb_dispdrv_init(pdev, fbi);
+ if (ret < 0)
+ goto init_dispdrv_failed;
+
+ ret = ipu_test_set_usage(mxcfbi->ipu_id, mxcfbi->ipu_di);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "ipu%d-di%d already in use\n",
+ mxcfbi->ipu_id, mxcfbi->ipu_di);
+ goto ipu_in_busy;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res && res->start && res->end) {
+ fbi->fix.smem_len = res->end - res->start + 1;
+ fbi->fix.smem_start = res->start;
+ fbi->screen_base = ioremap(fbi->fix.smem_start, fbi->fix.smem_len);
+		/* Do not clear the fb content drawn by the bootloader. */
+ if (!mxcfbi->late_init)
+ memset(fbi->screen_base, 0, fbi->fix.smem_len);
+ }
+
+ mxcfbi->ipu = ipu_get_soc(mxcfbi->ipu_id);
+ if (IS_ERR(mxcfbi->ipu)) {
+ ret = -ENODEV;
+ goto get_ipu_failed;
+ }
+
+ /* first user uses DP with alpha feature */
+ if (!g_dp_in_use[mxcfbi->ipu_id]) {
+ mxcfbi->ipu_ch_irq = IPU_IRQ_BG_SYNC_EOF;
+ mxcfbi->ipu_ch_nf_irq = IPU_IRQ_BG_SYNC_NFACK;
+ mxcfbi->ipu_alp_ch_irq = IPU_IRQ_BG_ALPHA_SYNC_EOF;
+ mxcfbi->ipu_ch = MEM_BG_SYNC;
+ /* Unblank the primary fb only by default */
+ if (pdev->id == 0)
+ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_UNBLANK;
+ else
+ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_POWERDOWN;
+
+ ret = mxcfb_register(fbi);
+ if (ret < 0)
+ goto mxcfb_register_failed;
+
+ ipu_disp_set_global_alpha(mxcfbi->ipu, mxcfbi->ipu_ch,
+ true, 0x80);
+ ipu_disp_set_color_key(mxcfbi->ipu, mxcfbi->ipu_ch, false, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ret = mxcfb_setup_overlay(pdev, fbi, res);
+
+ if (ret < 0) {
+ mxcfb_unregister(fbi);
+ goto mxcfb_setupoverlay_failed;
+ }
+
+ g_dp_in_use[mxcfbi->ipu_id] = true;
+
+ ret = device_create_file(mxcfbi->ovfbi->dev,
+ &dev_attr_fsl_disp_property);
+ if (ret)
+ dev_err(mxcfbi->ovfbi->dev, "Error %d on creating "
+ "file for disp property\n",
+ ret);
+
+ ret = device_create_file(mxcfbi->ovfbi->dev,
+ &dev_attr_fsl_disp_dev_property);
+ if (ret)
+ dev_err(mxcfbi->ovfbi->dev, "Error %d on creating "
+ "file for disp device "
+						    "property\n", ret);
+ } else {
+ mxcfbi->ipu_ch_irq = IPU_IRQ_DC_SYNC_EOF;
+ mxcfbi->ipu_ch_nf_irq = IPU_IRQ_DC_SYNC_NFACK;
+ mxcfbi->ipu_alp_ch_irq = -1;
+ mxcfbi->ipu_ch = MEM_DC_SYNC;
+ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_POWERDOWN;
+
+ ret = mxcfb_register(fbi);
+ if (ret < 0)
+ goto mxcfb_register_failed;
+ }
+
+ platform_set_drvdata(pdev, fbi);
+
+ ret = device_create_file(fbi->dev, &dev_attr_fsl_disp_property);
+ if (ret)
+ dev_err(&pdev->dev, "Error %d on creating file for disp "
+ "property\n", ret);
+
+ ret = device_create_file(fbi->dev, &dev_attr_fsl_disp_dev_property);
+ if (ret)
+ dev_err(&pdev->dev, "Error %d on creating file for disp "
+			"device property\n", ret);
+
+ return 0;
+
+mxcfb_setupoverlay_failed:
+mxcfb_register_failed:
+get_ipu_failed:
+ ipu_clear_usage(mxcfbi->ipu_id, mxcfbi->ipu_di);
+ipu_in_busy:
+init_dispdrv_failed:
+ fb_dealloc_cmap(&fbi->cmap);
+ framebuffer_release(fbi);
+get_fb_option_failed:
+init_fbinfo_failed:
+ return ret;
+}
+
+static int mxcfb_remove(struct platform_device *pdev)
+{
+ struct fb_info *fbi = platform_get_drvdata(pdev);
+	struct mxcfb_info *mxc_fbi;
+
+	if (!fbi)
+		return 0;
+
+	mxc_fbi = fbi->par;
+
+ device_remove_file(fbi->dev, &dev_attr_fsl_disp_dev_property);
+ device_remove_file(fbi->dev, &dev_attr_fsl_disp_property);
+ mxcfb_blank(FB_BLANK_POWERDOWN, fbi);
+ mxcfb_unregister(fbi);
+ mxcfb_unmap_video_memory(fbi);
+
+ if (mxc_fbi->ovfbi) {
+ device_remove_file(mxc_fbi->ovfbi->dev,
+ &dev_attr_fsl_disp_dev_property);
+ device_remove_file(mxc_fbi->ovfbi->dev,
+ &dev_attr_fsl_disp_property);
+ mxcfb_blank(FB_BLANK_POWERDOWN, mxc_fbi->ovfbi);
+ mxcfb_unsetup_overlay(fbi);
+ mxcfb_unmap_video_memory(mxc_fbi->ovfbi);
+ }
+
+ ipu_clear_usage(mxc_fbi->ipu_id, mxc_fbi->ipu_di);
+	fb_dealloc_cmap(&fbi->cmap);
+ framebuffer_release(fbi);
+ return 0;
+}
+
+static const struct of_device_id imx_mxcfb_dt_ids[] = {
+ { .compatible = "fsl,mxc_sdc_fb"},
+ { /* sentinel */ }
+};
+
+/*!
+ * Platform driver callbacks (probe, remove and the legacy suspend/resume
+ * power management hooks) registered for the MXC framebuffer devices.
+ */
+static struct platform_driver mxcfb_driver = {
+ .driver = {
+ .name = MXCFB_NAME,
+ .of_match_table = imx_mxcfb_dt_ids,
+ },
+ .probe = mxcfb_probe,
+ .remove = mxcfb_remove,
+ .suspend = mxcfb_suspend,
+ .resume = mxcfb_resume,
+};
+
+/*!
+ * Main entry function for the framebuffer driver. It registers the mxcfb
+ * platform driver (including its suspend/resume callbacks) with the kernel;
+ * the individual framebuffers are registered with the core framebuffer
+ * layer (\b fbmem.c) when a device is probed.
+ *
+ * @return Error code indicating success or failure
+ */
+int __init mxcfb_init(void)
+{
+ return platform_driver_register(&mxcfb_driver);
+}
+
+void mxcfb_exit(void)
+{
+ platform_driver_unregister(&mxcfb_driver);
+}
+
+module_init(mxcfb_init);
+module_exit(mxcfb_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC framebuffer driver");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("fb");
diff --git a/drivers/video/fbdev/mxc/mxc_lcdif.c b/drivers/video/fbdev/mxc/mxc_lcdif.c
new file mode 100644
index 000000000000..59d429c82017
--- /dev/null
+++ b/drivers/video/fbdev/mxc/mxc_lcdif.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2011-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/ipu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mxcfb.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
+#include "mxc_dispdrv.h"
+
+struct mxc_lcd_platform_data {
+ u32 default_ifmt;
+ u32 ipu_id;
+ u32 disp_id;
+};
+
+struct mxc_lcdif_data {
+ struct platform_device *pdev;
+ struct mxc_dispdrv_handle *disp_lcdif;
+};
+
+#define DISPDRV_LCD "lcd"
+
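+/*
+ * Fallback video modes used when no matching mode is supplied on the kernel
+ * command line.  struct fb_videomode initializer order: name, refresh (Hz),
+ * xres, yres, pixclock (ps), left margin, right margin, upper margin,
+ * lower margin, hsync_len, vsync_len, sync flags, vmode, flag.
+ */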
+static struct fb_videomode lcdif_modedb[] = {
+ {
+ /* 800x480 @ 57 Hz , pixel clk @ 27MHz */
+ "CLAA-WVGA", 57, 800, 480, 37037, 40, 60, 10, 10, 20, 10,
+ FB_SYNC_CLK_LAT_FALL,
+ FB_VMODE_NONINTERLACED,
+ 0,},
+ {
+ /* 800x480 @ 60 Hz , pixel clk @ 32MHz */
+ "SEIKO-WVGA", 60, 800, 480, 29850, 89, 164, 23, 10, 10, 10,
+ FB_SYNC_CLK_LAT_FALL,
+ FB_VMODE_NONINTERLACED,
+ 0,},
+};
+static int lcdif_modedb_sz = ARRAY_SIZE(lcdif_modedb);
+
+static int lcdif_init(struct mxc_dispdrv_handle *disp,
+ struct mxc_dispdrv_setting *setting)
+{
+ int ret, i;
+ struct mxc_lcdif_data *lcdif = mxc_dispdrv_getdata(disp);
+ struct device *dev = &lcdif->pdev->dev;
+ struct mxc_lcd_platform_data *plat_data = dev->platform_data;
+ struct fb_videomode *modedb = lcdif_modedb;
+ int modedb_sz = lcdif_modedb_sz;
+
+ /* use platform defined ipu/di */
+ ret = ipu_di_to_crtc(dev, plat_data->ipu_id,
+ plat_data->disp_id, &setting->crtc);
+ if (ret < 0)
+ return ret;
+
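+	/*
+	 * fb_find_mode() returns 0 if no usable mode was found; in that case
+	 * fall back to the first built-in mode and the platform default
+	 * interface pixel format.
+	 */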
+ ret = fb_find_mode(&setting->fbi->var, setting->fbi, setting->dft_mode_str,
+ modedb, modedb_sz, NULL, setting->default_bpp);
+ if (!ret) {
+ fb_videomode_to_var(&setting->fbi->var, &modedb[0]);
+ setting->if_fmt = plat_data->default_ifmt;
+ }
+
+ INIT_LIST_HEAD(&setting->fbi->modelist);
+ for (i = 0; i < modedb_sz; i++) {
+ struct fb_videomode m;
+ fb_var_to_videomode(&m, &setting->fbi->var);
+ if (fb_mode_is_equal(&m, &modedb[i])) {
+ fb_add_videomode(&modedb[i],
+ &setting->fbi->modelist);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void lcdif_deinit(struct mxc_dispdrv_handle *disp)
+{
+ /*TODO*/
+}
+
+static struct mxc_dispdrv_driver lcdif_drv = {
+ .name = DISPDRV_LCD,
+ .init = lcdif_init,
+ .deinit = lcdif_deinit,
+};
+
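+/*
+ * Read the panel configuration from the device tree.  As an illustrative
+ * sketch only (node name and values are hypothetical and board specific),
+ * a matching node could look like:
+ *
+ *	lcd {
+ *		compatible = "fsl,lcd";
+ *		ipu_id = <0>;
+ *		disp_id = <0>;
+ *		default_ifmt = "RGB565";
+ *	};
+ */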
+static int lcd_get_of_property(struct platform_device *pdev,
+ struct mxc_lcd_platform_data *plat_data)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int err;
+ u32 ipu_id, disp_id;
+ const char *default_ifmt;
+
+ err = of_property_read_string(np, "default_ifmt", &default_ifmt);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property default_ifmt fail\n");
+ return err;
+ }
+ err = of_property_read_u32(np, "ipu_id", &ipu_id);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
+ return err;
+ }
+ err = of_property_read_u32(np, "disp_id", &disp_id);
+ if (err) {
+ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
+ return err;
+ }
+
+ plat_data->ipu_id = ipu_id;
+ plat_data->disp_id = disp_id;
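+	/* Map the default_ifmt string from the device tree onto an IPU pixel
+	 * format code; unknown strings are rejected below. */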
+ if (!strncmp(default_ifmt, "RGB24", 5))
+ plat_data->default_ifmt = IPU_PIX_FMT_RGB24;
+ else if (!strncmp(default_ifmt, "BGR24", 5))
+ plat_data->default_ifmt = IPU_PIX_FMT_BGR24;
+ else if (!strncmp(default_ifmt, "GBR24", 5))
+ plat_data->default_ifmt = IPU_PIX_FMT_GBR24;
+ else if (!strncmp(default_ifmt, "RGB565", 6))
+ plat_data->default_ifmt = IPU_PIX_FMT_RGB565;
+ else if (!strncmp(default_ifmt, "RGB666", 6))
+ plat_data->default_ifmt = IPU_PIX_FMT_RGB666;
+ else if (!strncmp(default_ifmt, "YUV444", 6))
+ plat_data->default_ifmt = IPU_PIX_FMT_YUV444;
+ else if (!strncmp(default_ifmt, "LVDS666", 7))
+ plat_data->default_ifmt = IPU_PIX_FMT_LVDS666;
+ else if (!strncmp(default_ifmt, "YUYV16", 6))
+ plat_data->default_ifmt = IPU_PIX_FMT_YUYV;
+ else if (!strncmp(default_ifmt, "UYVY16", 6))
+ plat_data->default_ifmt = IPU_PIX_FMT_UYVY;
+ else if (!strncmp(default_ifmt, "YVYU16", 6))
+ plat_data->default_ifmt = IPU_PIX_FMT_YVYU;
+ else if (!strncmp(default_ifmt, "VYUY16", 6))
+ plat_data->default_ifmt = IPU_PIX_FMT_VYUY;
+ else {
+ dev_err(&pdev->dev, "err default_ifmt!\n");
+ return -ENOENT;
+ }
+
+ return err;
+}
+
+static int mxc_lcdif_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct pinctrl *pinctrl;
+ struct mxc_lcdif_data *lcdif;
+ struct mxc_lcd_platform_data *plat_data;
+
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+ lcdif = devm_kzalloc(&pdev->dev, sizeof(struct mxc_lcdif_data),
+ GFP_KERNEL);
+ if (!lcdif)
+ return -ENOMEM;
+ plat_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct mxc_lcd_platform_data),
+ GFP_KERNEL);
+ if (!plat_data)
+ return -ENOMEM;
+ pdev->dev.platform_data = plat_data;
+
+ ret = lcd_get_of_property(pdev, plat_data);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "get lcd of property fail\n");
+ return ret;
+ }
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ dev_err(&pdev->dev, "can't get/select pinctrl\n");
+ return PTR_ERR(pinctrl);
+ }
+
+ lcdif->pdev = pdev;
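+	/*
+	 * Register this panel with the mxc_dispdrv framework; the IPU
+	 * framebuffer driver later invokes lcdif_init() through lcdif_drv
+	 * to select the CRTC and initial video mode.
+	 */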
+ lcdif->disp_lcdif = mxc_dispdrv_register(&lcdif_drv);
+ mxc_dispdrv_setdata(lcdif->disp_lcdif, lcdif);
+
+ dev_set_drvdata(&pdev->dev, lcdif);
+ dev_dbg(&pdev->dev, "%s exit\n", __func__);
+
+	return 0;
+}
+
+static int mxc_lcdif_remove(struct platform_device *pdev)
+{
+ struct mxc_lcdif_data *lcdif = dev_get_drvdata(&pdev->dev);
+
+	mxc_dispdrv_puthandle(lcdif->disp_lcdif);
+	mxc_dispdrv_unregister(lcdif->disp_lcdif);
+	/* lcdif was allocated with devm_kzalloc(), so no explicit kfree() here */
+ return 0;
+}
+
+static const struct of_device_id imx_lcd_dt_ids[] = {
+ { .compatible = "fsl,lcd"},
+ { /* sentinel */ }
+};
+static struct platform_driver mxc_lcdif_driver = {
+ .driver = {
+ .name = "mxc_lcdif",
+ .of_match_table = imx_lcd_dt_ids,
+ },
+ .probe = mxc_lcdif_probe,
+ .remove = mxc_lcdif_remove,
+};
+
+static int __init mxc_lcdif_init(void)
+{
+ return platform_driver_register(&mxc_lcdif_driver);
+}
+
+static void __exit mxc_lcdif_exit(void)
+{
+ platform_driver_unregister(&mxc_lcdif_driver);
+}
+
+module_init(mxc_lcdif_init);
+module_exit(mxc_lcdif_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX ipuv3 LCD extern port driver");
+MODULE_LICENSE("GPL");