Diffstat (limited to 'include/linux')
-rw-r--r-- include/linux/busfreq-imx.h | 77
-rw-r--r-- include/linux/can/rx-offload.h | 7
-rw-r--r-- include/linux/clk-provider.h | 1
-rw-r--r-- include/linux/device_cooling.h | 45
-rw-r--r-- include/linux/dmaengine.h | 4
-rw-r--r-- include/linux/fb.h | 14
-rw-r--r-- include/linux/firmware/imx/dsp.h | 10
-rw-r--r-- include/linux/firmware/imx/ipc.h | 29
-rw-r--r-- include/linux/firmware/imx/sci.h | 9
-rw-r--r-- include/linux/firmware/imx/seco_mu_ioctl.h | 50
-rw-r--r-- include/linux/firmware/imx/svc/misc.h | 16
-rw-r--r-- include/linux/firmware/imx/svc/rm.h | 101
-rw-r--r-- include/linux/firmware/imx/svc/seco.h | 77
-rw-r--r-- include/linux/firmware/imx/types.h | 65
-rw-r--r-- include/linux/fsl/enetc_mdio.h | 55
-rw-r--r-- include/linux/fsl/mc.h | 496
-rw-r--r-- include/linux/fsl/svr.h | 97
-rw-r--r-- include/linux/fsl_bman.h | 532
-rw-r--r-- include/linux/fsl_ifc.h | 5
-rw-r--r-- include/linux/fsl_qman.h | 3910
-rw-r--r-- include/linux/fsl_usdpaa.h | 404
-rw-r--r-- include/linux/hantrodec.h | 29
-rwxr-xr-x include/linux/hx280enc.h | 31
-rw-r--r-- include/linux/imx_mic_epf.h | 32
-rw-r--r-- include/linux/imx_rpmsg.h | 43
-rw-r--r-- include/linux/imx_sema4.h | 83
-rw-r--r-- include/linux/ipu-v3-pre.h | 155
-rw-r--r-- include/linux/ipu-v3-prg.h | 61
-rw-r--r-- include/linux/ipu-v3.h | 770
-rw-r--r-- include/linux/ipu.h | 38
-rw-r--r-- include/linux/ivshmem.h | 30
-rw-r--r-- include/linux/mfd/max17135.h | 207
-rw-r--r-- include/linux/mfd/mxc-hdmi-core.h | 56
-rw-r--r-- include/linux/mfd/pca9450.h | 355
-rw-r--r-- include/linux/mfd/si476x-core.h | 2
-rw-r--r-- include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 50
-rw-r--r-- include/linux/mfd/syscon/imx7-iomuxc-gpr.h | 2
-rw-r--r-- include/linux/mii.h | 50
-rw-r--r-- include/linux/mipi_csi2.h | 81
-rw-r--r-- include/linux/mipi_dsi.h | 165
-rw-r--r-- include/linux/mipi_dsi_northwest.h | 164
-rw-r--r-- include/linux/mipi_dsi_samsung.h | 131
-rw-r--r-- include/linux/mmc/pm.h | 1
-rw-r--r-- include/linux/mmc/sdio_ids.h | 1
-rw-r--r-- include/linux/mod_devicetable.h | 15
-rw-r--r-- include/linux/mx8_mu.h | 48
-rw-r--r-- include/linux/mxc_dcic.h | 126
-rw-r--r-- include/linux/mxc_mlb.h | 55
-rw-r--r-- include/linux/mxc_sim_interface.h | 124
-rw-r--r-- include/linux/mxc_vpu.h | 118
-rw-r--r-- include/linux/mxcfb_epdc.h | 32
-rw-r--r-- include/linux/netdev_features.h | 2
-rw-r--r-- include/linux/of.h | 6
-rw-r--r-- include/linux/of_fdt.h | 11
-rw-r--r-- include/linux/pci.h | 1
-rw-r--r-- include/linux/pci_ids.h | 1
-rw-r--r-- include/linux/phy.h | 3
-rw-r--r-- include/linux/phy/phy-mixel-lvds-combo.h | 40
-rw-r--r-- include/linux/phy/phy-mixel-lvds.h | 39
-rw-r--r-- include/linux/phylink.h | 2
-rw-r--r-- include/linux/platform_data/brcmfmac.h | 1
-rw-r--r-- include/linux/platform_data/dma-imx-sdma.h | 8
-rw-r--r-- include/linux/platform_data/dma-imx.h | 13
-rw-r--r-- include/linux/platform_data/mmc-esdhc-imx.h | 1
-rw-r--r-- include/linux/pm_domain.h | 1
-rw-r--r-- include/linux/pm_wakeup.h | 9
-rw-r--r-- include/linux/pmic_status.h | 76
-rw-r--r-- include/linux/power/sabresd_battery.h | 65
-rw-r--r-- include/linux/pwm_backlight.h | 1
-rw-r--r-- include/linux/pxp_device.h | 56
-rw-r--r-- include/linux/pxp_dma.h | 70
-rw-r--r-- include/linux/regulator/fixed.h | 1
-rw-r--r-- include/linux/remoteproc.h | 4
-rw-r--r-- include/linux/skbuff.h | 1
-rw-r--r-- include/linux/stmmac.h | 13
-rw-r--r-- include/linux/tee_drv.h | 19
-rw-r--r-- include/linux/uio_driver.h | 2
-rw-r--r-- include/linux/usb/chipidea.h | 6
-rw-r--r-- include/linux/usb/hcd.h | 13
-rw-r--r-- include/linux/usb/otg-fsm.h | 1
-rw-r--r-- include/linux/usb/phy.h | 43
81 files changed, 9473 insertions(+), 95 deletions(-)
diff --git a/include/linux/busfreq-imx.h b/include/linux/busfreq-imx.h
new file mode 100644
index 000000000000..39c71a9f55eb
--- /dev/null
+++ b/include/linux/busfreq-imx.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012-2016 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MXC_BUSFREQ_H__
+#define __ASM_ARCH_MXC_BUSFREQ_H__
+
+#include <linux/notifier.h>
+#include <linux/regulator/consumer.h>
+
+/*
+ * This enumerates busfreq low power mode entry and exit.
+ */
+enum busfreq_event {
+ LOW_BUSFREQ_ENTER,
+ LOW_BUSFREQ_EXIT,
+};
+
+/*
+ * This enumerates the system bus and ddr frequencies in various modes.
+ * BUS_FREQ_HIGH - DDR @ 528MHz, AHB @ 132MHz.
+ * BUS_FREQ_MED - DDR @ 400MHz, AHB @ 132MHz.
+ * BUS_FREQ_AUDIO - DDR @ 50MHz/100MHz, AHB @ 24MHz.
+ * BUS_FREQ_LOW - DDR @ 24MHz, AHB @ 24MHz.
+ * BUS_FREQ_ULTRA_LOW - DDR @ 1MHz, AHB @ 3MHz.
+ *
+ * Drivers need to request/release the bus/ddr frequencies based on
+ * their performance requirements. Drivers cannot request/release
+ * BUS_FREQ_ULTRA_LOW mode, as this mode is entered automatically from
+ * either BUS_FREQ_AUDIO or BUS_FREQ_LOW mode.
+ */
+enum bus_freq_mode {
+ BUS_FREQ_HIGH,
+ BUS_FREQ_MED,
+ BUS_FREQ_AUDIO,
+ BUS_FREQ_LOW,
+ BUS_FREQ_ULTRA_LOW,
+};
+
+#if defined(CONFIG_HAVE_IMX_BUSFREQ) && !defined(CONFIG_ARM64)
+extern struct regulator *arm_reg;
+extern struct regulator *soc_reg;
+void request_bus_freq(enum bus_freq_mode mode);
+void release_bus_freq(enum bus_freq_mode mode);
+int register_busfreq_notifier(struct notifier_block *nb);
+int unregister_busfreq_notifier(struct notifier_block *nb);
+int get_bus_freq_mode(void);
+#elif defined(CONFIG_HAVE_IMX_BUSFREQ)
+void request_bus_freq(enum bus_freq_mode mode);
+void release_bus_freq(enum bus_freq_mode mode);
+int get_bus_freq_mode(void);
+#else
+static inline void request_bus_freq(enum bus_freq_mode mode)
+{
+}
+static inline void release_bus_freq(enum bus_freq_mode mode)
+{
+}
+static inline int register_busfreq_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline int unregister_busfreq_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline int get_bus_freq_mode(void)
+{
+ return BUS_FREQ_HIGH;
+}
+#endif
+#endif
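
A usage sketch (illustrative, not part of this header; the example_* names are hypothetical) of the request/release pairing the comment above describes:

        #include <linux/busfreq-imx.h>

        static void example_start_transfer(void)
        {
                /* hold DDR/AHB at full speed while the transfer runs */
                request_bus_freq(BUS_FREQ_HIGH);
                /* ... kick off the bandwidth-sensitive work ... */
        }

        static void example_finish_transfer(void)
        {
                /* every request must be balanced by a release */
                release_bus_freq(BUS_FREQ_HIGH);
        }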
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 01219f2902bf..1b78a0cfb615 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -15,9 +15,9 @@
struct can_rx_offload {
struct net_device *dev;
- unsigned int (*mailbox_read)(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int mb);
+ struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
+ unsigned int mb, u32 *timestamp,
+ bool drop);
struct sk_buff_head skb_queue;
u32 skb_queue_len_max;
@@ -44,7 +44,6 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
-void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2fdfe8061363..30b61ec0e167 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -32,6 +32,7 @@
#define CLK_OPS_PARENT_ENABLE BIT(12)
/* duty cycle call may be forwarded to the parent clock */
#define CLK_DUTY_CYCLE_PARENT BIT(13)
+#define CLK_SET_PARENT_NOCACHE BIT(14) /* do not use the cached clk parent */
struct clk;
struct clk_hw;
diff --git a/include/linux/device_cooling.h b/include/linux/device_cooling.h
new file mode 100644
index 000000000000..18d9e3b90ed9
--- /dev/null
+++ b/include/linux/device_cooling.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013-2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DEVICE_THERMAL_H__
+#define __DEVICE_THERMAL_H__
+
+#include <linux/thermal.h>
+
+#ifdef CONFIG_DEVICE_THERMAL
+int register_devfreq_cooling_notifier(struct notifier_block *nb);
+int unregister_devfreq_cooling_notifier(struct notifier_block *nb);
+struct thermal_cooling_device *devfreq_cooling_register(void);
+void devfreq_cooling_unregister(struct thermal_cooling_device *cdev);
+#else
+static inline
+int register_devfreq_cooling_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline
+int unregister_devfreq_cooling_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline
+struct thermal_cooling_device *devfreq_cooling_register(void)
+{
+ return NULL;
+}
+
+static inline
+void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+ return;
+}
+#endif
+#endif /* __DEVICE_THERMAL_H__ */
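
A minimal consumer sketch (the example_* names are hypothetical): a driver registers for devfreq cooling notifications and throttles itself in the callback:

        #include <linux/device_cooling.h>

        static int example_cool_notify(struct notifier_block *nb,
                                       unsigned long event, void *data)
        {
                /* reduce the device's clock or workload here */
                return NOTIFY_OK;
        }

        static struct notifier_block example_cool_nb = {
                .notifier_call = example_cool_notify,
        };

        /* probe:  register_devfreq_cooling_notifier(&example_cool_nb); */
        /* remove: unregister_devfreq_cooling_notifier(&example_cool_nb); */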
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8013562751a5..98d6549839b7 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -333,6 +333,8 @@ enum dma_slave_buswidth {
* loops in this area in order to transfer the data.
* @dst_port_window_size: same as src_port_window_size but for the destination
* port.
+ * @src_fifo_num: bits 0-7 are the fifo number, bits 8-11 are the fifo offset
+ * @dst_fifo_num: same as src_fifo_num, but for the destination fifo
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
@@ -362,6 +364,8 @@ struct dma_slave_config {
u32 dst_maxburst;
u32 src_port_window_size;
u32 dst_port_window_size;
+ u32 src_fifo_num;
+ u32 dst_fifo_num;
bool device_fc;
unsigned int slave_id;
};
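
A sketch of the encoding the two new fields describe (fifo_number, fifo_offset and chan are assumed to be provided by the calling driver):

        struct dma_slave_config cfg = { };

        /* bits 0-7: fifo number, bits 8-11: fifo offset */
        cfg.src_fifo_num = (fifo_offset << 8) | (fifo_number & 0xff);
        cfg.dst_fifo_num = cfg.src_fifo_num;
        dmaengine_slave_config(chan, &cfg);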
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 8221838fefd9..71a62c29aa61 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -127,7 +127,19 @@ struct fb_cursor_user {
/* The resolution of the passed in fb_info about to change */
#define FB_EVENT_MODE_CHANGE 0x01
-#ifdef CONFIG_GUMSTIX_AM200EPD
+#ifdef CONFIG_FB_MXC_HDMI
+/* only used by mxc_hdmi.c */
+/* The display on this fb_info is being suspended, no access to the
+ * framebuffer is allowed any more after that call returns
+ */
+#define FB_EVENT_SUSPEND 0x02
+/* The display on this fb_info was resumed, you can restore the display
+ * if you own it
+ */
+#define FB_EVENT_RESUME 0x03
+#endif
+
+#if (defined CONFIG_GUMSTIX_AM200EPD) || (defined CONFIG_FB_MXC_HDMI) || (defined CONFIG_FB_MXS_SII902X)
/* only used by mach-pxa/am200epd.c */
#define FB_EVENT_FB_REGISTERED 0x05
#define FB_EVENT_FB_UNREGISTERED 0x06
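
A sketch of how a client would consume the new suspend/resume events through the existing fb notifier chain (example_* names are hypothetical; registration goes through fb_register_client()):

        static int example_fb_notify(struct notifier_block *nb,
                                     unsigned long action, void *data)
        {
                struct fb_event *event = data;

                switch (action) {
                case FB_EVENT_SUSPEND:
                        /* stop all access to event->info's framebuffer */
                        break;
                case FB_EVENT_RESUME:
                        /* display restored; redrawing is safe again */
                        break;
                }
                return NOTIFY_OK;
        }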
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h
index 7562099c9e46..d3936d4c4199 100644
--- a/include/linux/firmware/imx/dsp.h
+++ b/include/linux/firmware/imx/dsp.h
@@ -55,6 +55,9 @@ static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx);
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx);
+void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx);
+
#else
static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc,
@@ -63,5 +66,12 @@ static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc,
return -ENOTSUPP;
}
+static inline
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx) { }
+
#endif
#endif /* _IMX_DSP_IPC_H */
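
A consumer sketch for the new channel helpers (dsp_ipc is assumed to be a handle obtained from the DSP IPC driver; the index is illustrative):

        struct mbox_chan *chan;

        chan = imx_dsp_request_channel(dsp_ipc, 1);
        if (IS_ERR(chan))
                return PTR_ERR(chan);
        /* ... use the dedicated mailbox channel ... */
        imx_dsp_free_channel(dsp_ipc, 1);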
diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h
index 6312c8cb084a..a0491a9c0b7a 100644
--- a/include/linux/firmware/imx/ipc.h
+++ b/include/linux/firmware/imx/ipc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright 2018 NXP
+ * Copyright 2018,2020 NXP
*
* Header file for the IPC implementation.
*/
@@ -9,7 +9,6 @@
#define _SC_IPC_H
#include <linux/device.h>
-#include <linux/types.h>
#define IMX_SC_RPC_VERSION 1
#define IMX_SC_RPC_MAX_MSG 8
@@ -25,7 +24,8 @@ enum imx_sc_rpc_svc {
IMX_SC_RPC_SVC_PAD = 6,
IMX_SC_RPC_SVC_MISC = 7,
IMX_SC_RPC_SVC_IRQ = 8,
- IMX_SC_RPC_SVC_ABORT = 9
+ IMX_SC_RPC_SVC_SECO = 9,
+ IMX_SC_RPC_SVC_ABORT = 10,
};
struct imx_sc_rpc_msg {
@@ -35,6 +35,7 @@ struct imx_sc_rpc_msg {
uint8_t func;
};
+#if IS_ENABLED(CONFIG_IMX_SCU)
/*
 * This is a function to send an RPC message over an IPC channel.
* It is called by client-side SCFW API function shims.
@@ -47,6 +48,8 @@ struct imx_sc_rpc_msg {
* and returns the result in msg.
*/
int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
+int imx_scu_call_big_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
+
/*
* This function gets the default ipc handle used by SCU
@@ -56,4 +59,24 @@ int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
* @return Returns an error code (0 = success, failed if < 0)
*/
int imx_scu_get_handle(struct imx_sc_ipc **ipc);
+#else
+static inline int
+imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp)
+{
+ return -EIO;
+}
+
+static inline int
+imx_scu_call_big_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp)
+{
+ return -EIO;
+}
+
+static inline int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+{
+ return -EIO;
+}
+#endif
#endif /* _SC_IPC_H */
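
The calling convention mirrors the existing SCFW shims; a sketch with an illustrative message layout (the struct name, function ID and payload are placeholders):

        struct imx_sc_msg_example {
                struct imx_sc_rpc_msg hdr;
                u32 param;
        } msg;
        struct imx_sc_ipc *ipc;
        int ret;

        ret = imx_scu_get_handle(&ipc);
        if (ret)
                return ret;

        msg.hdr.ver = IMX_SC_RPC_VERSION;
        msg.hdr.svc = IMX_SC_RPC_SVC_MISC;
        msg.hdr.func = 1;       /* placeholder function ID */
        msg.hdr.size = 2;       /* size in 32-bit words, incl. header */
        msg.param = 0;

        ret = imx_scu_call_rpc(ipc, &msg, true); /* response lands in msg */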
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 17ba4e405129..e749594436bb 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2017~2018 NXP
+ * Copyright 2017~2018,2020 NXP
*
* Header file containing the public System Controller Interface (SCI)
* definitions.
@@ -11,13 +11,18 @@
#define _SC_SCI_H
#include <linux/firmware/imx/ipc.h>
-#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/svc/misc.h>
#include <linux/firmware/imx/svc/pm.h>
+#include <linux/firmware/imx/svc/rm.h>
+#include <linux/firmware/imx/svc/seco.h>
+
+#define IMX_SC_IRQ_GROUP_WAKE 3U /* Wakeup interrupts */
+#define IMX_SC_IRQ_SECVIO BIT(6) /* Security violation */
int imx_scu_enable_general_irq_channel(struct device *dev);
int imx_scu_irq_register_notifier(struct notifier_block *nb);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_irq_get_status(u8 group, u32 *irq_status);
#endif /* _SC_SCI_H */
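
A consumer sketch for the SCU IRQ group API (example_* names are hypothetical; in the SCU driver the notifier 'action' argument carries the group's status bits):

        static int example_wake_notify(struct notifier_block *nb,
                                       unsigned long status, void *group)
        {
                /* inspect the status bits for the signalled group */
                return 0;
        }

        static struct notifier_block example_wake_nb = {
                .notifier_call = example_wake_notify,
        };

        imx_scu_irq_register_notifier(&example_wake_nb);
        imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_WAKE, IMX_SC_IRQ_SECVIO, true);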
diff --git a/include/linux/firmware/imx/seco_mu_ioctl.h b/include/linux/firmware/imx/seco_mu_ioctl.h
new file mode 100644
index 000000000000..bd8402b473a4
--- /dev/null
+++ b/include/linux/firmware/imx/seco_mu_ioctl.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/*
+ * Copyright 2019-2020 NXP
+ */
+
+#ifndef SECO_MU_IOCTL_H
+#define SECO_MU_IOCTL_H
+
+/* IOCTL definitions. */
+struct seco_mu_ioctl_setup_iobuf {
+ u8 *user_buf;
+ u32 length;
+ u32 flags;
+ u64 seco_addr;
+};
+
+struct seco_mu_ioctl_shared_mem_cfg {
+ u32 base_offset;
+ u32 size;
+};
+
+struct seco_mu_ioctl_get_mu_info {
+ u8 seco_mu_idx;
+ u8 interrupt_idx;
+ u8 tz;
+ u8 did;
+};
+
+struct seco_mu_ioctl_signed_message {
+ u8 *message;
+ u32 msg_size;
+ u32 error_code;
+};
+
+#define SECO_MU_IO_FLAGS_IS_INPUT (0x01u)
+#define SECO_MU_IO_FLAGS_USE_SEC_MEM (0x02u)
+#define SECO_MU_IO_FLAGS_USE_SHORT_ADDR (0x04u)
+
+#define SECO_MU_IOCTL 0x0A /* like MISC_MAJOR. */
+#define SECO_MU_IOCTL_ENABLE_CMD_RCV _IO(SECO_MU_IOCTL, 0x01)
+#define SECO_MU_IOCTL_SHARED_BUF_CFG _IOW(SECO_MU_IOCTL, 0x02, \
+ struct seco_mu_ioctl_shared_mem_cfg)
+#define SECO_MU_IOCTL_SETUP_IOBUF _IOWR(SECO_MU_IOCTL, 0x03, \
+ struct seco_mu_ioctl_setup_iobuf)
+#define SECO_MU_IOCTL_GET_MU_INFO _IOR(SECO_MU_IOCTL, 0x04, \
+ struct seco_mu_ioctl_get_mu_info)
+#define SECO_MU_IOCTL_SIGNED_MESSAGE _IOWR(SECO_MU_IOCTL, 0x05, \
+ struct seco_mu_ioctl_signed_message)
+
+#endif
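
A userspace sketch of the ioctl flow (the /dev/seco_mu0 node name is an assumption, and the header's u8/u32 types map to stdint types in real userspace code):

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                struct seco_mu_ioctl_get_mu_info info;
                int fd = open("/dev/seco_mu0", O_RDWR); /* node name assumed */

                if (fd < 0)
                        return 1;
                if (ioctl(fd, SECO_MU_IOCTL_GET_MU_INFO, &info) == 0)
                        printf("MU %u irq %u\n", info.seco_mu_idx,
                               info.interrupt_idx);
                close(fd);
                return 0;
        }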
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h
index 031dd4d3c766..56d95a7f22fa 100644
--- a/include/linux/firmware/imx/svc/misc.h
+++ b/include/linux/firmware/imx/svc/misc.h
@@ -46,11 +46,27 @@ enum imx_misc_func {
* Control Functions
*/
+#if IS_ENABLED(CONFIG_IMX_SCU)
int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 val);
int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 *val);
+#else
+static inline int
+imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 val)
+{
+ return -EIO;
+}
+
+static inline int
+imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 *val)
+{
+ return -EIO;
+}
+#endif
int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
bool enable, u64 phys_addr);
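
A sketch of pairing these with imx_scu_get_handle() (resource_id and ctrl_id are placeholders; real values come from the SCFW resource/control tables):

        struct imx_sc_ipc *ipc;
        u32 val;
        int ret;

        ret = imx_scu_get_handle(&ipc);
        if (ret)
                return ret;
        ret = imx_sc_misc_get_control(ipc, resource_id, ctrl_id, &val);
        if (!ret)
                ret = imx_sc_misc_set_control(ipc, resource_id, ctrl_id, val | 1);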
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
new file mode 100644
index 000000000000..419d552843e6
--- /dev/null
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Resource Management (RM) function. This includes functions for
+ * partitioning resources, pads, and memory regions.
+ *
+ * RM_SVC (SVC) Resource Management Service
+ *
+ * Module for the Resource Management (RM) service.
+ */
+
+#ifndef _SC_RM_API_H
+#define _SC_RM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC RM function calls.
+ */
+enum imx_sc_rm_func {
+ IMX_SC_RM_FUNC_UNKNOWN = 0,
+ IMX_SC_RM_FUNC_PARTITION_ALLOC = 1,
+ IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31,
+ IMX_SC_RM_FUNC_PARTITION_FREE = 2,
+ IMX_SC_RM_FUNC_GET_DID = 26,
+ IMX_SC_RM_FUNC_PARTITION_STATIC = 3,
+ IMX_SC_RM_FUNC_PARTITION_LOCK = 4,
+ IMX_SC_RM_FUNC_GET_PARTITION = 5,
+ IMX_SC_RM_FUNC_SET_PARENT = 6,
+ IMX_SC_RM_FUNC_MOVE_ALL = 7,
+ IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8,
+ IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9,
+ IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28,
+ IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10,
+ IMX_SC_RM_FUNC_SET_MASTER_SID = 11,
+ IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12,
+ IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13,
+ IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33,
+ IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14,
+ IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15,
+ IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16,
+ IMX_SC_RM_FUNC_MEMREG_ALLOC = 17,
+ IMX_SC_RM_FUNC_MEMREG_SPLIT = 29,
+ IMX_SC_RM_FUNC_MEMREG_FRAG = 32,
+ IMX_SC_RM_FUNC_MEMREG_FREE = 18,
+ IMX_SC_RM_FUNC_FIND_MEMREG = 30,
+ IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19,
+ IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20,
+ IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21,
+ IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22,
+ IMX_SC_RM_FUNC_ASSIGN_PAD = 23,
+ IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24,
+ IMX_SC_RM_FUNC_IS_PAD_OWNED = 25,
+ IMX_SC_RM_FUNC_DUMP = 27,
+};
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+int imx_sc_rm_find_memreg(struct imx_sc_ipc *ipc, u8 *mr, u64 addr_start,
+ u64 addr_end);
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt);
+int imx_sc_rm_set_memreg_permissions(struct imx_sc_ipc *ipc, u8 mr,
+ u8 pt, u8 perm);
+int imx_sc_rm_get_did(struct imx_sc_ipc *ipc, u8 *did);
+#else
+static inline bool
+imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ return true;
+}
+
+static inline
+int imx_sc_rm_find_memreg(struct imx_sc_ipc *ipc, u8 *mr, u64 addr_start,
+ u64 addr_end)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+int imx_sc_rm_set_memreg_permissions(struct imx_sc_ipc *ipc, u8 mr,
+ u8 pt, u8 perm)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+int imx_sc_rm_get_did(struct imx_sc_ipc *ipc, u8 *did)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif
diff --git a/include/linux/firmware/imx/svc/seco.h b/include/linux/firmware/imx/svc/seco.h
new file mode 100644
index 000000000000..d0dd803a1a52
--- /dev/null
+++ b/include/linux/firmware/imx/svc/seco.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Security Controller (SECO) function.
+ *
+ * SECO_SVC (SVC) Security Controller Service
+ *
+ * Module for the Security Controller (SECO) service.
+ */
+
+#ifndef _SC_SECO_API_H
+#define _SC_SECO_API_H
+
+#include <linux/errno.h>
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC SECO function calls.
+ */
+enum imx_sc_seco_func {
+ IMX_SC_SECO_FUNC_UNKNOWN = 0,
+ IMX_SC_SECO_FUNC_BUILD_INFO = 16,
+ IMX_SC_SECO_FUNC_SAB_MSG = 23,
+ IMX_SC_SECO_FUNC_SECVIO_ENABLE = 25,
+ IMX_SC_SECO_FUNC_SECVIO_CONFIG = 26,
+ IMX_SC_SECO_FUNC_SECVIO_DGO_CONFIG = 27,
+};
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+int imx_sc_seco_build_info(struct imx_sc_ipc *ipc, uint32_t *version,
+ uint32_t *commit);
+int imx_sc_seco_sab_msg(struct imx_sc_ipc *ipc, u64 smsg_addr);
+int imx_sc_seco_secvio_enable(struct imx_sc_ipc *ipc);
+int imx_sc_seco_secvio_config(struct imx_sc_ipc *ipc, u8 id, u8 access,
+ u32 *data0, u32 *data1, u32 *data2, u32 *data3,
+ u32 *data4, u8 size);
+int imx_sc_seco_secvio_dgo_config(struct imx_sc_ipc *ipc, u8 id, u8 access,
+ u32 *data);
+#else /* IS_ENABLED(CONFIG_IMX_SCU) */
+static inline
+int imx_sc_seco_build_info(struct imx_sc_ipc *ipc, uint32_t *version,
+ uint32_t *commit)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+int imx_sc_seco_sab_msg(struct imx_sc_ipc *ipc, u64 smsg_addr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+int imx_sc_seco_secvio_enable(struct imx_sc_ipc *ipc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+int imx_sc_seco_secvio_config(struct imx_sc_ipc *ipc, u8 id, u8 access,
+ u32 *data0, u32 *data1, u32 *data2, u32 *data3,
+ u32 *data4, u8 size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+int imx_sc_seco_secvio_dgo_config(struct imx_sc_ipc *ipc, u8 id, u8 access,
+ u32 *data)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* IS_ENABLED(CONFIG_IMX_SCU) */
+
+#endif /* _SC_SECO_API_H */
diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h
deleted file mode 100644
index 80821100e85f..000000000000
--- a/include/linux/firmware/imx/types.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2017~2018 NXP
- *
- * Header file containing types used across multiple service APIs.
- */
-
-#ifndef _SC_TYPES_H
-#define _SC_TYPES_H
-
-/*
- * This type is used to indicate a control.
- */
-enum imx_sc_ctrl {
- IMX_SC_C_TEMP = 0,
- IMX_SC_C_TEMP_HI = 1,
- IMX_SC_C_TEMP_LOW = 2,
- IMX_SC_C_PXL_LINK_MST1_ADDR = 3,
- IMX_SC_C_PXL_LINK_MST2_ADDR = 4,
- IMX_SC_C_PXL_LINK_MST_ENB = 5,
- IMX_SC_C_PXL_LINK_MST1_ENB = 6,
- IMX_SC_C_PXL_LINK_MST2_ENB = 7,
- IMX_SC_C_PXL_LINK_SLV1_ADDR = 8,
- IMX_SC_C_PXL_LINK_SLV2_ADDR = 9,
- IMX_SC_C_PXL_LINK_MST_VLD = 10,
- IMX_SC_C_PXL_LINK_MST1_VLD = 11,
- IMX_SC_C_PXL_LINK_MST2_VLD = 12,
- IMX_SC_C_SINGLE_MODE = 13,
- IMX_SC_C_ID = 14,
- IMX_SC_C_PXL_CLK_POLARITY = 15,
- IMX_SC_C_LINESTATE = 16,
- IMX_SC_C_PCIE_G_RST = 17,
- IMX_SC_C_PCIE_BUTTON_RST = 18,
- IMX_SC_C_PCIE_PERST = 19,
- IMX_SC_C_PHY_RESET = 20,
- IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21,
- IMX_SC_C_PANIC = 22,
- IMX_SC_C_PRIORITY_GROUP = 23,
- IMX_SC_C_TXCLK = 24,
- IMX_SC_C_CLKDIV = 25,
- IMX_SC_C_DISABLE_50 = 26,
- IMX_SC_C_DISABLE_125 = 27,
- IMX_SC_C_SEL_125 = 28,
- IMX_SC_C_MODE = 29,
- IMX_SC_C_SYNC_CTRL0 = 30,
- IMX_SC_C_KACHUNK_CNT = 31,
- IMX_SC_C_KACHUNK_SEL = 32,
- IMX_SC_C_SYNC_CTRL1 = 33,
- IMX_SC_C_DPI_RESET = 34,
- IMX_SC_C_MIPI_RESET = 35,
- IMX_SC_C_DUAL_MODE = 36,
- IMX_SC_C_VOLTAGE = 37,
- IMX_SC_C_PXL_LINK_SEL = 38,
- IMX_SC_C_OFS_SEL = 39,
- IMX_SC_C_OFS_AUDIO = 40,
- IMX_SC_C_OFS_PERIPH = 41,
- IMX_SC_C_OFS_IRQ = 42,
- IMX_SC_C_RST0 = 43,
- IMX_SC_C_RST1 = 44,
- IMX_SC_C_SEL0 = 45,
- IMX_SC_C_LAST
-};
-
-#endif /* _SC_TYPES_H */
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
new file mode 100644
index 000000000000..4875dd38af7e
--- /dev/null
+++ b/include/linux/fsl/enetc_mdio.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef _FSL_ENETC_MDIO_H_
+#define _FSL_ENETC_MDIO_H_
+
+#include <linux/phy.h>
+
+/* PCS registers */
+#define ENETC_PCS_LINK_TIMER1 0x12
+#define ENETC_PCS_LINK_TIMER1_VAL 0x06a0
+#define ENETC_PCS_LINK_TIMER2 0x13
+#define ENETC_PCS_LINK_TIMER2_VAL 0x0003
+#define ENETC_PCS_IF_MODE 0x14
+#define ENETC_PCS_IF_MODE_SGMII_EN BIT(0)
+#define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1)
+#define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+
+/* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
+ * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
+ * still thinks it's at gigabit.
+ */
+enum enetc_pcs_speed {
+ ENETC_PCS_SPEED_10 = 0,
+ ENETC_PCS_SPEED_100 = 1,
+ ENETC_PCS_SPEED_1000 = 2,
+ ENETC_PCS_SPEED_2500 = 2,
+};
+
+struct enetc_hw;
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+ int mdio_base;
+};
+
+#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
+
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
+
+#else
+
+static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
+{ return -EINVAL; }
+static inline
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
+{ return ERR_PTR(-EINVAL); }
+
+#endif
+
+#endif
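
A sketch of programming the exported PCS registers for SGMII with in-band AN over this MDIO interface (bus and pcs_addr are assumed to be set up by the caller):

        enetc_mdio_write(bus, pcs_addr, ENETC_PCS_IF_MODE,
                         ENETC_PCS_IF_MODE_SGMII_EN |
                         ENETC_PCS_IF_MODE_USE_SGMII_AN);
        enetc_mdio_write(bus, pcs_addr, ENETC_PCS_LINK_TIMER1,
                         ENETC_PCS_LINK_TIMER1_VAL);
        enetc_mdio_write(bus, pcs_addr, ENETC_PCS_LINK_TIMER2,
                         ENETC_PCS_LINK_TIMER2_VAL);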
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 975553a9f75d..a9deb83579cb 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -12,6 +12,9 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <uapi/linux/fsl_mc.h>
#define FSL_MC_VENDOR_FREESCALE 0x1957
@@ -48,6 +51,9 @@ struct fsl_mc_driver {
#define to_fsl_mc_driver(_drv) \
container_of(_drv, struct fsl_mc_driver, driver)
+#define to_fsl_mc_bus(_mc_dev) \
+ container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
+
/**
* enum fsl_mc_pool_type - Types of allocatable MC bus resources
*
@@ -161,6 +167,7 @@ struct fsl_mc_obj_desc {
* @regions: pointer to array of MMIO region entries
* @irqs: pointer to array of pointers to interrupts allocated to this device
* @resource: generic resource associated with this MC object device, if any.
+ * @driver_override: Driver name to force a match
*
* Generic device object for MC object devices that are "attached" to a
* MC bus.
@@ -186,7 +193,7 @@ struct fsl_mc_device {
struct device dev;
u64 dma_mask;
u16 flags;
- u16 icid;
+ u32 icid;
u16 mc_handle;
struct fsl_mc_io *mc_io;
struct fsl_mc_obj_desc obj_desc;
@@ -194,13 +201,12 @@ struct fsl_mc_device {
struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
struct device_link *consumer_link;
+ const char *driver_override;
};
#define to_fsl_mc_device(_dev) \
container_of(_dev, struct fsl_mc_device, dev)
-#define MC_CMD_NUM_OF_PARAMS 7
-
struct mc_cmd_header {
u8 src_id;
u8 flags_hw;
@@ -210,11 +216,6 @@ struct mc_cmd_header {
__le16 cmd_id;
};
-struct fsl_mc_command {
- __le64 header;
- __le64 params[MC_CMD_NUM_OF_PARAMS];
-};
-
enum mc_cmd_status {
MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
@@ -339,7 +340,7 @@ struct fsl_mc_io {
* This field is only meaningful if the
* FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
*/
- spinlock_t spinlock; /* serializes mc_send_command() */
+ raw_spinlock_t spinlock; /* serializes mc_send_command() */
};
};
@@ -360,6 +361,10 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
(_dev) : (_dev)->parent)
+#define fsl_mc_is_dev_coherent(_dev) \
+ (!((to_fsl_mc_device(_dev))->obj_desc.flags & \
+ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+
/*
* module_fsl_mc_driver() - Helper macro for drivers that don't do
* anything special in module init/exit. This eliminates a lot of
@@ -370,6 +375,8 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
module_driver(__fsl_mc_driver, fsl_mc_driver_register, \
fsl_mc_driver_unregister)
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+
/*
* Macro to avoid include chaining to get THIS_MODULE
*/
@@ -381,6 +388,22 @@ int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
+/**
+ * struct fsl_mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct fsl_mc_version {
+ u32 major;
+ u32 minor;
+ u32 revision;
+};
+
+struct fsl_mc_version *fsl_mc_get_version(void);
+
int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
u16 mc_io_flags,
struct fsl_mc_io **new_mc_io);
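
A sketch of gating driver behaviour on the reported firmware version (the 10.10 threshold is illustrative):

        struct fsl_mc_version *mc_ver = fsl_mc_get_version();
        bool has_new_cmds = mc_ver &&
                            (mc_ver->major > 10 ||
                             (mc_ver->major == 10 && mc_ver->minor >= 10));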
@@ -409,12 +432,17 @@ extern struct device_type fsl_mc_bus_dprc_type;
extern struct device_type fsl_mc_bus_dpni_type;
extern struct device_type fsl_mc_bus_dpio_type;
extern struct device_type fsl_mc_bus_dpsw_type;
+extern struct device_type fsl_mc_bus_dpdmux_type;
extern struct device_type fsl_mc_bus_dpbp_type;
extern struct device_type fsl_mc_bus_dpcon_type;
extern struct device_type fsl_mc_bus_dpmcp_type;
extern struct device_type fsl_mc_bus_dpmac_type;
extern struct device_type fsl_mc_bus_dprtc_type;
extern struct device_type fsl_mc_bus_dpseci_type;
+extern struct device_type fsl_mc_bus_dpdcei_type;
+extern struct device_type fsl_mc_bus_dpaiop_type;
+extern struct device_type fsl_mc_bus_dpci_type;
+extern struct device_type fsl_mc_bus_dpdmai_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
@@ -436,6 +464,11 @@ static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
}
+static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type;
+}
+
static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
{
return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
@@ -466,6 +499,359 @@ static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
}
+static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type;
+}
+
+static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type;
+}
+
+static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpci_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type;
+}
+
+/*
+ * Data Path Resource Container (DPRC) API
+ */
+
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 6
+#define DPRC_MIN_VER_MINOR 0
+
+/* DPRC command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_2ND_VERSION 2
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
+
+/* DPRC command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
+
+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
+
+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
+
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+
+struct dprc_cmd_open {
+ __le32 container_id;
+};
+
+struct dprc_cmd_reset_container {
+ __le32 child_container_id;
+};
+
+struct dprc_cmd_set_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+#define DPRC_ENABLE 0x1
+
+struct dprc_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+ /* response word 0 */
+ __le32 container_id;
+ __le32 icid;
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+};
+
+struct dprc_rsp_get_obj_count {
+ __le32 pad;
+ __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+ __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_obj_region {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le16 pad0;
+ u8 region_index;
+ u8 pad1;
+ /* cmd word 1-2 */
+ __le64 pad2[2];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
+ __le64 base_offset;
+ /* response word 2 */
+ __le32 size;
+ u8 type;
+ u8 pad2[3];
+ /* response word 3 */
+ __le32 flags;
+ __le32 pad3;
+ /* response word 4 */
+ /* base_addr may be zero if older MC firmware is used */
+ __le64 base_addr;
+};
+
+struct dprc_cmd_set_obj_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 obj_id;
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+/*
+ * DPRC API for managing and querying DPAA resources
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/* DPRC IRQ events */
+
+/* IRQ event - Indicates that a new object was added to the container */
+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
+/* IRQ event - Indicates that an object was removed from the container */
+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
+/*
+ * IRQ event - Indicates that one of the descendant containers opened by
+ * this container was destroyed
+ */
+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+
+/*
+ * IRQ event - Indicates that one of the objects opened by this container
+ * was destroyed
+ */
+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+
+/* IRQ event - Indicates that an object was created in the container */
+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
+
+/**
+ * struct dprc_irq_cfg - IRQ configuration
+ * @paddr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dprc_irq_cfg {
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
+};
+
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dprc_attributes - Container attributes
+ * @container_id: Container's ID
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+ */
+struct dprc_attributes {
+ int container_id;
+ u32 icid;
+ int portal_id;
+ u64 options;
+};
+
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
+
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
+
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+/* Region flags */
+/* Cacheable - Indicates that region should be mapped as cacheable */
+#define DPRC_REGION_CACHEABLE 0x00000001
+#define DPRC_REGION_SHAREABLE 0x00000002
+
+/**
+ * enum dprc_region_type - Region type
+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
+ */
+enum dprc_region_type {
+ DPRC_REGION_TYPE_MC_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
+};
+
+/**
+ * struct dprc_region_desc - Mappable region descriptor
+ * @base_offset: Region offset from region's base address.
+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
+ * base address; For DPIO, region base is offset from SoC QMan portals
+ * base address
+ * @size: Region size (in bytes)
+ * @flags: Region attributes
+ * @type: Portal region type
+ */
+struct dprc_region_desc {
+ u32 base_offset;
+ u32 size;
+ u32 flags;
+ enum dprc_region_type type;
+ u64 base_address;
+};
+
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
+
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id);
+
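The usual open/enumerate/close sequence against a DPRC looks like this (sketch; mc_io and container_id come from the bus driver):

        u16 token;
        int i, obj_count, err;

        err = dprc_open(mc_io, 0, container_id, &token);
        if (err)
                return err;
        err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
        for (i = 0; !err && i < obj_count; i++) {
                struct fsl_mc_obj_desc desc;

                err = dprc_get_obj(mc_io, 0, token, i, &desc);
                /* desc.type and desc.id identify the child object */
        }
        dprc_close(mc_io, 0, token);
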
/*
* Data Path Buffer Pool (DPBP) API
* Contains initialization APIs and runtime control APIs for DPBP
@@ -574,4 +960,96 @@ int dpcon_set_notification(struct fsl_mc_io *mc_io,
u16 token,
struct dpcon_notification_cfg *cfg);
+struct irq_domain;
+struct msi_domain_info;
+
+/**
+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+
+/**
+ * struct fsl_mc_resource_pool - Pool of MC resources of a given
+ * type
+ * @type: type of resources in the pool
+ * @max_count: maximum number of resources in the pool
+ * @free_count: number of free resources in the pool
+ * @mutex: mutex to serialize access to the pool's free list
+ * @free_list: anchor node of list of free resources in the pool
+ * @mc_bus: pointer to the MC bus that owns this resource pool
+ */
+struct fsl_mc_resource_pool {
+ enum fsl_mc_pool_type type;
+ int max_count;
+ int free_count;
+ struct mutex mutex; /* serializes access to free_list */
+ struct list_head free_list;
+ struct fsl_mc_bus *mc_bus;
+};
+
+/**
+ * struct fsl_mc_uapi - information associated with a device file
+ * @misc: struct miscdevice linked to the root dprc
+ * @device: newly created device in /dev
+ * @mutex: mutex lock to serialize the open/release operations
+ * @local_instance_in_use: local MC I/O instance in use or not
+ * @static_mc_io: pointer to the static MC I/O object
+ */
+struct fsl_mc_uapi {
+ struct miscdevice misc;
+ struct device *device;
+ struct mutex mutex; /* serialize open/release operations */
+ u32 local_instance_in_use;
+ struct fsl_mc_io *static_mc_io;
+};
+
+/**
+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
+ * @mc_dev: fsl-mc device for the bus device itself.
+ * @resource_pools: array of resource pools (one pool per resource type)
+ * for this MC bus. These resources represent allocatable entities
+ * from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+ * @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
+ * @uapi_misc: struct that abstracts the interaction with userspace
+ * @irq_enabled: flag that indicates whether the MC bus IRQs are enabled
+ */
+struct fsl_mc_bus {
+ struct fsl_mc_device mc_dev;
+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+ struct fsl_mc_device_irq *irq_resources;
+ struct mutex scan_mutex; /* serializes bus scanning */
+ struct dprc_attributes dprc_attr;
+ struct fsl_mc_uapi uapi_misc;
+ int irq_enabled;
+};
+
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ const char *driver_override,
+ bool alloc_interrupts,
+ unsigned int *total_irq_count);
+
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io);
+
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
+
+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+ struct irq_domain **mc_msi_domain);
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+void fsl_mc_get_root_dprc(struct device *dev, struct device **root_dprc_dev);
+
#endif /* _FSL_MC_H_ */
diff --git a/include/linux/fsl/svr.h b/include/linux/fsl/svr.h
new file mode 100644
index 000000000000..e95c8f43ae67
--- /dev/null
+++ b/include/linux/fsl/svr.h
@@ -0,0 +1,97 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef FSL_SVR_H
+#define FSL_SVR_H
+
+#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design revision */
+#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field */
+#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field */
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
+
+#define SVR_8533 0x803400
+#define SVR_8535 0x803701
+#define SVR_8536 0x803700
+#define SVR_8540 0x803000
+#define SVR_8541 0x807200
+#define SVR_8543 0x803200
+#define SVR_8544 0x803401
+#define SVR_8545 0x803102
+#define SVR_8547 0x803101
+#define SVR_8548 0x803100
+#define SVR_8555 0x807100
+#define SVR_8560 0x807000
+#define SVR_8567 0x807501
+#define SVR_8568 0x807500
+#define SVR_8569 0x808000
+#define SVR_8572 0x80E000
+#define SVR_P1010 0x80F100
+#define SVR_P1011 0x80E500
+#define SVR_P1012 0x80E501
+#define SVR_P1013 0x80E700
+#define SVR_P1014 0x80F101
+#define SVR_P1017 0x80F700
+#define SVR_P1020 0x80E400
+#define SVR_P1021 0x80E401
+#define SVR_P1022 0x80E600
+#define SVR_P1023 0x80F600
+#define SVR_P1024 0x80E402
+#define SVR_P1025 0x80E403
+#define SVR_P2010 0x80E300
+#define SVR_P2020 0x80E200
+#define SVR_P2040 0x821000
+#define SVR_P2041 0x821001
+#define SVR_P3041 0x821103
+#define SVR_P4040 0x820100
+#define SVR_P4080 0x820000
+#define SVR_P5010 0x822100
+#define SVR_P5020 0x822000
+#define SVR_P5021 0x820500
+#define SVR_P5040 0x820400
+#define SVR_T4240 0x824000
+#define SVR_T4120 0x824001
+#define SVR_T4160 0x824100
+#define SVR_T4080 0x824102
+#define SVR_C291 0x850000
+#define SVR_C292 0x850020
+#define SVR_C293 0x850030
+#define SVR_B4860 0x868000
+#define SVR_G4860 0x868001
+#define SVR_G4060 0x868003
+#define SVR_B4440 0x868100
+#define SVR_G4440 0x868101
+#define SVR_B4420 0x868102
+#define SVR_B4220 0x868103
+#define SVR_T1040 0x852000
+#define SVR_T1041 0x852001
+#define SVR_T1042 0x852002
+#define SVR_T1020 0x852100
+#define SVR_T1021 0x852101
+#define SVR_T1022 0x852102
+#define SVR_T1023 0x854100
+#define SVR_T1024 0x854000
+#define SVR_T2080 0x853000
+#define SVR_T2081 0x853100
+
+#define SVR_8610 0x80A000
+#define SVR_8641 0x809000
+#define SVR_8641D 0x809001
+
+#define SVR_9130 0x860001
+#define SVR_9131 0x860000
+#define SVR_9132 0x861000
+#define SVR_9232 0x861400
+
+#define SVR_Unknown 0xFFFFFF
+
+#endif
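
A decode sketch using the macros above (reading the SVR is platform-specific; mfspr(SPRN_SVR) applies on PowerPC):

        u32 svr = mfspr(SPRN_SVR);
        bool is_t4240_r2 = SVR_SOC_VER(svr) == SVR_T4240 &&
                           SVR_REV(svr) >= 0x20;        /* rev 2.0+ */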
diff --git a/include/linux/fsl_bman.h b/include/linux/fsl_bman.h
new file mode 100644
index 000000000000..43942221f7fb
--- /dev/null
+++ b/include/linux/fsl_bman.h
@@ -0,0 +1,532 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FSL_BMAN_H
+#define FSL_BMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Last updated for v00.79 of the BG */
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
+#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */
+
+/* This wrapper represents a bit-array for the depletion state of the 64 Bman
+ * buffer pools. */
+struct bman_depletion {
+ u32 __state[2];
+};
+#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
+#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
+#define __bmdep_word(x) ((x) >> 5)
+#define __bmdep_shift(x) ((x) & 0x1f)
+#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
+static inline void bman_depletion_init(struct bman_depletion *c)
+{
+ c->__state[0] = c->__state[1] = 0;
+}
+static inline void bman_depletion_fill(struct bman_depletion *c)
+{
+ c->__state[0] = c->__state[1] = ~0;
+}
+static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
+{
+ return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
+}
+static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
+{
+ c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
+}
+static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
+{
+ c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
+}
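
For example, a portal that should demux depletion state changes only for buffer pools 3 and 7 would build its mask with the helpers above:

        struct bman_depletion mask;

        bman_depletion_init(&mask);
        bman_depletion_set(&mask, 3);
        bman_depletion_set(&mask, 7);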
+
+/* ------------------------------------------------------- */
+/* --- Bman data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
+struct bm_mc_command; /* MC (Management Command) command */
+struct bm_mc_result; /* MC result */
+
+/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
+ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
+struct bm_buffer {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1;
+ u8 bpid;
+ u16 hi; /* High 16-bits of 48-bit address */
+ u32 lo; /* Low 32-bits of 48-bit address */
+#else
+ u32 lo;
+ u16 hi;
+ u8 bpid;
+ u8 __reserved;
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:16;
+ u64 addr:48;
+#else
+ u64 addr:48;
+ u64 __notaddress:16;
+#endif
+ };
+ u64 opaque;
+ };
+} __aligned(8);
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+ return buf->addr;
+}
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+ return (dma_addr_t)buf->addr;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define bm_buffer_set64(buf, v) \
+ do { \
+ struct bm_buffer *__buf931 = (buf); \
+ __buf931->hi = upper_32_bits(v); \
+ __buf931->lo = lower_32_bits(v); \
+ } while (0)
+
+/* See 1.5.3.5.4: "Release Command" */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 __dont_write_directly__verb;
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+} __packed;
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+/* See 1.5.3.1: "Acquire Command" */
+/* See 1.5.3.2: "Query Command" */
+struct bm_mcc_acquire {
+ u8 bpid;
+ u8 __reserved1[62];
+} __packed;
+struct bm_mcc_query {
+ u8 __reserved2[63];
+} __packed;
+struct bm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct bm_mcc_acquire acquire;
+ struct bm_mcc_query query;
+ };
+} __packed;
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* See 1.5.3.3: "Acquire Response" */
+/* See 1.5.3.4: "Query Response" */
+struct bm_pool_state {
+ u8 __reserved1[32];
+ /* "availability state" and "depletion state" */
+ struct {
+ u8 __reserved1[8];
+ /* Access using bman_depletion_***() */
+ struct bman_depletion state;
+ } as, ds;
+};
+struct bm_mc_result {
+ union {
+ struct {
+ u8 verb;
+ u8 __reserved1[63];
+ };
+ union {
+ struct {
+ u8 __reserved1;
+ u8 bpid;
+ u8 __reserved2[62];
+ };
+ struct bm_buffer bufs[8];
+ } acquire;
+ struct bm_pool_state query;
+ };
+} __packed;
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+/* Determine the "availability state" of pool 'p' from a query result 'r' */
+#define BM_MCR_QUERY_AVAILABILITY(r, p) \
+ bman_depletion_get(&r->query.as.state, p)
+/* Determine the "depletion state" of pool 'p' from a query result 'r' */
+#define BM_MCR_QUERY_DEPLETION(r, p) \
+ bman_depletion_get(&r->query.ds.state, p)
+
+/*******************************************************************/
+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
+/*******************************************************************/
+
+ /* Portal and Buffer Pools */
+ /* ----------------------- */
+/* Represents a managed portal */
+struct bman_portal;
+
+/* This object type represents Bman buffer pools. */
+struct bman_pool;
+
+struct bman_portal_config {
+ /* This is used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no core
+ * affinity configured. */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+ * processing on behalf of other CPUs.) */
+ int is_shared;
+ /* These are the buffer pool IDs that may be used via this portal. */
+ struct bman_depletion mask;
+};
+
+/* This callback type is used when handling pool depletion entry/exit. The
+ * 'cb_ctx' value is the opaque value associated with the pool object in
+ * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
+ * depletion-exit. */
+typedef void (*bman_cb_depletion)(struct bman_portal *bm,
+ struct bman_pool *pool, void *cb_ctx, int depleted);
+
+/* This struct specifies parameters for a bman_pool object. */
+struct bman_pool_params {
+ /* index of the buffer pool to encapsulate (0-63), ignored if
+ * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
+ u32 bpid;
+ /* bit-mask of BMAN_POOL_FLAG_*** options */
+ u32 flags;
+ /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
+ bman_cb_depletion cb;
+ /* opaque user value passed as a parameter to 'cb' */
+ void *cb_ctx;
+ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
+ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
+ * when run in the control plane (which controls Bman CCSR). This array
+ * matches the definition of bm_pool_set(). */
+ u32 thresholds[4];
+};
+
+/* Flags to bman_new_pool() */
+#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
+#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
+#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */
+#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
+#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
+#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */
+
+/* Flags to bman_release() */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */
+#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
+#endif
+#endif
+#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
+
+/* Flags to bman_acquire() */
+#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */
+
+ /* Portal Management */
+ /* ----------------- */
+/**
+ * bman_get_portal_config - get portal configuration settings
+ *
+ * This returns a read-only view of the current cpu's affine portal settings.
+ */
+const struct bman_portal_config *bman_get_portal_config(void);
+
+/**
+ * bman_irqsource_get - return the portal work that is interrupt-driven
+ *
+ * Returns a bitmask of BM_PIRQ_**I processing sources that are currently
+ * enabled for interrupt handling on the current cpu's affine portal. These
+ * sources will trigger the portal interrupt and the interrupt handler (or a
+ * tasklet/bottom-half it defers to) will perform the corresponding processing
+ * work. The bman_poll_***() functions will only process sources that are not in
+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
+ * this always returns zero.
+ */
+u32 bman_irqsource_get(void);
+
+/**
+ * bman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of BM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via bman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
+int bman_irqsource_add(u32 bits);
+
+/**
+ * bman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of BM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via bman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
+int bman_irqsource_remove(u32 bits);
+
+/**
+ * bman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *bman_affine_cpus(void);
+
+/**
+ * bman_poll_slow - process anything that isn't interrupt-driven.
+ *
+ * This function does any portal processing that isn't interrupt-driven. If the
+ * current CPU is sharing a portal hosted on another CPU, this function will
+ * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
+ * indicating what interrupt sources were actually processed by the call.
+ *
+ * NB, unlike the legacy wrapper bman_poll(), this function will
+ * deterministically check for the presence of portal processing work and do it,
+ * which implies some latency even if there's nothing to do. The bman_poll()
+ * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
+ * checking for (and doing) portal processing infrequently. Ie. such that
+ * qman_poll() and bman_poll() can be called from core-processing loops. Use
+ * bman_poll_slow() when you yourself are deciding when to incur the overhead of
+ * processing.
+ */
+u32 bman_poll_slow(void);
+
+/**
+ * bman_poll - process anything that isn't interrupt-driven.
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. This function does whatever processing is not triggered by
+ * interrupts. This is a legacy wrapper that can be used in core-processing
+ * loops but mitigates the performance overhead of portal processing by
+ * adaptively bypassing true portal processing most of the time. (Processing is
+ * done once every 10 calls if the previous processing revealed that work needed
+ * to be done, or once every 1000 calls if the previous processing revealed no
+ * work needed doing.) If you wish to control this yourself, call
+ * bman_poll_slow() instead, which always checks for portal processing work.
+ */
+void bman_poll(void);
+
+/**
+ * bman_rcr_is_empty - Determine if portal's RCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * releases for the local portal have been processed by Bman but can't use the
+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
+ * The function forces tracking of RCR consumption (which normally doesn't
+ * happen until release processing needs to find space to put new release
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int bman_rcr_is_empty(void);
+
+/**
+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
+ * @result: is set by the API to the base BPID of the allocated range
+ * @count: the number of BPIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count BPIDs
+ *
+ * Returns the number of buffer pools allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * BPIDs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int bman_alloc_bpid(u32 *result)
+{
+ int ret = bman_alloc_bpid_range(result, 1, 0, 0);
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * bman_release_bpid_range - Release the specified range of buffer pool IDs
+ * @bpid: the base BPID of the range to deallocate
+ * @count: the number of BPIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of BPIDs
+ * that it can subsequently allocate from.
+ */
+void bman_release_bpid_range(u32 bpid, unsigned int count);
+static inline void bman_release_bpid(u32 bpid)
+{
+ bman_release_bpid_range(bpid, 1);
+}
+
+/* Reserve the specified range of BPIDs so that the allocator will not hand
+ * them out. Returns zero on success. */
+int bman_reserve_bpid_range(u32 bpid, unsigned int count);
+static inline int bman_reserve_bpid(u32 bpid)
+{
+ return bman_reserve_bpid_range(bpid, 1);
+}
+
+/* Seed the BPID allocator with the specified range. */
+void bman_seed_bpid_range(u32 bpid, unsigned int count);
+
+/* Drain and shut down the buffer pool identified by @bpid. */
+int bman_shutdown_pool(u32 bpid);
+
+ /* Pool management */
+ /* --------------- */
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ * @params: parameters specifying the buffer pool ID and behaviour
+ *
+ * Creates a pool object for the given @params. The depletion callback and
+ * context fields of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
+ * is set. NB, the fields from @params are copied into the new pool object, so
+ * the structure provided by the caller can be released or reused after the
+ * function returns.
+ */
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
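+
+/*
+ * Illustrative sketch (my_depletion_cb and my_ctx are hypothetical caller
+ * names): creating a pool with a dynamically allocated BPID and depletion
+ * tracking might look like:
+ *
+ *	struct bman_pool_params params = {
+ *		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_DEPLETION,
+ *		.cb = my_depletion_cb,
+ *		.cb_ctx = my_ctx,
+ *	};
+ *	struct bman_pool *pool = bman_new_pool(&params);
+ *	if (!pool)
+ *		return -ENOMEM;
+ */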
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_params - Returns a pool object's parameters.
+ * @pool: the pool object
+ *
+ * The returned pointer refers to state within the pool object so must not be
+ * modified and can no longer be read once the pool object is destroyed.
+ */
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ * Adds the given buffers to RCR entries. If the portal performing the release
+ * was created with the "COMPACT" flag, it will use a compaction algorithm to
+ * improve utilisation of RCR. As such, these buffers may join an existing ring
+ * entry and/or may not be issued right away, so as to allow future releases to
+ * join the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
+ * behaviour by committing the RCR entry (or entries) right away. If the RCR
+ * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
+ * is selected, in which case it will sleep waiting for space to become
+ * available in RCR. If the function receives a signal before such time (and
+ * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
+ * it returns zero.
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ * @flags: bit-mask of BMAN_ACQUIRE_FLAG_*** options
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered. In
+ * the latter case, the content of @bufs is undefined.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags);
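+
+/*
+ * Illustrative sketch: a simple acquire/release round-trip against an
+ * existing pool (error handling elided):
+ *
+ *	struct bm_buffer bufs[8];
+ *	int n = bman_acquire(pool, bufs, 8, 0);
+ *	if (n > 0)
+ *		bman_release(pool, bufs, n, BMAN_RELEASE_FLAG_NOW);
+ */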
+
+/**
+ * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
+ * @pool: the buffer pool object the stockpile belongs to
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ * Adds stockpile buffers to RCR entries until the stockpile is empty.
+ * The return value will be a negative error code if a h/w error occurred.
+ * If the BMAN_RELEASE_FLAG_NOW flag is passed and the RCR ring is full,
+ * -EAGAIN will be returned.
+ */
+int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
+
+/**
+ * bman_query_pools - Query all buffer pool states
+ * @state: storage for the queried availability and depletion states
+ */
+int bman_query_pools(struct bm_pool_state *state);
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+/**
+ * bman_query_free_buffers - Query how many free buffers are in the buffer pool
+ * @pool: the buffer pool object to query
+ *
+ * Returns the number of free buffers
+ */
+u32 bman_query_free_buffers(struct bman_pool *pool);
+
+/**
+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
+ * @pool: the buffer pool object whose depletion thresholds will be updated
+ * @thresholds: the new thresholds
+ */
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
+#endif
+
+/*
+ * The bman_p_***() variant below may be called even when the cpu to which the
+ * given portal is affine is not yet online.
+ * @p specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FSL_BMAN_H */
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 0af96a45e903..d8c1908c6114 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -846,6 +846,11 @@ struct fsl_ifc_ctrl {
u32 nand_stat;
wait_queue_head_t nand_wait;
bool little_endian;
+#ifdef CONFIG_PM_SLEEP
+ /* save regs when the system goes into deep sleep */
+ struct fsl_ifc_global *saved_gregs;
+ struct fsl_ifc_runtime *saved_rregs;
+#endif
};
extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
diff --git a/include/linux/fsl_qman.h b/include/linux/fsl_qman.h
new file mode 100644
index 000000000000..e39a17c2b6e5
--- /dev/null
+++ b/include/linux/fsl_qman.h
@@ -0,0 +1,3910 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FSL_QMAN_H
+#define FSL_QMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Last updated for v00.800 of the BG */
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
+#define QMAN_CHANNEL_PME 0xa0
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
+#define QMAN_CHANNEL_PME_REV3 0x860
+#define QMAN_CHANNEL_DCE 0x8a0
+#define QMAN_CHANNEL_DCE_QMANREV312 0x880
+extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
+extern u16 qm_channel_pme;
+extern u16 qm_channel_dce;
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1,
+ qm_dc_portal_caam = 2,
+ qm_dc_portal_pme = 3,
+ qm_dc_portal_rman = 4,
+ qm_dc_portal_dce = 5
+};
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
+/* This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing. */
+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+ QM_PIRQ_MRI | QM_PIRQ_CCSCI)
+
+/* --- Clock speed --- */
+/* A qman driver instance may or may not know the current qman clock speed.
+ * However, certain CEETM calculations may not be possible if this is not known.
+ * The 'set' function will only succeed (return zero) if the driver did not
+ * already know the clock speed. Likewise, the 'get' function will only succeed
+ * if the driver does know the clock speed (either because it knew when booting,
+ * or was told via 'set'). In cases where software is running on a driver
+ * instance that does not know the clock speed (eg. on a hypervised data-plane),
+ * and the user can obtain the current qman clock speed by other means (eg. from
+ * a message sent from the control-plane), then the 'set' function can be used
+ * to enable rate-calculations in a driver where it would otherwise not be
+ * possible. */
+int qm_get_clock(u64 *clock_hz);
+int qm_set_clock(u64 clock_hz);
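+
+/*
+ * Illustrative sketch: data-plane software that learns the clock speed by
+ * other means can seed the driver with it; 'hz_from_control_plane' is a
+ * hypothetical value obtained elsewhere:
+ *
+ *	u64 qman_hz;
+ *	if (qm_get_clock(&qman_hz)) {
+ *		qman_hz = hz_from_control_plane;
+ *		qm_set_clock(qman_hz);
+ *	}
+ */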
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID. */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
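+
+/*
+ * Illustrative sketch: a volatile-dequeue command requesting exactly four
+ * frames, with SDQCR precedence, could be composed as follows (the FQID bits
+ * would be OR'd in via the FQID macro referred to above):
+ *
+ *	u32 vdqcr = QM_VDQCR_PRECEDENCE_SDQCR | QM_VDQCR_EXACT |
+ *		    QM_VDQCR_NUMFRAMES_SET(4);
+ */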
+
+
+/* ------------------------------------------------------- */
+/* --- Qman data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
+struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
+struct qm_mr_entry; /* MR (Message Ring) entries */
+struct qm_mc_command; /* MC (Management Command) command */
+struct qm_mc_result; /* MC result */
+
+/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */
+#define QM_FD_FORMAT_SG 0x4
+#define QM_FD_FORMAT_LONG 0x2
+#define QM_FD_FORMAT_COMPOUND 0x1
+enum qm_fd_format {
+ /* 'contig' implies a contiguous buffer, whereas 'sg' implies a
+ * scatter-gather table. 'big' implies a 29-bit length with no offset
+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+ * implies a s/g-like table, where each entry itself represents a frame
+ * (contiguous or scatter-gather) and the 29-bit "length" is
+ * interpreted purely for congestion calculations, ie. a "congestion
+ * weight". */
+ qm_fd_contig = 0,
+ qm_fd_contig_big = QM_FD_FORMAT_LONG,
+ qm_fd_sg = QM_FD_FORMAT_SG,
+ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+ qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+/* Capitalised versions are un-typed but can be used in static expressions */
+#define QM_FD_CONTIG 0
+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
+#define QM_FD_SG QM_FD_FORMAT_SG
+#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
+#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
+
+/* See 1.5.1.1: "Frame Descriptor (FD)" */
+struct qm_fd {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 dd:2; /* dynamic debug */
+ u8 liodn_offset:6;
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 eliodn_offset:4;
+ u8 __reserved:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved:4;
+ u8 eliodn_offset:4;
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 liodn_offset:6;
+ u8 dd:2; /* dynamic debug */
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:24;
+ u64 addr:40;
+#else
+ u64 addr:40;
+ u64 __notaddress:24;
+#endif
+ };
+ u64 opaque_addr;
+ };
+ /* The 'format' field indicates the interpretation of the remaining 29
+ * bits of the 32-bit word. For packing reasons, it is duplicated in the
+ * other union elements. Note, union'd structs are difficult to use with
+ * static initialisation under gcc, in which case use the "opaque" form
+ * with one of the macros. */
+ union {
+ /* For easier/faster copying of this part of the fd (eg. from a
+ * DQRR entry to an EQCR entry) copy 'opaque' */
+ u32 opaque;
+ /* If 'format' is _contig or _sg, 20b length and 9b offset */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format format:3;
+ u16 offset:9;
+ u32 length20:20;
+#else
+ u32 length20:20;
+ u16 offset:9;
+ enum qm_fd_format format:3;
+#endif
+ };
+ /* If 'format' is _contig_big or _sg_big, 29b length */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format1:3;
+ u32 length29:29;
+#else
+ u32 length29:29;
+ enum qm_fd_format _format1:3;
+#endif
+ };
+ /* If 'format' is _compound, 29b "congestion weight" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format2:3;
+ u32 cong_weight:29;
+#else
+ u32 cong_weight:29;
+ enum qm_fd_format _format2:3;
+#endif
+ };
+ };
+ union {
+ u32 cmd;
+ u32 status;
+ };
+} __aligned(8);
+#define QM_FD_DD_NULL 0x00
+#define QM_FD_PID_MASK 0x3f
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+ return fd->addr;
+}
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+ return (dma_addr_t)fd->addr;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_fd_addr_set64(fd, v) \
+ do { \
+ struct qm_fd *__fd931 = (fd); \
+ __fd931->addr = v; \
+ } while (0)
+
+/* For static initialisation of FDs (which is complicated by the use of unions
+ * in "struct qm_fd"), use the following macros. Note that;
+ * - 'dd', 'pid' and 'bpid' are ignored because there's no static
+ *   initialisation use-case,
+ * - use capitalised QM_FD_*** formats for static initialisation.
+ */
+#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \
+ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
+ { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \
+ { cmd } }
+#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \
+ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
+ { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \
+ { cmd } }
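+
+/*
+ * Illustrative sketch (made-up address, offset and length): a statically
+ * initialised contiguous FD using the 20-bit-length format:
+ *
+ *	static const struct qm_fd fd = QM_FD_FMT_20(0, 0x12, 0x34567800,
+ *						    QM_FD_CONTIG, 64, 1500);
+ */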
+
+/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */
+#define QM_SG_OFFSET_MASK 0x1FFF
+struct qm_sg_entry {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1[3];
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved1[3];
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:24;
+ u64 addr:40;
+#else
+ u64 addr:40;
+ u64 __notaddress:24;
+#endif
+ };
+ u64 opaque;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 extension:1; /* Extension bit */
+ u32 final:1; /* Final bit */
+ u32 length:30;
+#else
+ u32 length:30;
+ u32 final:1; /* Final bit */
+ u32 extension:1; /* Extension bit */
+#endif
+ };
+ u32 sgt_efl;
+ };
+ u8 __reserved2;
+ u8 bpid;
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved3:3;
+ u16 offset:13;
+#else
+ u16 offset:13;
+ u16 __reserved3:3;
+#endif
+ };
+ u16 opaque_offset;
+ };
+} __packed;
+union qm_sg_efl {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 extension:1; /* Extension bit */
+ u32 final:1; /* Final bit */
+ u32 length:30;
+#else
+ u32 length:30;
+ u32 final:1; /* Final bit */
+ u32 extension:1; /* Extension bit */
+#endif
+ };
+ u32 efl;
+};
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+ return (dma_addr_t)be64_to_cpu(sg->opaque) & 0xffffffffffULL;
+}
+static inline u8 qm_sg_entry_get_ext(const struct qm_sg_entry *sg)
+{
+ union qm_sg_efl u;
+
+ u.efl = be32_to_cpu(sg->sgt_efl);
+ return u.extension;
+}
+static inline u8 qm_sg_entry_get_final(const struct qm_sg_entry *sg)
+{
+ union qm_sg_efl u;
+
+ u.efl = be32_to_cpu(sg->sgt_efl);
+ return u.final;
+}
+static inline u32 qm_sg_entry_get_len(const struct qm_sg_entry *sg)
+{
+ union qm_sg_efl u;
+
+ u.efl = be32_to_cpu(sg->sgt_efl);
+ return u.length;
+}
+static inline u8 qm_sg_entry_get_bpid(const struct qm_sg_entry *sg)
+{
+ return sg->bpid;
+}
+static inline u16 qm_sg_entry_get_offset(const struct qm_sg_entry *sg)
+{
+ u32 opaque_offset = be16_to_cpu(sg->opaque_offset);
+
+ return opaque_offset & 0x1fff;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_sg_entry_set64(sg, v) \
+ do { \
+ struct qm_sg_entry *__sg931 = (sg); \
+ __sg931->opaque = cpu_to_be64(v); \
+ } while (0)
+#define qm_sg_entry_set_ext(sg, v) \
+ do { \
+ union qm_sg_efl __u932; \
+ __u932.efl = be32_to_cpu((sg)->sgt_efl); \
+ __u932.extension = v; \
+ (sg)->sgt_efl = cpu_to_be32(__u932.efl); \
+ } while (0)
+#define qm_sg_entry_set_final(sg, v) \
+ do { \
+ union qm_sg_efl __u933; \
+ __u933.efl = be32_to_cpu((sg)->sgt_efl); \
+ __u933.final = v; \
+ (sg)->sgt_efl = cpu_to_be32(__u933.efl); \
+ } while (0)
+#define qm_sg_entry_set_len(sg, v) \
+ do { \
+ union qm_sg_efl __u934; \
+ __u934.efl = be32_to_cpu((sg)->sgt_efl); \
+ __u934.length = v; \
+ (sg)->sgt_efl = cpu_to_be32(__u934.efl); \
+ } while (0)
+#define qm_sg_entry_set_bpid(sg, v) \
+ do { \
+ struct qm_sg_entry *__u935 = (sg); \
+ __u935->bpid = v; \
+ } while (0)
+#define qm_sg_entry_set_offset(sg, v) \
+ do { \
+ struct qm_sg_entry *__u936 = (sg); \
+ __u936->opaque_offset = cpu_to_be16(v); \
+ } while (0)
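+
+/*
+ * Illustrative sketch ('buf_addr' is a hypothetical caller-supplied DMA
+ * address): filling in a single, final S/G entry with the setters above:
+ *
+ *	struct qm_sg_entry sg;
+ *	memset(&sg, 0, sizeof(sg));
+ *	qm_sg_entry_set64(&sg, buf_addr);
+ *	qm_sg_entry_set_len(&sg, 1500);
+ *	qm_sg_entry_set_final(&sg, 1);
+ */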
+
+/* See 1.5.8.1: "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 __dont_write_directly__verb;
+ u8 dca;
+ u16 seqnum;
+ u32 orp; /* 24-bit */
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
+#define QM_EQCR_VERB_COLOUR_GREEN 0x00
+#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
+#define QM_EQCR_VERB_COLOUR_RED 0x10
+#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
+#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
+#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
+#define QM_EQCR_DCA_ENABLE 0x80
+#define QM_EQCR_DCA_PARK 0x40
+#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
+
+/* See 1.5.8.2: "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+ u8 verb;
+ u8 stat;
+ u16 seqnum; /* 15-bit */
+ u8 tok;
+ u8 __reserved2[3];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ struct qm_fd fd;
+ u8 __reserved4[32];
+};
+#define QM_DQRR_VERB_VBIT 0x80
+#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
+
+/* See 1.5.8.3: "ERN Message Response" */
+/* See 1.5.8.4: "FQ State Change Notification" */
+struct qm_mr_entry {
+ u8 verb;
+ union {
+ struct {
+ u8 dca;
+ u16 seqnum;
+ u8 rc; /* Rejection Code */
+ u32 orp:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ } __packed ern;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+ u8 __reserved1:3;
+ enum qm_dc_portal portal:3;
+#else
+ enum qm_dc_portal portal:3;
+ u8 __reserved1:3;
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+#endif
+ u16 __reserved2;
+ u8 rc; /* Rejection Code */
+ u32 __reserved3:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ } __packed dcern;
+ struct {
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[6];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ u8 __reserved2[16];
+ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+ };
+ u8 __reserved2[32];
+} __packed;
+#define QM_MR_VERB_VBIT 0x80
+/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs
+ * originating from direct-connect portals ("dcern") use 0x20 as a verb which
+ * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from
+ * the other MR types by noting if the 0x20 bit is unset. */
+#define QM_MR_VERB_TYPE_MASK 0x27
+#define QM_MR_VERB_DC_ERN 0x20
+#define QM_MR_VERB_FQRN 0x21
+#define QM_MR_VERB_FQRNI 0x22
+#define QM_MR_VERB_FQRL 0x23
+#define QM_MR_VERB_FQPN 0x24
+#define QM_MR_RC_MASK 0xf0 /* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP 0x00
+#define QM_MR_RC_WRED 0x10
+#define QM_MR_RC_ERROR 0x20
+#define QM_MR_RC_ORPWINDOW_EARLY 0x30
+#define QM_MR_RC_ORPWINDOW_LATE 0x40
+#define QM_MR_RC_FQ_TAILDROP 0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
+#define QM_MR_RC_ORP_ZERO 0x70
+#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MR_DCERN_COLOUR_GREEN 0x00
+#define QM_MR_DCERN_COLOUR_YELLOW 0x01
+#define QM_MR_DCERN_COLOUR_RED 0x02
+#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
+
+/* An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation. */
+struct qm_fqd_stashing {
+ /* See QM_STASHING_EXCL_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 exclusive;
+ u8 __reserved1:2;
+ /* Numbers of cachelines */
+ u8 annotation_cl:2;
+ u8 data_cl:2;
+ u8 context_cl:2;
+#else
+ u8 context_cl:2;
+ u8 data_cl:2;
+ u8 annotation_cl:2;
+ u8 __reserved1:2;
+ u8 exclusive;
+#endif
+} __packed;
+struct qm_fqd_taildrop {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved1:3;
+ u16 mant:8;
+ u16 exp:5;
+#else
+ u16 exp:5;
+ u16 mant:8;
+ u16 __reserved1:3;
+#endif
+} __packed;
+struct qm_fqd_oac {
+ /* See QM_OAC_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 oac:2; /* "Overhead Accounting Control" */
+ u8 __reserved1:6;
+#else
+ u8 __reserved1:6;
+ u8 oac:2; /* "Overhead Accounting Control" */
+#endif
+ /* Two's-complement value (-128 to +127) */
+ signed char oal; /* "Overhead Accounting Length" */
+} __packed;
+struct qm_fqd {
+ union {
+ u8 orpc;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1:2;
+ u8 orprws:3;
+ u8 oa:1;
+ u8 olws:2;
+#else
+ u8 olws:2;
+ u8 oa:1;
+ u8 orprws:3;
+ u8 __reserved1:2;
+#endif
+ } __packed;
+ };
+ u8 cgid;
+ u16 fq_ctrl; /* See QM_FQCTRL_<...> */
+ union {
+ u16 dest_wq;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 channel:13; /* qm_channel */
+ u16 wq:3;
+#else
+ u16 wq:3;
+ u16 channel:13; /* qm_channel */
+#endif
+ } __packed dest;
+ };
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved2:1;
+ u16 ics_cred:15;
+#else
+ u16 ics_cred:15;
+ u16 __reserved2:1;
+#endif
+ /* For "Initialize Frame Queue" commands, the write-enable mask
+ * determines whether 'td' or 'oac_init' is observed. For query
+ * commands, this field is always 'td', and 'oac_query' (below) reflects
+ * the Overhead ACcounting values. */
+ union {
+ struct qm_fqd_taildrop td;
+ struct qm_fqd_oac oac_init;
+ };
+ u32 context_b;
+ union {
+ /* Treat it as 64-bit opaque */
+ u64 opaque;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 hi;
+ u32 lo;
+#else
+ u32 lo;
+ u32 hi;
+#endif
+ };
+ /* Treat it as s/w portal stashing config */
+ /* See 1.5.6.7.1: "FQD Context_A field used for [...] */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ struct qm_fqd_stashing stashing;
+ /* 48-bit address of FQ context to
+ * stash, must be cacheline-aligned */
+ u16 context_hi;
+ u32 context_lo;
+#else
+ u32 context_lo;
+ u16 context_hi;
+ struct qm_fqd_stashing stashing;
+#endif
+ } __packed;
+ } context_a;
+ struct qm_fqd_oac oac_query;
+} __packed;
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.context_hi << 32) |
+ (u64)fqd->context_a.context_lo;
+}
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+ return (dma_addr_t)qm_fqd_stashing_get64(fqd);
+}
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.hi << 32) |
+ (u64)fqd->context_a.lo;
+}
+/* Macro, so we compile better when 'v' isn't necessarily 64-bit */
+#define qm_fqd_stashing_set64(fqd, v) \
+ do { \
+ struct qm_fqd *__fqd931 = (fqd); \
+ __fqd931->context_a.context_hi = upper_32_bits(v); \
+ __fqd931->context_a.context_lo = lower_32_bits(v); \
+ } while (0)
+#define qm_fqd_context_a_set64(fqd, v) \
+ do { \
+ struct qm_fqd *__fqd931 = (fqd); \
+ __fqd931->context_a.hi = upper_32_bits(v); \
+ __fqd931->context_a.lo = lower_32_bits(v); \
+ } while (0)
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+ if (val > 0xe0000000)
+ return -ERANGE;
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ td->exp = e;
+ td->mant = val;
+ return 0;
+}
+/* and the other direction */
+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
+{
+ return (u32)td->mant << td->exp;
+}
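+
+/*
+ * Worked example of the mant+exp representation: requesting a threshold of
+ * 1001 with rounding up stores mant=251 and exp=2, so reading the value back
+ * yields 251 << 2 == 1004, the nearest representable value >= 1001:
+ *
+ *	struct qm_fqd_taildrop td;
+ *	qm_fqd_taildrop_set(&td, 1001, 1);
+ *	qm_fqd_taildrop_get(&td);
+ */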
+
+/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
+#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION 0x04
+#define QM_STASHING_EXCL_DATA 0x02
+#define QM_STASHING_EXCL_CTX 0x01
+
+/* See 1.5.5.3: "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
+
+/* See 1.5.8.4: "FQ State Change Notification" */
+/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ * MaxTH = MA * (2 ^ Mn)
+ * Slope = SA / (2 ^ Sn)
+ * MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+ union {
+ u32 word;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 MA:8;
+ u32 Mn:5;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Sn:6;
+ u32 Pn:6;
+#else
+ u32 Pn:6;
+ u32 Sn:6;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Mn:5;
+ u32 MA:8;
+#endif
+ } __packed;
+ };
+} __packed;
+/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ * CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+ union {
+ u16 hword;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved:3;
+ u16 TA:8;
+ u16 Tn:5;
+#else
+ u16 Tn:5;
+ u16 TA:8;
+ u16 __reserved:3;
+#endif
+ } __packed;
+ };
+} __packed;
+/* This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's suctioned out here into its own
+ * struct. */
+struct __qm_mc_cgr {
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ u8 wr_en_g; /* boolean, use QM_CGR_EN */
+ u8 wr_en_y; /* boolean, use QM_CGR_EN */
+ u8 wr_en_r; /* boolean, use QM_CGR_EN */
+ u8 cscn_en; /* boolean, use QM_CGR_EN */
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+#else
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+#endif
+ };
+ u32 cscn_targ; /* use QM_CGR_TARG_* */
+ };
+ u8 cstd_en; /* boolean, use QM_CGR_EN */
+ u8 cs; /* boolean, only used in query response */
+ union {
+ /* use qm_cgr_cs_thres_set64() */
+ struct qm_cgr_cs_thres cs_thres;
+ u16 __cs_thres;
+ };
+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+ return (u64)th->TA << th->Tn;
+}
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ th->Tn = e;
+ th->TA = val;
+ return 0;
+}
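+
+/*
+ * Worked example: a congestion state threshold of 65536 is representable
+ * exactly as TA=128, Tn=9 (128 * 2^9 == 65536), so the set/get round-trip is
+ * lossless:
+ *
+ *	struct qm_cgr_cs_thres th;
+ *	qm_cgr_cs_thres_set64(&th, 65536, 1);
+ *	qm_cgr_cs_thres_get64(&th);
+ */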
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands " */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcc_initfq {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ u32 fqid; /* 24-bit */
+ u16 count; /* Initialises 'count+1' FQDs */
+ struct qm_fqd fqd; /* the FQD fields go here */
+ u8 __reserved3[30];
+} __packed;
+struct qm_mcc_queryfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_queryfq_np {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_alterfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2;
+ u8 count; /* number of consecutive FQID */
+ u8 __reserved3[10];
+ u32 context_b; /* frame queue context b */
+ u8 __reserved4[40];
+} __packed;
+struct qm_mcc_initcgr {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[2];
+ u8 cgid;
+ u8 __reserved4[32];
+} __packed;
+struct qm_mcc_cgrtestwrite {
+ u8 __reserved1[2];
+ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved2[23];
+ u8 cgid;
+ u8 __reserved3[32];
+} __packed;
+struct qm_mcc_querycgr {
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+} __packed;
+struct qm_mcc_querycongestion {
+ u8 __reserved[63];
+} __packed;
+struct qm_mcc_querywq {
+ u8 __reserved;
+ /* select channel if verb != QUERYWQ_DEDICATED */
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved1:3;
+#else
+ u16 __reserved1:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved2[60];
+} __packed;
+
+struct qm_mcc_ceetm_lfqmt_config {
+ u8 __reserved1[4];
+ u32 lfqid:24;
+ u8 __reserved2[2];
+ u16 cqid;
+ u8 __reserved3[2];
+ u16 dctidx;
+ u8 __reserved4[48];
+} __packed;
+
+struct qm_mcc_ceetm_lfqmt_query {
+ u8 __reserved1[4];
+ u32 lfqid:24;
+ u8 __reserved2[56];
+} __packed;
+
+struct qm_mcc_ceetm_cq_config {
+ u8 __reserved1;
+ u16 cqid;
+ u8 dcpid;
+ u8 __reserved2;
+ u16 ccgid;
+ u8 __reserved3[56];
+} __packed;
+
+struct qm_mcc_ceetm_cq_query {
+ u8 __reserved1;
+ u16 cqid;
+ u8 dcpid;
+ u8 __reserved2[59];
+} __packed;
+
+struct qm_mcc_ceetm_dct_config {
+ u8 __reserved1;
+ u16 dctidx;
+ u8 dcpid;
+ u8 __reserved2[15];
+ u32 context_b;
+ u64 context_a;
+ u8 __reserved3[32];
+} __packed;
+
+struct qm_mcc_ceetm_dct_query {
+ u8 __reserved1;
+ u16 dctidx;
+ u8 dcpid;
+ u8 __reserved2[59];
+} __packed;
+
+struct qm_mcc_ceetm_class_scheduler_config {
+ u8 __reserved1;
+ u16 cqcid;
+ u8 dcpid;
+ u8 __reserved2[6];
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 gpc_reserved:1;
+ u8 gpc_combine_flag:1;
+ u8 gpc_prio_b:3;
+ u8 gpc_prio_a:3;
+#else
+ u8 gpc_prio_a:3;
+ u8 gpc_prio_b:3;
+ u8 gpc_combine_flag:1;
+ u8 gpc_reserved:1;
+#endif
+ u16 crem;
+ u16 erem;
+ u8 w[8];
+ u8 __reserved3[40];
+} __packed;
+
+struct qm_mcc_ceetm_class_scheduler_query {
+ u8 __reserved1;
+ u16 cqcid;
+ u8 dcpid;
+ u8 __reserved2[59];
+} __packed;
+
+#define CEETM_COMMAND_CHANNEL_MAPPING (0 << 12)
+#define CEETM_COMMAND_SP_MAPPING (1 << 12)
+#define CEETM_COMMAND_CHANNEL_SHAPER (2 << 12)
+#define CEETM_COMMAND_LNI_SHAPER (3 << 12)
+#define CEETM_COMMAND_TCFC (4 << 12)
+
+#define CEETM_CCGRID_MASK 0x01FF
+#define CEETM_CCGR_CM_CONFIGURE (0 << 14)
+#define CEETM_CCGR_DN_CONFIGURE (1 << 14)
+#define CEETM_CCGR_TEST_WRITE (2 << 14)
+#define CEETM_CCGR_CM_QUERY (0 << 14)
+#define CEETM_CCGR_DN_QUERY (1 << 14)
+#define CEETM_CCGR_DN_QUERY_FLUSH (2 << 14)
+#define CEETM_QUERY_CONGESTION_STATE (3 << 14)
+
+struct qm_mcc_ceetm_mapping_shaper_tcfc_config {
+ u8 __reserved1;
+ u16 cid;
+ u8 dcpid;
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 map_shaped:1;
+ u8 map_reserved:4;
+ u8 map_lni_id:3;
+#else
+ u8 map_lni_id:3;
+ u8 map_reserved:4;
+ u8 map_shaped:1;
+#endif
+ u8 __reserved2[58];
+ } __packed channel_mapping;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 map_reserved:5;
+ u8 map_lni_id:3;
+#else
+ u8 map_lni_id:3;
+ u8 map_reserved:5;
+#endif
+ u8 __reserved2[58];
+ } __packed sp_mapping;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 cpl:1;
+ u8 cpl_reserved:2;
+ u8 oal:5;
+#else
+ u8 oal:5;
+ u8 cpl_reserved:2;
+ u8 cpl:1;
+#endif
+ u32 crtcr:24;
+ u32 ertcr:24;
+ u16 crtbl;
+ u16 ertbl;
+ u8 mps; /* This will be hardcoded by the driver to 60 */
+ u8 __reserved2[47];
+ } __packed shaper_config;
+ struct {
+ u8 __reserved2[11];
+ u64 lnitcfcc;
+ u8 __reserved3[40];
+ } __packed tcfc_config;
+ };
+} __packed;
+
+struct qm_mcc_ceetm_mapping_shaper_tcfc_query {
+ u8 __reserved1;
+ u16 cid;
+ u8 dcpid;
+ u8 __reserved2[59];
+} __packed;
+
+struct qm_mcc_ceetm_ccgr_config {
+ u8 __reserved1;
+ u16 ccgrid;
+ u8 dcpid;
+ u8 __reserved2;
+ u16 we_mask;
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 ctl_reserved:1;
+ u8 ctl_wr_en_g:1;
+ u8 ctl_wr_en_y:1;
+ u8 ctl_wr_en_r:1;
+ u8 ctl_td_en:1;
+ u8 ctl_td_mode:1;
+ u8 ctl_cscn_en:1;
+ u8 ctl_mode:1;
+#else
+ u8 ctl_mode:1;
+ u8 ctl_cscn_en:1;
+ u8 ctl_td_mode:1;
+ u8 ctl_td_en:1;
+ u8 ctl_wr_en_r:1;
+ u8 ctl_wr_en_y:1;
+ u8 ctl_wr_en_g:1;
+ u8 ctl_reserved:1;
+#endif
+ u8 cdv;
+ u16 cscn_tupd;
+ u8 oal;
+ u8 __reserved3;
+ struct qm_cgr_cs_thres cs_thres;
+ struct qm_cgr_cs_thres cs_thres_x;
+ struct qm_cgr_cs_thres td_thres;
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ } __packed cm_config;
+ struct {
+ u8 dnc;
+ u8 dn0;
+ u8 dn1;
+ u64 dnba:40;
+ u8 __reserved3[2];
+ u16 dnth_0;
+ u8 __reserved4[2];
+ u16 dnth_1;
+ u8 __reserved5[8];
+ } __packed dn_config;
+ struct {
+ u8 __reserved3[3];
+ u64 i_cnt:40;
+ u8 __reserved4[16];
+ } __packed test_write;
+ };
+ u8 __reserved5[32];
+} __packed;
+
+struct qm_mcc_ceetm_ccgr_query {
+ u8 __reserved1;
+ u16 ccgrid;
+ u8 dcpid;
+ u8 __reserved2[59];
+} __packed;
+
+struct qm_mcc_ceetm_cq_peek_pop_xsfdrread {
+ u8 __reserved1;
+ u16 cqid;
+ u8 dcpid;
+ u8 ct;
+ u16 xsfdr;
+ u8 __reserved2[56];
+} __packed;
+
+#define CEETM_QUERY_DEQUEUE_STATISTICS 0x00
+#define CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS 0x01
+#define CEETM_WRITE_DEQUEUE_STATISTICS 0x02
+#define CEETM_QUERY_REJECT_STATISTICS 0x03
+#define CEETM_QUERY_REJECT_CLEAR_STATISTICS 0x04
+#define CEETM_WRITE_REJECT_STATISTICS 0x05
+struct qm_mcc_ceetm_statistics_query_write {
+ u8 __reserved1;
+ u16 cid;
+ u8 dcpid;
+ u8 ct;
+ u8 __reserved2[13];
+ u64 frm_cnt:40;
+ u8 __reserved3[2];
+ u64 byte_cnt:48;
+ u8 __reserved[32];
+} __packed;
+
+struct qm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_queryfq queryfq;
+ struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_alterfq alterfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_cgrtestwrite cgrtestwrite;
+ struct qm_mcc_querycgr querycgr;
+ struct qm_mcc_querycongestion querycongestion;
+ struct qm_mcc_querywq querywq;
+ struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
+ struct qm_mcc_ceetm_lfqmt_query lfqmt_query;
+ struct qm_mcc_ceetm_cq_config cq_config;
+ struct qm_mcc_ceetm_cq_query cq_query;
+ struct qm_mcc_ceetm_dct_config dct_config;
+ struct qm_mcc_ceetm_dct_query dct_query;
+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
+ struct qm_mcc_ceetm_class_scheduler_query csch_query;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config mst_config;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query mst_query;
+ struct qm_mcc_ceetm_ccgr_config ccgr_config;
+ struct qm_mcc_ceetm_ccgr_query ccgr_query;
+ struct qm_mcc_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
+ struct qm_mcc_ceetm_statistics_query_write stats_query_write;
+ };
+} __packed;
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC 0x0100
+#define QM_INITFQ_WE_ORPC 0x0080
+#define QM_INITFQ_WE_CGID 0x0040
+#define QM_INITFQ_WE_FQCTRL 0x0020
+#define QM_INITFQ_WE_DESTWQ 0x0010
+#define QM_INITFQ_WE_ICSCRED 0x0008
+#define QM_INITFQ_WE_TDTHRESH 0x0004
+#define QM_INITFQ_WE_CONTEXTB 0x0002
+#define QM_INITFQ_WE_CONTEXTA 0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G 0x0400
+#define QM_CGR_WE_WR_PARM_Y 0x0200
+#define QM_CGR_WE_WR_PARM_R 0x0100
+#define QM_CGR_WE_WR_EN_G 0x0080
+#define QM_CGR_WE_WR_EN_Y 0x0040
+#define QM_CGR_WE_WR_EN_R 0x0020
+#define QM_CGR_WE_CSCN_EN 0x0010
+#define QM_CGR_WE_CSCN_TARG 0x0008
+#define QM_CGR_WE_CSTD_EN 0x0004
+#define QM_CGR_WE_CS_THRES 0x0002
+#define QM_CGR_WE_MODE 0x0001
+
+/* See 1.5.9.7 CEETM Management Commands */
+#define QM_CEETM_VERB_LFQMT_CONFIG 0x70
+#define QM_CEETM_VERB_LFQMT_QUERY 0x71
+#define QM_CEETM_VERB_CQ_CONFIG 0x72
+#define QM_CEETM_VERB_CQ_QUERY 0x73
+#define QM_CEETM_VERB_DCT_CONFIG 0x74
+#define QM_CEETM_VERB_DCT_QUERY 0x75
+#define QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG 0x76
+#define QM_CEETM_VERB_CLASS_SCHEDULER_QUERY 0x77
+#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG 0x78
+#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY 0x79
+#define QM_CEETM_VERB_CCGR_CONFIG 0x7A
+#define QM_CEETM_VERB_CCGR_QUERY 0x7B
+#define QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD 0x7C
+#define QM_CEETM_VERB_STATISTICS_QUERY_WRITE 0x7D
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands " */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcr_initfq {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_queryfq {
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+struct qm_mcr_queryfq_np {
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved2;
+ u32 fqd_link:24;
+ u16 __reserved3:2;
+ u16 odp_seq:14;
+ u16 __reserved4:2;
+ u16 orp_nesn:14;
+ u16 __reserved5:1;
+ u16 orp_ea_hseq:15;
+ u16 __reserved6:1;
+ u16 orp_ea_tseq:15;
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+ u8 __reserved11[5];
+ u8 __reserved12:7;
+ u8 is:1;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#else
+ u8 __reserved2;
+ u32 fqd_link:24;
+
+ u16 odp_seq:14;
+ u16 __reserved3:2;
+
+ u16 orp_nesn:14;
+ u16 __reserved4:2;
+
+ u16 orp_ea_hseq:15;
+ u16 __reserved5:1;
+
+ u16 orp_ea_tseq:15;
+ u16 __reserved6:1;
+
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+
+ u8 __reserved11[5];
+ u8 is:1;
+ u8 __reserved12:7;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#endif
+} __packed;
+
+struct qm_mcr_alterfq {
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+} __packed;
+struct qm_mcr_initcgr {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_cgrtestwrite {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u16 lgt; /* Last Group Tick */
+ u16 wr_prob_g;
+ u16 wr_prob_y;
+ u16 wr_prob_r;
+ u8 __reserved5[8];
+} __packed;
+struct qm_mcr_querycgr {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 __reserved3:24;
+#endif
+ };
+ u64 i_bcnt;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 __reserved4:24;
+#endif
+ };
+ u64 a_bcnt;
+ };
+ union {
+ u32 cscn_targ_swp[4];
+ u8 __reserved5[16];
+ };
+} __packed;
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return be64_to_cpu(q->i_bcnt);
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return be64_to_cpu(q->a_bcnt);
+}
+static inline u64 qm_mcr_cgrtestwrite_i_get64(
+ const struct qm_mcr_cgrtestwrite *q)
+{
+ return be64_to_cpu(((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo);
+}
+static inline u64 qm_mcr_cgrtestwrite_a_get64(
+ const struct qm_mcr_cgrtestwrite *q)
+{
+ return be64_to_cpu(((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo);
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_mcr_querycgr_i_set64(q, v) \
+ do { \
+ struct qm_mcr_querycgr *__q931 = (q); \
+ __q931->i_bcnt_hi = upper_32_bits(v); \
+ __q931->i_bcnt_lo = lower_32_bits(v); \
+ } while (0)
+#define qm_mcr_querycgr_a_set64(q, v) \
+ do { \
+ struct qm_mcr_querycgr *__q931 = (q); \
+ __q931->a_bcnt_hi = upper_32_bits(v); \
+ __q931->a_bcnt_lo = lower_32_bits(v); \
+ } while (0)
+struct __qm_mcr_querycongestion {
+ u32 __state[8];
+};
+struct qm_mcr_querycongestion {
+ u8 __reserved[30];
+ /* Access this struct using QM_MCR_QUERYCONGESTION() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+struct qm_mcr_querywq {
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved:3;
+#else
+ u16 __reserved:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+/* QMAN CEETM Management Command Response */
+struct qm_mcr_ceetm_lfqmt_config {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_ceetm_lfqmt_query {
+ u8 __reserved1[8];
+ u16 cqid;
+ u8 __reserved2[2];
+ u16 dctidx;
+ u8 __reserved3[2];
+ u16 ccgid;
+ u8 __reserved4[44];
+} __packed;
+
+struct qm_mcr_ceetm_cq_config {
+ u8 __reserved1[62];
+} __packed;
+
+struct qm_mcr_ceetm_cq_query {
+ u8 __reserved1[4];
+ u16 ccgid;
+ u16 state;
+ u32 pfdr_hptr:24;
+ u32 pfdr_tptr:24;
+ u16 od1_xsfdr;
+ u16 od2_xsfdr;
+ u16 od3_xsfdr;
+ u16 od4_xsfdr;
+ u16 od5_xsfdr;
+ u16 od6_xsfdr;
+ u16 ra1_xsfdr;
+ u16 ra2_xsfdr;
+ u8 __reserved2;
+ u32 frm_cnt:24;
+ u8 __reserved3[28];
+} __packed;
+
+struct qm_mcr_ceetm_dct_config {
+ u8 __reserved1[62];
+} __packed;
+
+struct qm_mcr_ceetm_dct_query {
+ u8 __reserved1[18];
+ u32 context_b;
+ u64 context_a;
+ u8 __reserved2[32];
+} __packed;
+
+struct qm_mcr_ceetm_class_scheduler_config {
+ u8 __reserved1[62];
+} __packed;
+
+struct qm_mcr_ceetm_class_scheduler_query {
+ u8 __reserved1[9];
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 gpc_reserved:1;
+ u8 gpc_combine_flag:1;
+ u8 gpc_prio_b:3;
+ u8 gpc_prio_a:3;
+#else
+ u8 gpc_prio_a:3;
+ u8 gpc_prio_b:3;
+ u8 gpc_combine_flag:1;
+ u8 gpc_reserved:1;
+#endif
+ u16 crem;
+ u16 erem;
+ u8 w[8];
+ u8 __reserved2[5];
+ u32 wbfslist:24;
+ u32 d8;
+ u32 d9;
+ u32 d10;
+ u32 d11;
+ u32 d12;
+ u32 d13;
+ u32 d14;
+ u32 d15;
+} __packed;
+
+struct qm_mcr_ceetm_mapping_shaper_tcfc_config {
+ u16 cid;
+ u8 __reserved2[60];
+} __packed;
+
+struct qm_mcr_ceetm_mapping_shaper_tcfc_query {
+ u16 cid;
+ u8 __reserved1;
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 map_shaped:1;
+ u8 map_reserved:4;
+ u8 map_lni_id:3;
+#else
+ u8 map_lni_id:3;
+ u8 map_reserved:4;
+ u8 map_shaped:1;
+#endif
+ u8 __reserved2[58];
+ } __packed channel_mapping_query;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 map_reserved:5;
+ u8 map_lni_id:3;
+#else
+ u8 map_lni_id:3;
+ u8 map_reserved:5;
+#endif
+ u8 __reserved2[58];
+ } __packed sp_mapping_query;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 cpl:1;
+ u8 cpl_reserved:2;
+ u8 oal:5;
+#else
+ u8 oal:5;
+ u8 cpl_reserved:2;
+ u8 cpl:1;
+#endif
+ u32 crtcr:24;
+ u32 ertcr:24;
+ u16 crtbl;
+ u16 ertbl;
+ u8 mps;
+ u8 __reserved2[15];
+ u32 crat;
+ u32 erat;
+ u8 __reserved3[24];
+ } __packed shaper_query;
+ struct {
+ u8 __reserved1[11];
+ u64 lnitcfcc;
+ u8 __reserved3[40];
+ } __packed tcfc_query;
+ };
+} __packed;
+
+struct qm_mcr_ceetm_ccgr_config {
+ u8 __reserved1[46];
+ union {
+ u8 __reserved2[8];
+ struct {
+ u16 timestamp;
+ u16 wr_prob_g;
+ u16 wr_prob_y;
+ u16 wr_prob_r;
+ } __packed test_write;
+ };
+ u8 __reserved3[8];
+} __packed;
+
+struct qm_mcr_ceetm_ccgr_query {
+ u8 __reserved1[6];
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 ctl_reserved:1;
+ u8 ctl_wr_en_g:1;
+ u8 ctl_wr_en_y:1;
+ u8 ctl_wr_en_r:1;
+ u8 ctl_td_en:1;
+ u8 ctl_td_mode:1;
+ u8 ctl_cscn_en:1;
+ u8 ctl_mode:1;
+#else
+ u8 ctl_mode:1;
+ u8 ctl_cscn_en:1;
+ u8 ctl_td_mode:1;
+ u8 ctl_td_en:1;
+ u8 ctl_wr_en_r:1;
+ u8 ctl_wr_en_y:1;
+ u8 ctl_wr_en_g:1;
+ u8 ctl_reserved:1;
+#endif
+ u8 cdv;
+ u8 __reserved2[2];
+ u8 oal;
+ u8 __reserved3;
+ struct qm_cgr_cs_thres cs_thres;
+ struct qm_cgr_cs_thres cs_thres_x;
+ struct qm_cgr_cs_thres td_thres;
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ u16 cscn_targ_dcp;
+ u8 dcp_lsn;
+ u64 i_cnt:40;
+ u8 __reserved4[3];
+ u64 a_cnt:40;
+ u32 cscn_targ_swp[4];
+ } __packed cm_query;
+ struct {
+ u8 dnc;
+ u8 dn0;
+ u8 dn1;
+ u64 dnba:40;
+ u8 __reserved2[2];
+ u16 dnth_0;
+ u8 __reserved3[2];
+ u16 dnth_1;
+ u8 __reserved4[10];
+ u16 dnacc_0;
+ u8 __reserved5[2];
+ u16 dnacc_1;
+ u8 __reserved6[24];
+ } __packed dn_query;
+ struct {
+ u8 __reserved2[24];
+ struct __qm_mcr_querycongestion state;
+ } __packed congestion_state;
+
+ };
+} __packed;
+
+struct qm_mcr_ceetm_cq_peek_pop_xsfdrread {
+ u8 stat;
+ u8 __reserved1[11];
+ u16 dctidx;
+ struct qm_fd fd;
+ u8 __reserved2[32];
+} __packed;
+
+struct qm_mcr_ceetm_statistics_query {
+ u8 __reserved1[17];
+ u64 frm_cnt:40;
+ u8 __reserved2[2];
+ u64 byte_cnt:48;
+ u8 __reserved3[32];
+} __packed;
+
+struct qm_mc_result {
+ u8 verb;
+ u8 result;
+ union {
+ struct qm_mcr_initfq initfq;
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_queryfq_np queryfq_np;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_initcgr initcgr;
+ struct qm_mcr_cgrtestwrite cgrtestwrite;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ struct qm_mcr_ceetm_lfqmt_config lfqmt_config;
+ struct qm_mcr_ceetm_lfqmt_query lfqmt_query;
+ struct qm_mcr_ceetm_cq_config cq_config;
+ struct qm_mcr_ceetm_cq_query cq_query;
+ struct qm_mcr_ceetm_dct_config dct_config;
+ struct qm_mcr_ceetm_dct_query dct_query;
+ struct qm_mcr_ceetm_class_scheduler_config csch_config;
+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_config mst_config;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query mst_query;
+ struct qm_mcr_ceetm_ccgr_config ccgr_config;
+ struct qm_mcr_ceetm_ccgr_query ccgr_query;
+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
+ struct qm_mcr_ceetm_statistics_query stats_query;
+ };
+} __packed;
+
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+/* This extracts the state for congestion group 'n' from a query response.
+ * Eg.
+ * u8 cgr = [...];
+ * struct qm_mc_result *res = [...];
+ * printf("congestion group %d congestion state: %d\n", cgr,
+ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
+ */
+#define __CGR_WORD(num) (num >> 5)
+#define __CGR_SHIFT(num) (num & 0x1f)
+#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
+ u8 cgr)
+{
+ return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
+}
+
+
+/*********************/
+/* Utility interface */
+/*********************/
+
+/* Represents an allocator over a range of FQIDs. NB, accesses are not locked,
+ * spinlock them yourself if needed. */
+struct qman_fqid_pool;
+
+/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy()
+ * always succeeds, but returns non-zero if there were "leaked" FQID
+ * allocations. */
+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num);
+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool);
+/* Alloc/free a FQID from the range. _alloc() returns zero for success. */
+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid);
+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid);
+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool);
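+
+/* For illustration only - a minimal usage sketch of the FQID pool allocator
+ * declared above (range base 0x100 and size 64 are arbitrary; per the notes
+ * above, 'num' must be a multiple of 32 and the caller provides any locking):
+ *
+ * struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 64);
+ * u32 fqid;
+ * if (pool && !qman_fqid_pool_alloc(pool, &fqid)) {
+ *	[ ... use 'fqid' ... ]
+ *	qman_fqid_pool_free(pool, fqid);
+ * }
+ * if (qman_fqid_pool_destroy(pool))
+ *	[ ... non-zero means "leaked" FQID allocations ... ]
+ */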
+
+/*******************************************************************/
+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
+/*******************************************************************/
+
+ /* Portal and Frame Queues */
+ /* ----------------------- */
+/* Represents a managed portal */
+struct qman_portal;
+
+/* This object type represents Qman frame queue descriptors (FQD); it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down. */
+struct qman_fq;
+
+/* This object type represents a Qman congestion group, it is defined further
+ * down. */
+struct qman_cgr;
+
+struct qman_portal_config {
+ /* If the caller enables DQRR stashing (and thus wishes to operate the
+ * portal from only one cpu), this is the logical CPU that the portal
+ * will stash to. Whether stashing is enabled or not, this setting is
+ * also used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no core
+ * affinity configured. */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+ * processing on behalf of other CPUs.) */
+ int is_shared;
+ /* The portal's dedicated channel id, use this value for initialising
+ * frame queues to target this portal when scheduled. */
+ u16 channel;
+ /* A mask of which pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */
+ u32 pools;
+};
+
+/* This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because contextB is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume. */
+enum qman_cb_dqrr_result {
+ /* DQRR entry can be consumed */
+ qman_cb_dqrr_consume,
+ /* Like _consume, but requests parking - FQ must be held-active */
+ qman_cb_dqrr_park,
+ /* Does not consume, for DCA mode only. This allows out-of-order
+ * consumes by explicit calls to qman_dca() and/or the use of implicit
+ * DCA via EQCR entries. */
+ qman_cb_dqrr_defer,
+ /* Stop processing without consuming this ring entry. Exits the current
+ * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an
+ * interrupt handler, the callback would typically call
+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+ * otherwise the interrupt will reassert immediately. */
+ qman_cb_dqrr_stop,
+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
+ qman_cb_dqrr_consume_stop
+};
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr);
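+
+/* For illustration only - the shape of a minimal DQRR handler, assuming the
+ * dequeued frame descriptor lives in the DQRR entry's 'fd' field (as defined
+ * earlier in this header) and 'process_frame' is a placeholder:
+ *
+ * static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *qm,
+ *					      struct qman_fq *fq,
+ *					      const struct qm_dqrr_entry *dqrr)
+ * {
+ *	process_frame(&dqrr->fd);
+ *	return qman_cb_dqrr_consume;
+ * }
+ */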
+
+/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns. */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+
+/* This callback type is used when handling DCP ERNs */
+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
+ const struct qm_mr_entry *msg);
+
+/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". Things like "retired" will not
+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked". */
+enum qman_fq_state {
+ qman_fq_state_oos,
+ qman_fq_state_parked,
+ qman_fq_state_sched,
+ qman_fq_state_retired
+};
+
+/* Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ * // myfq is allocated and driver_fq callbacks filled in;
+ * struct my_fq {
+ * struct qman_fq base;
+ * int an_extra_field;
+ * [ ... add other fields to be associated with each FQ ...]
+ * } *myfq = some_my_fq_allocator();
+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
+ * struct my_fq *myfq = (struct my_fq *)fq;
+ * do_something_with(myfq->an_extra_field);
+ * [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify how ever
+ * many cachelines are required to stash 'struct my_fq', to accelerate not
+ * only the Qman driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+ qman_cb_dqrr dqrr; /* for dequeued frames */
+ qman_cb_mr ern; /* for s/w ERNs */
+ qman_cb_mr fqs; /* frame-queue state changes*/
+};
+
+struct qman_fq {
+ /* Caller of qman_create_fq() provides these demux callbacks */
+ struct qman_fq_cb cb;
+ /* These are internal to the driver, don't touch. In particular, they
+ * may change, be removed, or extended (so you shouldn't rely on
+ * sizeof(qman_fq) being a constant). */
+ spinlock_t fqlock;
+ u32 fqid;
+ volatile unsigned long flags;
+ enum qman_fq_state state;
+ int cgr_groupid;
+ struct rb_node node;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ u32 key;
+#endif
+};
+
+/* This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+ struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+ /* Set these prior to qman_create_cgr() */
+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+ qman_cb_cgr cb;
+ /* These are private to the driver */
+ u16 chan; /* portal channel this object is created on */
+ struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
+#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_destroy_fq() */
+#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
+
+/* Flags from qman_fq_state() */
+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+
+/* Flags to qman_volatile_dequeue() */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
+#endif
+
+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
+ * any change here should be audited in PME.) */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */
+#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
+#endif
+#endif
+#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
+#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
+#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
+ (((u32)(p) << 2) & 0x00000f00)
+#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
+#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
+#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
+/* For the ORP-specific qman_enqueue_orp() variant;
+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
+ * of a frame. */
+#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
+/* - this flag performs no enqueue but fills in an ORP sequence number that
+ * would otherwise block it (eg. if a frame has been dropped). */
+#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
+/* - this flag performs no enqueue but advances NESN to the given sequence
+ * number. */
+#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
+
+/* Flags to qman_modify_cgr() */
+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+#define QMAN_CGR_MODE_FRAME 0x00000001
+
+ /* Portal Management */
+ /* ----------------- */
+/**
+ * qman_get_portal_config - get portal configuration settings
+ *
+ * This returns a read-only view of the current cpu's affine portal settings.
+ */
+const struct qman_portal_config *qman_get_portal_config(void);
+
+/**
+ * qman_irqsource_get - return the portal work that is interrupt-driven
+ *
+ * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
+ * enabled for interrupt handling on the current cpu's affine portal. These
+ * sources will trigger the portal interrupt and the interrupt handler (or a
+ * tasklet/bottom-half it defers to) will perform the corresponding processing
+ * work. The qman_poll_***() functions will only process sources that are not in
+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
+ * this always returns zero.
+ */
+u32 qman_irqsource_get(void);
+
+/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_remove(u32 bits);
+
+/**
+ * qman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *qman_affine_cpus(void);
+
+/**
+ * qman_affine_channel - return the channel ID of a portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the mask returned from qman_affine_cpus().
+ */
+u16 qman_affine_channel(int cpu);
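+
+/* For illustration only - a sketch of walking the affine portals to collect
+ * their dedicated channel ids (eg. for use as FQ destinations):
+ *
+ * int cpu;
+ * for_each_cpu(cpu, qman_affine_cpus()) {
+ *	u16 channel = qman_affine_channel(cpu);
+ *	[ ... use 'channel' when initialising frame queues ... ]
+ * }
+ */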
+
+/**
+ * qman_get_affine_portal - return the portal pointer affine to cpu
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ */
+void *qman_get_affine_portal(int cpu);
+
+/**
+ * qman_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * Ie. the value returned by qman_irqsource_get() should not include
+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
+ * this function will return -EINVAL, otherwise the return value is >=0 and
+ * represents the number of DQRR entries processed.
+ */
+int qman_poll_dqrr(unsigned int limit);
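+
+/* For illustration only - a poll-mode sketch: take DQRR out of the
+ * interrupt-driven set, then poll with an arbitrary budget of 16.
+ * 'stop_condition' is a placeholder:
+ *
+ * qman_irqsource_remove(QM_PIRQ_DQRI);
+ * while (!stop_condition) {
+ *	int processed = qman_poll_dqrr(16);
+ *	if (processed < 0)
+ *		break; // portal is shared/hosted on another cpu
+ *	if (!processed)
+ *		cpu_relax();
+ * }
+ */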
+
+/**
+ * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
+ *
+ * This function does any portal processing that isn't interrupt-driven. If the
+ * current CPU is sharing a portal hosted on another CPU, this function will
+ * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources
+ * indicating what interrupt sources were actually processed by the call.
+ */
+u32 qman_poll_slow(void);
+
+/**
+ * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow()
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. There are two classes of portal processing in question;
+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
+ * thresholds, congestion state changes, etc). This function does whatever
+ * processing is not triggered by interrupts.
+ *
+ * Note, if DQRR and some slow-path processing are poll-driven (rather than
+ * interrupt-driven) then this function uses a heuristic to determine how often
+ * to run slow-path processing - as slow-path processing introduces at least a
+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
+ * close to zero-cost if there is no work to be done. Applications can tune this
+ * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly
+ * rather than going via this wrapper.
+ */
+void qman_poll(void);
+
+/**
+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
+ *
+ * Disables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_stop_dequeues(void);
+
+/**
+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
+ *
+ * Enables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_start_dequeues(void);
+
+/**
+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_static_dequeue_add(u32 pools);
+
+/**
+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Removes a set of pool channels from the portal's static dequeue command
+ * register (SDQCR). The requested pools are limited to those the portal has
+ * dequeue access to.
+ */
+void qman_static_dequeue_del(u32 pools);
+
+/**
+ * qman_static_dequeue_get - return the portal's current SDQCR
+ *
+ * Returns the portal's current static dequeue command register (SDQCR). The
+ * entire register is returned, so if only the currently-enabled pool channels
+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
+ */
+u32 qman_static_dequeue_get(void);
+
+/**
+ * qman_dca - Perform a Discrete Consumption Acknowledgement
+ * @dq: the DQRR entry to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca(struct qm_dqrr_entry *dq, int park_request);
+
+/**
+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * enqueues for the local portal have been processed by Qman but can't use the
+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
+ * The function forces tracking of EQCR consumption (which normally doesn't
+ * happen until enqueue processing needs to find space to put new enqueue
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int qman_eqcr_is_empty(void);
+
+/**
+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
+ * @handler: callback for processing DCP ERNs
+ * @affine: whether this handler is specific to the locally affine portal
+ *
+ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
+ * DCP) is configured not to receive enqueue rejections, then any enqueues
+ * through that DCP that are rejected will be sent to a given software portal.
+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
+ * received on the portal affine to the current CPU. If multiple CPUs share a
+ * portal and they all call this function, they will be setting the handler for
+ * the same portal! If @affine is zero, then this handler will be global to all
+ * portals handled by this instance of the driver. Only those portals that do
+ * not have their own affine handler will use the global handler.
+ */
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
+
+ /* FQ management */
+ /* ------------- */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with; it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller. If the AS_IS flag is specified, management commands
+ * will be used on the portal to query state for frame queue @fqid and construct
+ * a frame queue object based on that, rather than assuming/requiring that it be
+ * Out of Service.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ * @flags: bit-mask of QMAN_FQ_FREE_*** options
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
+ * is specified, in which case it may also be in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_fq_state - Queries the state of a FQ object
+ * @fq: the frame queue object to query
+ * @state: pointer to state enum to return the FQ scheduling state
+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
+ *
+ * Queries the state of the FQ object, without performing any h/w commands.
+ * This captures the state, as seen by the driver, at the time the function
+ * executes.
+ */
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ * initialised to a value used by the driver for demux.
+ * - if context_b is initialised for demux, so is context_a in case stashing
+ * is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
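+
+/* For illustration only - creating and scheduling a dynamically-allocated FQ
+ * with driver defaults (@opts NULL). This sketch assumes the @fqid argument
+ * is ignored when QMAN_FQ_FLAG_DYNAMIC_FQID is set; 'my_dqrr_cb' is a
+ * placeholder dequeue callback:
+ *
+ * static struct qman_fq my_fq = {
+ *	.cb.dqrr = my_dqrr_cb,
+ * };
+ * // ...
+ * if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq))
+ *	return -EIO;
+ * if (qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, NULL))
+ *	return -EIO;
+ */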
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked, which takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
+ * @fq: the frame queue object to be set to XON/XOFF state; must not be in the
+ * 'oos', 'retired' or 'parked' state
+ * @xon: boolean to set fq in XON or XOFF state
+ *
+ * The frame queue should be in Tentatively Scheduled or Truly Scheduled state,
+ * otherwise the IFSI interrupt will be asserted.
+ */
+int qman_fq_flow_control(struct qman_fq *fq, int xon);
+
+/**
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
+/**
+ * qman_query_wq - Queries work queue lengths
+ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
+ * to this software portal. Otherwise, query length of WQs in the
+ * channel specified in @wq.
+ * @wq: storage for the queried WQ lengths. Also specifies the channel
+ * to query if query_dedicated is zero.
+ */
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
+
+/**
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
+ * "flags" retrieved from qman_fq_state().
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ *
+ * Fills an entry in the affine portal's EQCR to enqueue the frame described by
+ * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
+ * field is ignored. The return value is non-zero on error, such as ring full
+ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
+ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
+ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
+ * interrupt will assert when Qman consumes the EQCR entry (subject to "status
+ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
+ * perform an implied "discrete consumption acknowledgement" on the dequeue
+ * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
+ * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
+ * this implicit DCA can delay the release of a "held active" frame queue
+ * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
+ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
+ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
+ * acknowledgement should "park request" the "held active" frame queue. Ie.
+ * when the portal eventually releases that frame queue, it will be left in the
+ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
+ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
+ * is requested, and the FQ is a member of a congestion group, then this
+ * function returns -EAGAIN if the congestion group is currently congested.
+ * Note, this does not eliminate ERNs, as the async interface means we can be
+ * sending enqueue commands to an un-congested FQ that becomes congested before
+ * the enqueue commands are processed, but it does minimise needless thrashing
+ * of an already busy hardware resource by throttling many of the to-be-dropped
+ * enqueues "at the source".
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
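+
+/* For illustration only - an enqueue with congestion watching, reacting to
+ * the -EAGAIN described above ('fd' is assumed to be a prepared descriptor):
+ *
+ * int ret = qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WATCH_CGR);
+ * if (ret == -EAGAIN)
+ *	[ ... congestion group is congested: drop or defer ... ]
+ * else if (ret)
+ *	[ ... other failure, eg. EQCR full without FLAG_WAIT ... ]
+ */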
+
+typedef int (*qman_cb_precommit) (void *arg);
+/**
+ * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @cb: user supplied callback function to invoke before writing commit verb.
+ * @cb_arg: callback function argument
+ *
+ * This is similar to qman_enqueue except that it will invoke a user supplied
+ * callback function just before writing the commit verb. This is useful
+ * when the user wants to do something *just before* enqueuing the request,
+ * at the point where the enqueue can no longer fail.
+ */
+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
+ u32 flags, qman_cb_precommit cb, void *cb_arg);
+
+/**
+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @orp: the frame queue object used as an order restoration point.
+ * @orp_seqnum: the sequence number of this frame in the order restoration path
+ *
+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
+ * enqueue operation to employ order restoration. Each frame queue object acts
+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
+ * with an incrementing sequence number, this value is generally ignored unless
+ * that sequence of dequeued frames will need order restoration later. Each
+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
+ * is a re-assembly context for re-ordering frames relative to their sequence
+ * numbers as they are enqueued. The ORP does not have to be within the frame
+ * queue that receives the enqueued frame, in fact it is usually the frame
+ * queue from which the frames were originally dequeued. For the purposes of
+ * order restoration, multiple frames (or "fragments") can be enqueued for a
+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
+ * enqueues except the final fragment of a given sequence number. Ordering
+ * between sequence numbers is guaranteed, even if fragments of different
+ * sequence numbers are interlaced with one another. Fragments of the same
+ * sequence number will retain the order in which they are enqueued. If no
+ * enqueue is to performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
+ * sequence number should become the ORP's "Next Expected Sequence Number".
+ *
+ * Side note: a frame queue object can be used purely as an ORP, without
+ * carrying any frames at all. Care should be taken not to deallocate a frame
+ * queue object that is being actively used as an ORP, as a future allocation
+ * of the frame queue object may start using the internal ORP before the
+ * previous use has finished.
+ */
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count FQIDs
+ *
+ * Returns the number of frame queues allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * FQs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_fqid(u32 *result)
+{
+ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_fqid_range - Release the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of FQIDs
+ * that it can subsequently allocate from.
+ */
+void qman_release_fqid_range(u32 fqid, unsigned int count);
+static inline void qman_release_fqid(u32 fqid)
+{
+ qman_release_fqid_range(fqid, 1);
+}
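+
+/* For illustration only - allocating a single FQID from the global allocator
+ * and releasing it when finished:
+ *
+ * u32 fqid;
+ * if (qman_alloc_fqid(&fqid))
+ *	return -ENOMEM;
+ * [ ... create and use an FQ with 'fqid' ... ]
+ * qman_release_fqid(fqid);
+ */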
+
+void qman_seed_fqid_range(u32 fqid, unsigned int count);
+
+
+int qman_shutdown_fq(u32 fqid);
+
+/**
+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to reserve
+ * @count: the number of FQIDs in the range
+ */
+int qman_reserve_fqid_range(u32 fqid, unsigned int count);
+static inline int qman_reserve_fqid(u32 fqid)
+{
+ return qman_reserve_fqid_range(fqid, 1);
+}
+
+ /* Pool-channel management */
+ /* ----------------------- */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of pool-channel IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_pool(u32 *result)
+{
+ int ret = qman_alloc_pool_range(result, 1, 0, 0);
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_pool_range - Release the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to deallocate
+ * @count: the number of pool-channel IDs in the range
+ */
+void qman_release_pool_range(u32 id, unsigned int count);
+static inline void qman_release_pool(u32 id)
+{
+ qman_release_pool_range(id, 1);
+}
+
+/**
+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to reserve
+ * @count: the number of pool-channel IDs in the range
+ */
+int qman_reserve_pool_range(u32 id, unsigned int count);
+static inline int qman_reserve_pool(u32 id)
+{
+ return qman_reserve_pool_range(id, 1);
+}
+
+void qman_seed_pool_range(u32 id, unsigned int count);
+
+ /* CGR management */
+ /* -------------- */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
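+
+/* For illustration only - callback-only registration (@opts NULL, as described
+ * above) on the current cpu's affine portal; 'my_cgr_cb' and the cgrid value
+ * are placeholders:
+ *
+ * static void my_cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr,
+ *			 int congested)
+ * {
+ *	pr_info("CGR %u %s congestion\n", cgr->cgrid,
+ *		congested ? "entered" : "exited");
+ * }
+ * // ...
+ * static struct qman_cgr my_cgr = {
+ *	.cgrid = 12,
+ *	.cb = my_cgr_cb,
+ * };
+ * // ...
+ * if (qman_create_cgr(&my_cgr, 0, NULL))
+ *	[ ... handle error ... ]
+ */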
+
+/**
+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @dcp_portal: the DCP portal to which the cgr object is registered.
+ * @opts: optional state of CGR settings
+ *
+ */
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run qman_delete_cgr() there.
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_modify_cgr - Modify CGR fields
+ * @cgr: the 'cgr' object to modify
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: the CGR-modification settings
+ *
+ * The @opts parameter comes from the low-level portal API, and can be NULL.
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver, in particular the 'cgrid' field is ignored (this operation
+ * only affects the given CGR object). If @flags contains
+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
+ * unspecified parameters) will be used rather than a modify hw command (which
+ * only modifies the specified parameters).
+ */
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
+
+/**
+* qman_query_cgr - Queries CGR fields
+* @cgr: the 'cgr' object to query
+* @result: storage for the queried congestion group record
+*/
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
+
+/**
+ * qman_query_congestion - Queries the state of all congestion groups
+ * @congestion: storage for the queried state of all congestion groups
+ */
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of CGR IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_cgrid(u32 *result)
+{
+ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_cgrid_range - Release the specified range of CGR IDs
+ * @id: the base CGR ID of the range to deallocate
+ * @count: the number of CGR IDs in the range
+ */
+void qman_release_cgrid_range(u32 id, unsigned int count);
+static inline void qman_release_cgrid(u32 id)
+{
+ qman_release_cgrid_range(id, 1);
+}
+
+/**
+ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
+ * @id: the base CGR ID of the range to reserve
+ * @count: the number of CGR IDs in the range
+ */
+int qman_reserve_cgrid_range(u32 id, unsigned int count);
+static inline int qman_reserve_cgrid(u32 id)
+{
+ return qman_reserve_cgrid_range(id, 1);
+}
+
+void qman_seed_cgrid_range(u32 id, unsigned int count);
+
+
+ /* Helpers */
+ /* ------- */
+/**
+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
+ * @fqid: the FQID that will be initialised by other s/w
+ *
+ * In many situations, a FQID is provided for communication between s/w
+ * entities, and whilst the consumer is responsible for initialising and
+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object
+ * and only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie;
+ * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
+ * However, data can not be enqueued to the FQ until it is initialised out of
+ * the OOS state - this function polls for that condition. It is particularly
+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
+ * synchronise. The function returns zero for success, +1 if the FQ is still in
+ * the OOS state, or negative if there was an error.
+ */
+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
+{
+ struct qm_mcr_queryfq_np np;
+ int err;
+ err = qman_query_fq_np(fq, &np);
+ if (err)
+ return err;
+ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
+ return 1;
+ return 0;
+}
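+
+/* For illustration only - the producer-side synchronisation pattern described
+ * above: wrap the agreed FQID as NO_MODIFY, wait for the consumer to take it
+ * out of OOS, then enqueue. 'agreed_fqid' is a placeholder and the 1ms sleep
+ * is arbitrary:
+ *
+ * struct qman_fq tx_fq;
+ * int ret;
+ * if (qman_create_fq(agreed_fqid, QMAN_FQ_FLAG_NO_MODIFY, &tx_fq))
+ *	return -EIO;
+ * while ((ret = qman_poll_fq_for_init(&tx_fq)) == 1)
+ *	msleep(1);
+ * if (ret < 0)
+ *	return ret;
+ * [ ... safe to qman_enqueue() to 'tx_fq' from here on ... ]
+ */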
+
+ /* -------------- */
+ /* CEETM :: types */
+ /* -------------- */
+/**
+ * Token Rate Structure
+ * Shaping rates are based on a "credit" system and a pre-configured h/w
+ * internal timer. The following type represents a shaper "rate" parameter as a
+ * fractional number of "tokens". Here's how it works. This (fractional) number
+ * of tokens is added to the shaper's "credit" every time the h/w timer elapses
+ * (up to a limit which is set by another shaper parameter). Every time a frame
+ * is enqueued through a shaper, the shaper deducts as many tokens as there are
+ * bytes of data in the enqueued frame. A shaper will not allow itself to
+ * enqueue any frames if its token count is negative. As such;
+ *
+ * The rate at which data is enqueued is limited by the
+ * rate at which tokens are added.
+ *
+ * Therefore if the user knows the period between these h/w timer updates in
+ * seconds, they can calculate the maximum traffic rate of the shaper (in
+ * bytes-per-second) from the token rate. And vice versa, they can calculate
+ * the token rate to use in order to achieve a given traffic rate.
+ */
+struct qm_ceetm_rate {
+ /* The token rate is; whole + (fraction/8192) */
+ u32 whole:11; /* 0..2047 */
+ u32 fraction:13; /* 0..8191 */
+};
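+
+/* For illustration only - the arithmetic implied above. If the h/w timer
+ * elapses every T seconds, a rate of {whole, fraction} adds
+ * (whole + fraction/8192) tokens (ie. bytes) of credit per T seconds, so:
+ *
+ *	bytes_per_second = (whole + fraction/8192) / T
+ *
+ * Eg. assuming T = 1us, {whole = 1, fraction = 4096} gives 1.5 bytes/us,
+ * ie. 1.5 MB/s (12 Mbit/s). The qman_ceetm_bps2tokenrate() and
+ * qman_ceetm_tokenrate2bps() helpers below do these conversions using the
+ * actual qman clock and prescaler, so prefer them to open-coding this.
+ */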
+
+struct qm_ceetm_weight_code {
+ /* The weight code is; 5 msbits + 3 lsbits */
+ u8 y:5;
+ u8 x:3;
+};
+
+struct qm_ceetm {
+ unsigned int idx;
+ struct list_head sub_portals;
+ struct list_head lnis;
+ unsigned int sp_range[2];
+ unsigned int lni_range[2];
+};
+
+struct qm_ceetm_sp {
+ struct list_head node;
+ unsigned int idx;
+ unsigned int dcp_idx;
+ int is_claimed;
+ struct qm_ceetm_lni *lni;
+};
+
+/* Logical Network Interface */
+struct qm_ceetm_lni {
+ struct list_head node;
+ unsigned int idx;
+ unsigned int dcp_idx;
+ int is_claimed;
+ struct qm_ceetm_sp *sp;
+ struct list_head channels;
+ int shaper_enable;
+ int shaper_couple;
+ int oal;
+ struct qm_ceetm_rate cr_token_rate;
+ struct qm_ceetm_rate er_token_rate;
+ u16 cr_token_bucket_limit;
+ u16 er_token_bucket_limit;
+};
+
+/* Class Queue Channel */
+struct qm_ceetm_channel {
+ struct list_head node;
+ unsigned int idx;
+ unsigned int lni_idx;
+ unsigned int dcp_idx;
+ struct list_head class_queues;
+ struct list_head ccgs;
+ u8 shaper_enable;
+ u8 shaper_couple;
+ struct qm_ceetm_rate cr_token_rate;
+ struct qm_ceetm_rate er_token_rate;
+ u16 cr_token_bucket_limit;
+ u16 er_token_bucket_limit;
+};
+
+struct qm_ceetm_ccg;
+
+/* This callback type is used when handling congestion entry/exit. The
+ * 'cb_ctx' value is the opaque value associated with ccg object.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_ccgr)(struct qm_ceetm_ccg *ccg, void *cb_ctx,
+ int congested);
+
+/* Class Congestion Group */
+struct qm_ceetm_ccg {
+ struct qm_ceetm_channel *parent;
+ struct list_head node;
+ struct list_head cb_node;
+ qman_cb_ccgr cb;
+ void *cb_ctx;
+ unsigned int idx;
+};
+
+/* Class Queue */
+struct qm_ceetm_cq {
+ struct qm_ceetm_channel *parent;
+ struct qm_ceetm_ccg *ccg;
+ struct list_head node;
+ unsigned int idx;
+ int is_claimed;
+ struct list_head bound_lfqids;
+ struct list_head binding_node;
+};
+
+/* Logical Frame Queue */
+struct qm_ceetm_lfq {
+ struct qm_ceetm_channel *parent;
+ struct list_head node;
+ unsigned int idx;
+ unsigned int dctidx;
+ u64 context_a;
+ u32 context_b;
+ qman_cb_mr ern;
+};
+
+/**
+ * qman_ceetm_bps2tokenrate - Given a desired rate 'bps' measured in bps
+ * (ie. bits-per-second), compute the 'token_rate' fraction that best
+ * approximates that rate.
+ * @bps: the desired shaper rate in bps.
+ * @token_rate: the output token rate computed with the given bps.
+ * @rounding: dictates how to round if an exact conversion is not possible; if
+ * it is negative then 'token_rate' will round down to the highest value that
+ * does not exceed the desired rate, if it is positive then 'token_rate' will
+ * round up to the lowest value that is greater than or equal to the desired
+ * rate, and if it is zero then it will round to the nearest approximation,
+ * whether that be up or down.
+ *
+ * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
+ */
+int qman_ceetm_bps2tokenrate(u64 bps,
+ struct qm_ceetm_rate *token_rate,
+ int rounding);
+
+/**
+ * qman_ceetm_tokenrate2bps - Given a 'token_rate', compute the
+ * corresponding number of 'bps'.
+ * @token_rate: the input desired token_rate fraction.
+ * @bps: the output shaper rate in bps computed with the given token rate.
+ * @rounding: has the same semantics as the previous function.
+ *
+ * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
+ */
+int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate,
+ u64 *bps,
+ int rounding);
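+
+/* For illustration only - converting a 1 Gbps target into a token rate,
+ * rounding down so the shaper never exceeds the target, then reading back
+ * the rate actually achievable:
+ *
+ * struct qm_ceetm_rate rate;
+ * u64 actual_bps;
+ * if (qman_ceetm_bps2tokenrate(1000000000ULL, &rate, -1))
+ *	return -EINVAL;
+ * if (qman_ceetm_tokenrate2bps(&rate, &actual_bps, 0))
+ *	return -EINVAL;
+ */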
+
+int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
+ int partial);
+static inline int qman_alloc_ceetm0_channel(u32 *result)
+{
+ int ret = qman_alloc_ceetm0_channel_range(result, 1, 0, 0);
+ return (ret > 0) ? 0 : ret;
+}
+void qman_release_ceetm0_channel_range(u32 channelid, u32 count);
+static inline void qman_release_ceetm0_channelid(u32 channelid)
+{
+ qman_release_ceetm0_channel_range(channelid, 1);
+}
+
+int qman_reserve_ceetm0_channel_range(u32 channelid, u32 count);
+static inline int qman_reserve_ceetm0_channelid(u32 channelid)
+{
+ return qman_reserve_ceetm0_channel_range(channelid, 1);
+}
+
+void qman_seed_ceetm0_channel_range(u32 channelid, u32 count);
+
+
+int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
+ int partial);
+static inline int qman_alloc_ceetm1_channel(u32 *result)
+{
+ int ret = qman_alloc_ceetm1_channel_range(result, 1, 0, 0);
+ return (ret > 0) ? 0 : ret;
+}
+void qman_release_ceetm1_channel_range(u32 channelid, u32 count);
+static inline void qman_release_ceetm1_channelid(u32 channelid)
+{
+ qman_release_ceetm1_channel_range(channelid, 1);
+}
+int qman_reserve_ceetm1_channel_range(u32 channelid, u32 count);
+static inline int qman_reserve_ceetm1_channelid(u32 channelid)
+{
+ return qman_reserve_ceetm1_channel_range(channelid, 1);
+}
+
+void qman_seed_ceetm1_channel_range(u32 channelid, u32 count);
+
+
+int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
+ int partial);
+static inline int qman_alloc_ceetm0_lfqid(u32 *result)
+{
+ int ret = qman_alloc_ceetm0_lfqid_range(result, 1, 0, 0);
+ return (ret > 0) ? 0 : ret;
+}
+void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count);
+static inline void qman_release_ceetm0_lfqid(u32 lfqid)
+{
+ qman_release_ceetm0_lfqid_range(lfqid, 1);
+}
+int qman_reserve_ceetm0_lfqid_range(u32 lfqid, u32 count);
+static inline int qman_reserve_ceetm0_lfqid(u32 lfqid)
+{
+ return qman_reserve_ceetm0_lfqid_range(lfqid, 1);
+}
+
+void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count);
+
+
+int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
+ int partial);
+static inline int qman_alloc_ceetm1_lfqid(u32 *result)
+{
+ int ret = qman_alloc_ceetm1_lfqid_range(result, 1, 0, 0);
+ return (ret > 0) ? 0 : ret;
+}
+void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count);
+static inline void qman_release_ceetm1_lfqid(u32 lfqid)
+{
+ qman_release_ceetm1_lfqid_range(lfqid, 1);
+}
+int qman_reserve_ceetm1_lfqid_range(u32 lfqid, u32 count);
+static inline int qman_reserve_ceetm1_lfqid(u32 lfqid)
+{
+ return qman_reserve_ceetm1_lfqid_range(lfqid, 1);
+}
+
+void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count);
+
+
+ /* ----------------------------- */
+ /* CEETM :: sub-portals */
+ /* ----------------------------- */
+
+/**
+ * qman_ceetm_sp_claim - Claims the given sub-portal, provided it is available
+ * to us and configured for traffic-management.
+ * @sp: the returned sub-portal object, if successful.
+ * @dcp_idx: specifies the desired Fman block (and thus the relevant CEETM
+ * instance).
+ * @sp_idx: the desired sub-portal index, from 0 to 15.
+ *
+ * Returns zero for success, or -ENODEV if the sub-portal is in use, or -EINVAL
+ * if the sp_idx is out of range.
+ *
+ * Note that if there are multiple driver domains (eg. a linux kernel versus
+ * user-space drivers in USDPAA, or multiple guests running under a hypervisor)
+ * then a sub-portal may be accessible by more than one instance of a qman
+ * driver and so it may be claimed multiple times. If this is the case, it is
+ * up to the system architect to prevent conflicting configuration actions
+ * coming from the different driver domains. The qman drivers do not have any
+ * behind-the-scenes coordination to prevent this from happening.
+ */
+int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp,
+ enum qm_dc_portal dcp_idx,
+ unsigned int sp_idx);
+
+/**
+ * qman_ceetm_sp_release - Releases a previously claimed sub-portal.
+ * @sp: the sub-portal to be released.
+ *
+ * Returns 0 for success, or -EBUSY for failure if the dependencies are not
+ * released.
+ */
+int qman_ceetm_sp_release(struct qm_ceetm_sp *sp);
+
+ /* ----------------------------------- */
+ /* CEETM :: logical network interfaces */
+ /* ----------------------------------- */
+
+/**
+ * qman_ceetm_lni_claim - Claims an unclaimed LNI.
+ * @lni: the returned LNI object, if successful.
+ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
+ * instance).
+ * @lni_idx: the desired LNI index.
+ *
+ * Returns zero for success, or -EINVAL on failure, which will happen if the LNI
+ * is not available or has already been claimed (and not yet successfully
+ * released), or lni_idx is out of range.
+ *
+ * Note that there may be multiple driver domains (or instances) that need to
+ * transmit out the same LNI, so this claim is only guaranteeing exclusivity
+ * within the domain of the driver being called. See qman_ceetm_sp_claim() and
+ * qman_ceetm_sp_get_lni() for more information.
+ */
+int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni,
+ enum qm_dc_portal dcp_id,
+ unsigned int lni_idx);
+
+/**
+ * qman_ceetm_lni_release - Releases a previously claimed LNI.
+ * @lni: the LNI to be released.
+ *
+ * This will only succeed if all dependent objects have been released.
+ * Returns zero for success, or -EBUSY if the dependencies are not released.
+ */
+int qman_ceetm_lni_release(struct qm_ceetm_lni *lni);
+
+/**
+ * qman_ceetm_sp_set_lni
+ * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently
+ * mapped to.
+ * @sp: the given sub-portal.
+ * @lni (in the "set" function): the LNI object to which the sp will be mapped.
+ * @lni_idx (in the "get" function): the LNI index to which the sp is mapped.
+ *
+ * Returns zero for success. The "set" function returns -EINVAL if this sp-lni
+ * mapping has already been set or if the configure mapping command returns
+ * error; the "get" function returns -EINVAL if this sp-lni mapping is not set
+ * or if the query mapping command returns error.
+ *
+ * This may be useful in situations where multiple driver domains have access
+ * to the same sub-portals in order to all be able to transmit out the same
+ * physical interface (perhaps they're on different IP addresses or VPNs, so
+ * Fman is splitting Rx traffic and here we need to converge Tx traffic). In
+ * that case, a control-plane is likely to use qman_ceetm_lni_claim() followed
+ * by qman_ceetm_sp_set_lni() to configure the sub-portal, and other domains
+ * are likely to use qman_ceetm_sp_get_lni() followed by qman_ceetm_lni_claim()
+ * in order to determine the LNI that the control-plane had assigned. This is
+ * why the "get" returns an index, whereas the "set" takes an (already claimed)
+ * LNI object.
+ */
+int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp,
+ struct qm_ceetm_lni *lni);
+int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp,
+ unsigned int *lni_idx);
+
+/**
+ * qman_ceetm_lni_enable_shaper
+ * qman_ceetm_lni_disable_shaper - Enables/disables shaping on the LNI.
+ * @lni: the given LNI.
+ * @coupled: indicates whether CR and ER are coupled.
+ * @oal: the overhead accounting length which is added to the actual length of
+ * each frame when performing shaper calculations.
+ *
+ * When the number of (unused) committed-rate tokens reaches the committed-rate
+ * token limit, 'coupled' indicates whether surplus tokens should be added to
+ * the excess-rate token count (up to the excess-rate token limit).
+ * When an LNI is claimed, its shaper is disabled by default; the enable
+ * function turns the shaper on for that LNI.
+ * Whenever a claimed LNI is first enabled for shaping, its committed and
+ * excess token rates and limits are zero, so will need to be changed to do
+ * anything useful. The shaper can subsequently be enabled/disabled without
+ * resetting the shaping parameters, but the shaping parameters will be reset
+ * when the LNI is released.
+ *
+ * Returns zero for success. The "enable" function returns:
+ * a) -EINVAL if the shaper is already enabled,
+ * b) -EIO if the configure shaper command returns error.
+ * The "disable" function returns:
+ * a) -EINVAL if the shaper has already been disabled,
+ * b) -EIO if the configure shaper command returns error.
+ */
+int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
+ int oal);
+int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni);
+
+/**
+ * qman_ceetm_lni_is_shaper_enabled - Check LNI shaper status
+ * @lni: the given LNI.
+ */
+int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni);
+
+/**
+ * qman_ceetm_lni_set_commit_rate
+ * qman_ceetm_lni_get_commit_rate
+ * qman_ceetm_lni_set_excess_rate
+ * qman_ceetm_lni_get_excess_rate - Set/get the shaper CR/ER token rate and
+ * token limit for the given LNI.
+ * @lni: the given LNI.
+ * @token_rate: the desired token rate for the "set" function, or the token
+ * rate of the LNI queried by the "get" function.
+ * @token_limit: the desired token bucket limit for "set" function, or the token
+ * limit of the given LNI queried by "get" function.
+ *
+ * Returns zero for success. The "set" function returns -EINVAL if the given
+ * LNI is unshaped or -EIO if the configure shaper command returns error.
+ * The "get" function returns -EINVAL if the token rate or the token limit is
+ * not set, or if the query command returns error.
+ */
+int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
+ const struct qm_ceetm_rate *token_rate,
+ u16 token_limit);
+int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
+ struct qm_ceetm_rate *token_rate,
+ u16 *token_limit);
+int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
+ const struct qm_ceetm_rate *token_rate,
+ u16 token_limit);
+int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
+ struct qm_ceetm_rate *token_rate,
+ u16 *token_limit);
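+
+/*
+ * Illustrative sketch (assumptions: 'lni' was claimed earlier and 'rate' was
+ * produced by a suitable rate-conversion helper, not shown here): enable the
+ * LNI shaper with CR and ER uncoupled and no overhead accounting, then
+ * program a committed rate with a placeholder token limit of 64.
+ *
+ *    int err = qman_ceetm_lni_enable_shaper(lni, 0, 0);
+ *    if (!err)
+ *        err = qman_ceetm_lni_set_commit_rate(lni, &rate, 64);
+ */
+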
+/**
+ * qman_ceetm_lni_set_commit_rate_bps
+ * qman_ceetm_lni_get_commit_rate_bps
+ * qman_ceetm_lni_set_excess_rate_bps
+ * qman_ceetm_lni_get_excess_rate_bps - Set/get the shaper CR/ER rate
+ * and token limit for the given LNI.
+ * @lni: the given LNI.
+ * @bps: the desired shaping rate in bps for the "set" function, or the
+ * shaping rate of the LNI queried by the "get" function.
+ * @token_limit: the desired token bucket limit for "set" function, or the token
+ * limit of the given LNI queried by "get" function.
+ *
+ * Returns zero for success. The "set" function returns -EINVAL if the given
+ * LNI is unshaped or -EIO if the configure shaper command returns error.
+ * The "get" function returns -EINVAL if the token rate or the token limit is
+ * not set, or if the query command returns error.
+ */
+int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
+ u64 bps,
+ u16 token_limit);
+int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
+ u64 *bps, u16 *token_limit);
+int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
+ u64 bps,
+ u16 token_limit);
+int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
+ u64 *bps, u16 *token_limit);
+
+/**
+ * qman_ceetm_lni_set_tcfcc
+ * qman_ceetm_lni_get_tcfcc - Configure/query "Traffic Class Flow Control".
+ * @lni: the given LNI.
+ * @cq_level: is between 0 and 15, representing individual class queue levels
+ * (CQ0 to CQ7 for every channel) and grouped class queue levels (CQ8 to CQ15
+ * for every channel).
+ * @traffic_class: is between 0 and 7 when associating a given class queue level
+ * to a traffic class, or -1 when disabling traffic class flow control for this
+ * class queue level.
+ *
+ * Return zero for success, or -EINVAL if the cq_level or traffic_class is out
+ * of range as indicated above, or -EIO if the configure/query tcfcc command
+ * returns error.
+ *
+ * Refer to the section on QMan CEETM traffic class flow control in the
+ * Reference Manual.
+ */
+int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
+ unsigned int cq_level,
+ int traffic_class);
+int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni,
+ unsigned int cq_level,
+ int *traffic_class);
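+
+/*
+ * Illustrative sketch: map class queue level 0 (CQ0 on every channel of this
+ * LNI) to traffic class 3, and later disable flow control for that level
+ * again by passing -1, as described above. 'lni' is assumed to be claimed.
+ *
+ *    int err = qman_ceetm_lni_set_tcfcc(lni, 0, 3);
+ *    ...
+ *    err = qman_ceetm_lni_set_tcfcc(lni, 0, -1);
+ */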
+
+ /* ----------------------------- */
+ /* CEETM :: class queue channels */
+ /* ----------------------------- */
+
+/**
+ * qman_ceetm_channel_claim - Claims an unclaimed CQ channel that is mapped to
+ * the given LNI.
+ * @channel: the returned class queue channel object, if successful.
+ * @lni: the LNI that the channel belongs to.
+ *
+ * Channels are always initially "unshaped".
+ *
+ * Return zero for success, or -ENODEV if no channel is available (all 32
+ * channels are claimed), or -EINVAL if the channel mapping command returns
+ * error.
+ */
+int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
+ struct qm_ceetm_lni *lni);
+
+/**
+ * qman_ceetm_channel_release - Releases a previously claimed CQ channel.
+ * @channel: the channel to be released.
+ *
+ * Returns zero for success, or -EBUSY if the dependencies are still in use.
+ *
+ * Note any shaping of the channel will be cleared to leave it in an unshaped
+ * state.
+ */
+int qman_ceetm_channel_release(struct qm_ceetm_channel *channel);
+
+/**
+ * qman_ceetm_channel_enable_shaper
+ * qman_ceetm_channel_disable_shaper - Enables/disables shaping on the channel.
+ * @channel: the given channel.
+ * @coupled: indicates whether surplus CR tokens should be added to the
+ * excess-rate token count (up to the excess-rate token limit) when the number
+ * of (unused) committed-rate tokens reach the committed_rate token limit.
+ *
+ * Whenever a claimed channel is first enabled for shaping, its committed and
+ * excess token rates and limits are zero, so will need to be changed to do
+ * anything useful. The shaper can subsequently be enabled/disabled without
+ * resetting the shaping parameters, but the shaping parameters will be reset
+ * when the channel is released.
+ *
+ * Return 0 for success, or -EINVAL if the channel shaper has already been
+ * enabled/disabled or if the management command returns error.
+ */
+int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
+ int coupled);
+int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel);
+
+/**
+ * qman_ceetm_channel_is_shaper_enabled - Check channel shaper status.
+ * @channel: the given channel.
+ */
+int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel);
+
+/**
+ * qman_ceetm_channel_set_commit_rate
+ * qman_ceetm_channel_get_commit_rate
+ * qman_ceetm_channel_set_excess_rate
+ * qman_ceetm_channel_get_excess_rate - Set/get channel CR/ER shaper parameters.
+ * @channel: the given channel.
+ * @token_rate: the desired token rate for "set" function, or the queried token
+ * rate for "get" function.
+ * @token_limit: the desired token limit for "set" function, or the queried
+ * token limit for "get" function.
+ *
+ * Return zero for success. The "set" function returns -EINVAL if the channel
+ * is unshaped, or -EIO if the configure shaper command returns error. The
+ * "get" function returns -EINVAL if the token rate or token limit is not set,
+ * or if the query shaper command returns error.
+ */
+int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
+ const struct qm_ceetm_rate *token_rate,
+ u16 token_limit);
+int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
+ struct qm_ceetm_rate *token_rate,
+ u16 *token_limit);
+int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
+ const struct qm_ceetm_rate *token_rate,
+ u16 token_limit);
+int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
+ struct qm_ceetm_rate *token_rate,
+ u16 *token_limit);
+/**
+ * qman_ceetm_channel_set_commit_rate_bps
+ * qman_ceetm_channel_get_commit_rate_bps
+ * qman_ceetm_channel_set_excess_rate_bps
+ * qman_ceetm_channel_get_excess_rate_bps - Set/get channel CR/ER shaper
+ * parameters.
+ * @channel: the given channel.
+ * @bps: the desired shaper rate in bps for the "set" function, or the queried
+ * shaper rate in bps for the "get" function.
+ * @token_limit: the desired token limit for the "set" function, or the
+ * queried token limit for the "get" function.
+ *
+ * Return zero for success. The "set" function returns -EINVAL if the channel
+ * is unshaped, or -EIO if the configure shaper command returns error. The
+ * "get" function returns -EINVAL if the token rate or token limit is not set,
+ * or if the query shaper command returns error.
+ */
+int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
+ u64 bps, u16 token_limit);
+int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
+ u64 *bps, u16 *token_limit);
+int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
+ u64 bps, u16 token_limit);
+int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
+ u64 *bps, u16 *token_limit);
+
+/**
+ * qman_ceetm_channel_set_weight
+ * qman_ceetm_channel_get_weight - Set/get the weight for unshaped channel
+ * @channel: the given channel.
+ * @token_limit: the desired token limit as the weight of the unshaped channel
+ * for "set" function, or the queried token limit for "get" function.
+ *
+ * The algorithm of unshaped fair queuing (uFQ) is used for unshaped channel.
+ * It allows the unshaped channels to be included in the CR time eligible list,
+ * and thus use the configured CR token limit value as their fair queuing
+ * weight.
+ *
+ * Return zero for success, or -EINVAL if the channel is a shaped channel or
+ * the management command returns error.
+ */
+int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
+ u16 token_limit);
+int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
+ u16 *token_limit);
+
+/**
+ * qman_ceetm_channel_set_group
+ * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler.
+ * @channel: the given channel.
+ * @group_b: indicates whether there is group B in this channel.
+ * @prio_a: the priority of group A.
+ * @prio_b: the priority of group B.
+ *
+ * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues
+ * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in
+ * group A, otherwise they are split into group A (CQ8-11) and group B
+ * (CQ12-C15). The individual class queues and the group(s) are in strict
+ * priority order relative to each other. Within the group(s), the scheduling
+ * is not strict priority order, but the result of scheduling within a group
+ * is in strict priority order relative to the other class queues in the
+ * channel. 'prio_a' and 'prio_b' control the priority order of the groups
+ * relative to the individual class queues, and take values from 0-7. E.g. if
+ * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict
+ * priority order would be:
+ * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7
+ *
+ * Return 0 for success. The "set" function returns -EINVAL if prio_a or
+ * prio_b is out of the range 0 - 7 (the priority of group A or group B can
+ * not be 0, since CQ0 is always the highest class queue in this channel), or
+ * -EIO if the configure scheduler command returns error. The "get" function
+ * returns -EINVAL if the query scheduler command returns error.
+ */
+int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel,
+ int group_b,
+ unsigned int prio_a,
+ unsigned int prio_b);
+int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel,
+ int *group_b,
+ unsigned int *prio_a,
+ unsigned int *prio_b);
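+
+/*
+ * Illustrative sketch: configure the grouping described in the example above
+ * (group B present, group A at priority 2, group B at priority 6), assuming
+ * 'channel' was obtained from qman_ceetm_channel_claim().
+ *
+ *    int err = qman_ceetm_channel_set_group(channel, 1, 2, 6);
+ */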
+
+/**
+ * qman_ceetm_channel_set_group_cr_eligibility
+ * qman_ceetm_channel_set_group_er_eligibility - Set channel group eligibility
+ * @channel: the given channel object
+ * @group_b: indicates whether the setting applies to group B (non-zero) or
+ * to group A (zero).
+ * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
+ * @ere: the excess rate eligibility, 1 for enable, 0 for disable.
+ *
+ * Return zero for success, or -EINVAL if eligibility setting fails.
+ */
+int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
+ *channel, int group_b, int cre);
+int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
+ *channel, int group_b, int ere);
+
+/**
+ * qman_ceetm_channel_set_cq_cr_eligibility
+ * qman_ceetm_channel_set_cq_er_eligibility - Set channel cq eligibility
+ * @channel: the given channel object
+ * @idx: is from 0 to 7 (representing CQ0 to CQ7).
+ * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
+ * @ere: the excess rate eligibility, 1 for enable, 0 for disable.
+ *
+ * Return zero for success, or -EINVAL if eligibility setting fails.
+ */
+int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
+ unsigned int idx, int cre);
+int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
+ unsigned int idx, int ere);
+
+ /* --------------------- */
+ /* CEETM :: class queues */
+ /* --------------------- */
+
+/**
+ * qman_ceetm_cq_claim - Claims an individual class queue.
+ * @cq: the returned class queue object, if successful.
+ * @channel: the class queue channel.
+ * @idx: is from 0 to 7 (representing CQ0 to CQ7).
+ * @ccg: represents the class congestion group that this class queue should be
+ * subscribed to, or NULL if no congestion group membership is desired.
+ *
+ * Returns zero for success, or -EINVAL if @idx is out of the range 0 - 7, if
+ * this class queue has been claimed, or if the configure class queue command
+ * returns error; returns -ENOMEM if allocating CQ memory fails.
+ */
+int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
+ struct qm_ceetm_channel *channel,
+ unsigned int idx,
+ struct qm_ceetm_ccg *ccg);
+
+/**
+ * qman_ceetm_cq_claim_A - Claims a class queue group A.
+ * @cq: the returned class queue object, if successful.
+ * @channel: the class queue channel.
+ * @idx: is from 8 to 15 if only group A exists; otherwise, it is from 8 to 11.
+ * @ccg: represents the class congestion group that this class queue should be
+ * subscribed to, or NULL if no congestion group membership is desired.
+ *
+ * Return zero for success, or -EINVAL if @idx is out of the range, if this
+ * class queue has been claimed, or if the configure class queue command
+ * returns error; returns -ENOMEM if allocating CQ memory fails.
+ */
+int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
+ struct qm_ceetm_channel *channel,
+ unsigned int idx,
+ struct qm_ceetm_ccg *ccg);
+
+/**
+ * qman_ceetm_cq_claim_B - Claims a class queue group B.
+ * @cq: the returned class queue object, if successful.
+ * @channel: the class queue channel.
+ * @idx: is from 0 to 3 (CQ12 to CQ15).
+ * @ccg: represents the class congestion group that this class queue should be
+ * subscribed to, or NULL if no congestion group membership is desired.
+ *
+ * Return zero for success, or -EINVAL if @idx is out of the range, if this
+ * class queue has been claimed, or if the configure class queue command
+ * returns error; returns -ENOMEM if allocating CQ memory fails.
+ */
+int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
+ struct qm_ceetm_channel *channel,
+ unsigned int idx,
+ struct qm_ceetm_ccg *ccg);
+
+/**
+ * qman_ceetm_cq_release - Releases a previously claimed class queue.
+ * @cq: The class queue to be released.
+ *
+ * Return zero for success, or -EBUSY if the dependent objects (eg. logical
+ * FQIDs) have not been released.
+ */
+int qman_ceetm_cq_release(struct qm_ceetm_cq *cq);
+
+/**
+ * qman_ceetm_set_queue_weight
+ * qman_ceetm_get_queue_weight - Configure/query the weight of a grouped class
+ * queue.
+ * @cq: the given class queue.
+ * @weight_code: the desired weight code to set for the given class queue for
+ * the "set" function, or the queried weight code for the "get" function.
+ *
+ * Grouped class queues have a default weight code of zero, which corresponds to
+ * a scheduler weighting of 1. This function can be used to modify a grouped
+ * class queue to another weight. (Use the helpers qman_ceetm_wbfs2ratio()
+ * and qman_ceetm_ratio2wbfs() to convert between these 'weight_code' values
+ * and the corresponding sharing weight.)
+ *
+ * Returns zero for success, or -EIO if the configure weight command returns
+ * error for "set" function, or -EINVAL if the query command returns
+ * error for "get" function.
+ * See the section "CEETM Weighted Scheduling among Grouped Classes" in the
+ * Reference Manual for details on weights and weight codes.
+ */
+int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
+ struct qm_ceetm_weight_code *weight_code);
+int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
+ struct qm_ceetm_weight_code *weight_code);
+
+/**
+ * qman_ceetm_set_queue_weight_in_ratio
+ * qman_ceetm_get_queue_weight_in_ratio - Configure/query the weight of a
+ * grouped class queue.
+ * @cq: the given class queue.
+ * @ratio: the weight as a ratio, multiplied by 100 to eliminate the
+ * fractional part.
+ *
+ * Returns zero for success, or -EIO if the configure weight command returns
+ * error for "set" function, or -EINVAL if the query command returns
+ * error for "get" function.
+ */
+int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio);
+int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio);
+
+/* Weights are encoded using a pseudo-exponential scheme. The weight codes 0,
+ * 32, 64, [...] correspond to weights of 1, 2, 4, [...]. The weights
+ * corresponding to intermediate weight codes are calculated using linear
+ * interpolation on the inverted values. Or put another way, the inverse weights
+ * for each 32nd weight code are 1, 1/2, 1/4, [...], and so the intervals
+ * between these are divided linearly into 32 intermediate values, the inverses
+ * of which form the remaining weight codes.
+ *
+ * The Weighted Bandwidth Fair Scheduling (WBFS) algorithm provides a form of
+ * scheduling within a group of class queues (group A or B). Weights are used to
+ * normalise the class queues to an underlying BFS algorithm where all class
+ * queues are assumed to require "equal bandwidth". So the weights referred to
+ * by the weight codes act as divisors on the size of frames being enqueued.
+ * E.g. if one class queue in a group is assigned a weight of 2 whilst the
+ * other class queues in the group keep the default weight of 1, then the WBFS
+ * scheduler will effectively treat all frames enqueued on the weight-2 class
+ * queue as having half the number of bytes they really have, so, all other
+ * things being equal, that class queue would get twice the bytes-per-second
+ * bandwidth of the others. Weights should therefore be chosen to provide
+ * bandwidth ratios between members of the same class queue group. These
+ * weights have no bearing on behaviour outside that group's WBFS mechanism.
+ */
+
+/**
+ * qman_ceetm_wbfs2ratio - Given a weight code ('wbfs'), return an accurate
+ * fractional representation of the corresponding weight (so that no
+ * precision is lost).
+ * @weight_code: The given weight code in WBFS.
+ * @numerator: the numerator part of the weight computed by the weight code.
+ * @denominator: the denominator part of the weight computed by the weight code
+ *
+ * Returns zero for success or -EINVAL if the given weight code is illegal.
+ */
+int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
+ u32 *numerator,
+ u32 *denominator);
+/**
+ * qman_ceetm_ratio2wbfs - Given a weight, find the nearest possible weight
+ * code. If the user needs to know how close this is, convert the resulting
+ * weight code back to a weight and compare.
+ * @numerator: numerator part of the given weight.
+ * @denominator: denominator part of the given weight.
+ * @weight_code: the weight code computed from the given weight.
+ * @rounding: rounding mode used when selecting the nearest code.
+ *
+ * Returns zero for success, or -ERANGE if "numerator/denominator" is outside
+ * the range of weights.
+ */
+int qman_ceetm_ratio2wbfs(u32 numerator,
+ u32 denominator,
+ struct qm_ceetm_weight_code *weight_code,
+ int rounding);
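+
+/*
+ * Illustrative sketch: find the weight code nearest to a 2.5:1 weight, then
+ * convert it back to check how much precision was lost. The 'rounding'
+ * argument of 0 is an assumption; consult the implementation for its exact
+ * semantics.
+ *
+ *    struct qm_ceetm_weight_code wc;
+ *    u32 num, den;
+ *
+ *    if (!qman_ceetm_ratio2wbfs(5, 2, &wc, 0))
+ *        qman_ceetm_wbfs2ratio(&wc, &num, &den);
+ */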
+
+#define QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER 0x1
+/**
+ * qman_ceetm_cq_get_dequeue_statistics - Get the statistics provided by CEETM
+ * CQ counters.
+ * @cq: the given CQ object.
+ * @flags: indicates whether the statistics counter will be cleared after query.
+ * @frame_count: the number of frames that have been counted since the counter
+ * was last cleared.
+ * @byte_count: the number of bytes in all frames that have been counted.
+ *
+ * Return zero for success or -EINVAL if the query statistics command returns
+ * error.
+ */
+int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
+ u64 *frame_count, u64 *byte_count);
+
+/**
+ * qman_ceetm_drain_cq - Drain the CQ until it is empty.
+ * @cq: the given CQ object.
+ * Return 0 for success or -EINVAL if the command to empty the CQ fails.
+ */
+int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq);
+
+ /* ---------------------- */
+ /* CEETM :: logical FQIDs */
+ /* ---------------------- */
+/**
+ * qman_ceetm_lfq_claim - Claims an unused logical FQID, associates it with
+ * the given class queue.
+ * @lfq: the returned lfq object, if successful.
+ * @cq: the class queue which needs to claim a LFQID.
+ *
+ * Return zero for success, or -ENODEV if no LFQID is available or -ENOMEM if
+ * allocating memory for lfq fails, or -EINVAL if configuring LFQMT fails.
+ */
+int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
+ struct qm_ceetm_cq *cq);
+
+/**
+ * qman_ceetm_lfq_release - Releases a previously claimed logical FQID.
+ * @lfq: the lfq to be released.
+ *
+ * Return zero for success.
+ */
+int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq);
+
+/**
+ * qman_ceetm_lfq_set_context
+ * qman_ceetm_lfq_get_context - Set/get the context_a/context_b pair to the
+ * "dequeue context table" associated with the logical FQID.
+ * @lfq: the given logical FQ object.
+ * @context_a: contextA of the dequeue context.
+ * @context_b: contextB of the dequeue context.
+ *
+ * Returns zero for success, or -EINVAL if there is error to set/get the
+ * context pair.
+ */
+int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq,
+ u64 context_a,
+ u32 context_b);
+int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq,
+ u64 *context_a,
+ u32 *context_b);
+
+/**
+ * qman_ceetm_create_fq - Initialise a FQ object for the LFQ.
+ * @lfq: the given logical FQ.
+ * @fq: the FQ object created for the given logical FQ.
+ *
+ * The FQ object can be used in qman_enqueue() and qman_enqueue_orp() APIs to
+ * target a logical FQID (and the class queue it is associated with).
+ * Note that this FQ object can only be used for enqueues, and
+ * in the case of qman_enqueue_orp() it can not be used as the 'orp' parameter,
+ * only as 'fq'. This FQ object can not (and shouldn't) be destroyed, it is only
+ * valid as long as the underlying 'lfq' remains claimed. It is the user's
+ * responsibility to ensure that the underlying 'lfq' is not released until any
+ * enqueues to this FQ object have completed. The only field the user needs to
+ * fill in is fq->cb.ern, as that enqueue rejection handler is the callback that
+ * could conceivably be called on this FQ object. This API can be called
+ * multiple times to create multiple FQ objects referring to the same logical
+ * FQID, and any enqueue rejections will respect the callback of the object that
+ * issued the enqueue (and will identify the object via the parameter passed to
+ * the callback too). There is no 'flags' parameter to this API as there is for
+ * qman_create_fq() - the created FQ object behaves as though qman_create_fq()
+ * had been called with the single flag QMAN_FQ_FLAG_NO_MODIFY.
+ *
+ * Returns 0 for success.
+ */
+int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq);
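+
+/*
+ * Illustrative sketch of the claim chain down to an enqueueable FQ, with
+ * error handling elided. 'channel' is assumed to have been claimed earlier,
+ * and 'my_ern_cb' is a hypothetical enqueue rejection handler supplied by
+ * the caller.
+ *
+ *    struct qm_ceetm_cq *cq;
+ *    struct qm_ceetm_lfq *lfq;
+ *    struct qman_fq fq;
+ *
+ *    qman_ceetm_cq_claim(&cq, channel, 0, NULL);
+ *    qman_ceetm_lfq_claim(&lfq, cq);
+ *    fq.cb.ern = my_ern_cb;
+ *    qman_ceetm_create_fq(lfq, &fq);
+ *    ... 'fq' can now be passed to qman_enqueue() ...
+ */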
+
+ /* -------------------------------- */
+ /* CEETM :: class congestion groups */
+ /* -------------------------------- */
+
+/**
+ * qman_ceetm_ccg_claim - Claims an unused CCG.
+ * @ccg: the returned CCG object, if successful.
+ * @channel: the given class queue channel.
+ * @idx: the index of the CCG within the channel.
+ * @cscn: the callback function of this CCG.
+ * @cb_ctx: the corresponding context to be used if state change
+ * notifications are later enabled for this CCG.
+ *
+ * The congestion group is local to the given class queue channel, so only
+ * class queues within the channel can be associated with that congestion group.
+ * The association of class queues to congestion groups occurs when the class
+ * queues are claimed, see qman_ceetm_cq_claim() and related functions.
+ * Congestion groups are in a "zero" state when initially claimed, and they are
+ * returned to that state when released.
+ *
+ * Return zero for success, or -EINVAL if no CCG in the channel is available.
+ */
+int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
+ struct qm_ceetm_channel *channel,
+ unsigned int idx,
+ void (*cscn)(struct qm_ceetm_ccg *,
+ void *cb_ctx,
+ int congested),
+ void *cb_ctx);
+
+/**
+ * qman_ceetm_ccg_release - Releases a previously claimed CCG.
+ * @ccg: the given ccg.
+ *
+ * Returns zero for success, or -EBUSY if the given ccg's dependent objects
+ * (class queues that are associated with the CCG) have not been released.
+ */
+int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg);
+
+/* This struct is used to specify attributes for a CCG. The 'we_mask' field
+ * controls which CCG attributes are to be updated, and the remainder specify
+ * the values for those attributes. A CCG counts either frames or the bytes
+ * within those frames, but not both ('mode'). A CCG can optionally cause
+ * enqueues to be rejected, due to tail-drop or WRED, or both (they are
+ * independent options, 'td_en' and 'wr_en_g,wr_en_y,wr_en_r'). Tail-drop can be
+ * level-triggered due to a single threshold ('td_thres') or edge-triggered due
+ * to a "congestion state", but not both ('td_mode'). Congestion state has
+ * distinct entry and exit thresholds ('cs_thres_in' and 'cs_thres_out'), and
+ * notifications can be sent to software when the CCG goes into and out of
+ * this congested state ('cscn_en'). */
+struct qm_ceetm_ccg_params {
+ /* Boolean fields together in a single bitfield struct */
+ struct {
+ /* Whether to count bytes or frames. 1==frames */
+ u8 mode:1;
+ /* En/disable tail-drop. 1==enable */
+ u8 td_en:1;
+ /* Tail-drop on congestion-state or threshold. 1=threshold */
+ u8 td_mode:1;
+ /* Generate congestion state change notifications. 1==enable */
+ u8 cscn_en:1;
+ /* Enable WRED rejections (per colour). 1==enable */
+ u8 wr_en_g:1;
+ u8 wr_en_y:1;
+ u8 wr_en_r:1;
+ } __packed;
+ /* Tail-drop threshold. See qm_cgr_thres_[gs]et64(). */
+ struct qm_cgr_cs_thres td_thres;
+ /* Congestion state thresholds, for entry and exit. */
+ struct qm_cgr_cs_thres cs_thres_in;
+ struct qm_cgr_cs_thres cs_thres_out;
+ /* Overhead accounting length. Per-packet "tax", from -128 to +127 */
+ signed char oal;
+ /* Congestion state change notification for DCP portal, virtual CCGID */
+ /* WRED parameters. */
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+};
+/* Bits used in 'we_mask' to qman_ceetm_ccg_set(), controls which attributes of
+ * the CCGR are to be updated. */
+#define QM_CCGR_WE_MODE 0x0001 /* mode (bytes/frames) */
+#define QM_CCGR_WE_CS_THRES_IN 0x0002 /* congestion state entry threshold */
+#define QM_CCGR_WE_TD_EN 0x0004 /* congestion state tail-drop enable */
+#define QM_CCGR_WE_CSCN_TUPD 0x0008 /* CSCN target update */
+#define QM_CCGR_WE_CSCN_EN 0x0010 /* congestion notification enable */
+#define QM_CCGR_WE_WR_EN_R 0x0020 /* WRED enable - red */
+#define QM_CCGR_WE_WR_EN_Y 0x0040 /* WRED enable - yellow */
+#define QM_CCGR_WE_WR_EN_G 0x0080 /* WRED enable - green */
+#define QM_CCGR_WE_WR_PARM_R 0x0100 /* WRED parameters - red */
+#define QM_CCGR_WE_WR_PARM_Y 0x0200 /* WRED parameters - yellow */
+#define QM_CCGR_WE_WR_PARM_G 0x0400 /* WRED parameters - green */
+#define QM_CCGR_WE_OAL 0x0800 /* overhead accounting length */
+#define QM_CCGR_WE_CS_THRES_OUT 0x1000 /* congestion state exit threshold */
+#define QM_CCGR_WE_TD_THRES 0x2000 /* tail-drop threshold */
+#define QM_CCGR_WE_TD_MODE 0x4000 /* tail-drop mode (state/threshold) */
+#define QM_CCGR_WE_CDV 0x8000 /* cdv */
+
+/**
+ * qman_ceetm_ccg_set
+ * qman_ceetm_ccg_get - Configure/query a subset of CCG attributes.
+ * @ccg: the given CCG object.
+ * @we_mask: the write enable mask.
+ * @params: the parameter settings for this CCG.
+ *
+ * Return 0 for success, or -EIO if configure ccg command returns error for
+ * "set" function, or -EINVAL if query ccg command returns error for "get"
+ * function.
+ */
+int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg,
+ u16 we_mask,
+ const struct qm_ceetm_ccg_params *params);
+int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
+ struct qm_ceetm_ccg_params *params);
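+
+/*
+ * Illustrative sketch: enable threshold-triggered tail-drop on a claimed
+ * CCG. The threshold value itself would be programmed into 'td_thres' via
+ * the qm_cgr_cs_thres helpers referenced above and is elided here.
+ *
+ *    struct qm_ceetm_ccg_params params = { };
+ *    int err;
+ *
+ *    params.td_en = 1;
+ *    params.td_mode = 1;
+ *    err = qman_ceetm_ccg_set(ccg, QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE,
+ *                             &params);
+ */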
+
+/**
+ * qman_ceetm_cscn_swp_set - Add or remove a software portal from the target
+ * mask.
+ * qman_ceetm_cscn_swp_get - Query whether a given software portal index is
+ * in the cscn target mask.
+ * @ccg: the given CCG object.
+ * @swp_idx: the index of the software portal.
+ * @cscn_enabled: 1: Set the swp to be cscn target. 0: remove the swp from
+ * the target mask.
+ * @we_mask: the write enable mask.
+ * @params: the parameter settings for this CCG.
+ *
+ * Return 0 for success, or -EINVAL if command in set/get function fails.
+ */
+int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg,
+ u16 swp_idx,
+ unsigned int cscn_enabled,
+ u16 we_mask,
+ const struct qm_ceetm_ccg_params *params);
+int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
+ u16 swp_idx,
+ unsigned int *cscn_enabled);
+
+/**
+ * qman_ceetm_cscn_dcp_set - Add or remove a direct connect portal from the
+ * target mask.
+ * qman_ceetm_cscn_dcp_get - Query whether a given direct connect portal index
+ * is in the cscn target mask.
+ * @ccg: the given CCG object.
+ * @dcp_idx: the index of the direct connect portal.
+ * @vcgid: congestion state change notification for dcp portal, virtual CGID.
+ * @cscn_enabled: 1: Set the dcp to be cscn target. 0: remove the dcp from
+ * the target mask.
+ * @we_mask: the write enable mask.
+ * @params: the parameter settings for this CCG.
+ *
+ * Return 0 for success, or -EINVAL if command in set/get function fails.
+ */
+int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
+ u16 dcp_idx,
+ u8 vcgid,
+ unsigned int cscn_enabled,
+ u16 we_mask,
+ const struct qm_ceetm_ccg_params *params);
+int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
+ u16 dcp_idx,
+ u8 *vcgid,
+ unsigned int *cscn_enabled);
+
+/**
+ * qman_ceetm_ccg_get_reject_statistics - Get the statistics provided by
+ * CEETM CCG counters.
+ * @ccg: the given CCG object.
+ * @flags: indicates whether the statistics counter will be cleared after query.
+ * @frame_count: the number of frames that have been counted since the counter
+ * was last cleared.
+ * @byte_count: the number of bytes in all frames that have been counted.
+ *
+ * Return zero for success or -EINVAL if the query statistics command returns
+ * error.
+ */
+int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
+ u64 *frame_count, u64 *byte_count);
+
+/**
+ * qman_ceetm_query_lfqmt - Query the logical frame queue mapping table
+ * @lfqid: Logical Frame Queue ID
+ * @lfqmt_query: Results of the query command
+ *
+ * Returns zero for success or -EIO if the query command returns error.
+ */
+int qman_ceetm_query_lfqmt(int lfqid,
+ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query);
+
+/**
+ * qman_ceetm_query_cq - Queries a CEETM CQ
+ * @cqid: the channel ID (first byte) followed by the CQ idx
+ * @dcpid: CEETM portal ID
+ * @cq_query: storage for the queried CQ fields
+ *
+ * Returns zero for success or -EIO if the query command returns error.
+ */
+int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
+ struct qm_mcr_ceetm_cq_query *cq_query);
+
+/**
+ * qman_ceetm_query_write_statistics - Query (and optionally write) statistics
+ * @cid: Target ID (CQID or CCGRID)
+ * @dcp_idx: CEETM portal ID
+ * @command_type: One of the following:
+ * 0 = Query dequeue statistics. CID carries the CQID to be queried.
+ * 1 = Query and clear dequeue statistics. CID carries the CQID to be queried.
+ * 2 = Write dequeue statistics. CID carries the CQID to be written.
+ * 3 = Query reject statistics. CID carries the CCGRID to be queried.
+ * 4 = Query and clear reject statistics. CID carries the CCGRID to be queried.
+ * 5 = Write reject statistics. CID carries the CCGRID to be written.
+ * @frame_count: Frame count value to be written if this is a write command
+ * @byte_count: Bytes count value to be written if this is a write command
+ *
+ * Returns zero for success or -EIO if the query command returns error.
+ */
+int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
+ u16 command_type, u64 frame_count,
+ u64 byte_count);
+
+/**
+ * qman_set_wpm - Set waterfall power management
+ *
+ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
+ *
+ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
+ * accessible.
+ */
+int qman_set_wpm(int wpm_enable);
+
+/**
+ * qman_get_wpm - Query the waterfall power management setting
+ *
+ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
+ *
+ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
+ * accessible.
+ */
+int qman_get_wpm(int *wpm_enable);
+
+/* The below qman_p_***() variants might be called in a migration situation
+ * (e.g. cpu hotplug). They are used to continue accessing the portal that
+ * execution was affine to prior to migration.
+ * @qman_portal specifies which portal the APIs will use.
+ */
+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal
+ *p);
+int qman_p_irqsource_add(struct qman_portal *p, u32 bits);
+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
+u32 qman_p_poll_slow(struct qman_portal *p);
+void qman_p_poll(struct qman_portal *p);
+void qman_p_stop_dequeues(struct qman_portal *p);
+void qman_p_start_dequeues(struct qman_portal *p);
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools);
+u32 qman_p_static_dequeue_get(struct qman_portal *p);
+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
+ int park_request);
+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
+ u32 flags __maybe_unused, u32 vdqcr);
+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags);
+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum);
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ qman_cb_precommit cb, void *cb_arg);
+
+static inline int qman_is_probed(void)
+{
+	return 1;
+}
+
+static inline int qman_portals_probed(void)
+{
+	return 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FSL_QMAN_H */
diff --git a/include/linux/fsl_usdpaa.h b/include/linux/fsl_usdpaa.h
new file mode 100644
index 000000000000..114e694644b5
--- /dev/null
+++ b/include/linux/fsl_usdpaa.h
@@ -0,0 +1,404 @@
+/* Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef FSL_USDPAA_H
+#define FSL_USDPAA_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/uaccess.h>
+#include <linux/ioctl.h>
+#include <linux/fsl_qman.h> /* For "enum qm_channel" */
+#include <linux/compat.h>
+
+#ifdef CONFIG_FSL_USDPAA
+
+/******************************/
+/* Allocation of resource IDs */
+/******************************/
+
+/* This enum is used to distinguish between the type of underlying object being
+ * manipulated. */
+enum usdpaa_id_type {
+ usdpaa_id_fqid,
+ usdpaa_id_bpid,
+ usdpaa_id_qpool,
+ usdpaa_id_cgrid,
+ usdpaa_id_ceetm0_lfqid,
+ usdpaa_id_ceetm0_channelid,
+ usdpaa_id_ceetm1_lfqid,
+ usdpaa_id_ceetm1_channelid,
+ usdpaa_id_max /* <-- not a valid type, represents the number of types */
+};
+#define USDPAA_IOCTL_MAGIC 'u'
+struct usdpaa_ioctl_id_alloc {
+ uint32_t base; /* Return value, the start of the allocated range */
+ enum usdpaa_id_type id_type; /* what kind of resource(s) to allocate */
+ uint32_t num; /* how many IDs to allocate (and return value) */
+ uint32_t align; /* must be a power of 2, 0 is treated like 1 */
+ int partial; /* whether to allow less than 'num' */
+};
+struct usdpaa_ioctl_id_release {
+ /* Input: */
+ enum usdpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+struct usdpaa_ioctl_id_reserve {
+ enum usdpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+
+/* ioctl() commands */
+#define USDPAA_IOCTL_ID_ALLOC \
+ _IOWR(USDPAA_IOCTL_MAGIC, 0x01, struct usdpaa_ioctl_id_alloc)
+#define USDPAA_IOCTL_ID_RELEASE \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x02, struct usdpaa_ioctl_id_release)
+#define USDPAA_IOCTL_ID_RESERVE \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x0A, struct usdpaa_ioctl_id_reserve)
+
+/**********************/
+/* Mapping DMA memory */
+/**********************/
+
+/* Maximum length for a map name, including NULL-terminator */
+#define USDPAA_DMA_NAME_MAX 16
+/* Flags for requesting DMA maps. Maps are private+unnamed or sharable+named.
+ * For a sharable and named map, specify _SHARED (whether creating one or
+ * binding to an existing one). If _SHARED is specified and _CREATE is not, then
+ * the mapping must already exist. If _SHARED and _CREATE are specified and the
+ * mapping doesn't already exist, it will be created. If _SHARED and _CREATE are
+ * specified and the mapping already exists, the mapping will fail unless _LAZY
+ * is specified. When mapping to a pre-existing sharable map, the length must be
+ * an exact match. Lengths must be a power-of-4 multiple of page size.
+ *
+ * Note that this does not actually map the memory to user-space, that is done
+ * by a subsequent mmap() using the page offset returned from this ioctl(). The
+ * ioctl() is what gives the process permission to do this, and a page-offset
+ * with which to do so.
+ */
+#define USDPAA_DMA_FLAG_SHARE 0x01
+#define USDPAA_DMA_FLAG_CREATE 0x02
+#define USDPAA_DMA_FLAG_LAZY 0x04
+#define USDPAA_DMA_FLAG_RDONLY 0x08
+struct usdpaa_ioctl_dma_map {
+ /* Output parameters - virtual and physical addresses */
+ void *ptr;
+ uint64_t phys_addr;
+ /* Input parameter, the length of the region to be created (or if
+ * mapping an existing region, this must match it). Must be a power-of-4
+ * multiple of page size. */
+ uint64_t len;
+ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
+ uint32_t flags;
+ /* If _FLAG_SHARE is specified, the name of the region to be created (or
+ * of the existing mapping to use). */
+ char name[USDPAA_DMA_NAME_MAX];
+ /* If this ioctl() creates the mapping, this is an input parameter
+ * stating whether the region supports locking. If mapping an existing
+ * region, this is a return value indicating the same thing. */
+ int has_locking;
+ /* In the case of a successful map with _CREATE and _LAZY, this return
+ * value indicates whether we created the mapped region or whether it
+ * already existed. */
+ int did_create;
+};
+
+#ifdef CONFIG_COMPAT
+struct usdpaa_ioctl_dma_map_compat {
+ /* Output parameters - virtual and physical addresses */
+ compat_uptr_t ptr;
+ uint64_t phys_addr;
+ /* Input parameter, the length of the region to be created (or if
+ * mapping an existing region, this must match it). Must be a power-of-4
+ * multiple of page size. */
+ uint64_t len;
+ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
+ uint32_t flags;
+ /* If _FLAG_SHARE is specified, the name of the region to be created (or
+ * of the existing mapping to use). */
+ char name[USDPAA_DMA_NAME_MAX];
+ /* If this ioctl() creates the mapping, this is an input parameter
+ * stating whether the region supports locking. If mapping an existing
+ * region, this is a return value indicating the same thing. */
+ int has_locking;
+ /* In the case of a successful map with _CREATE and _LAZY, this return
+ * value indicates whether we created the mapped region or whether it
+ * already existed. */
+ int did_create;
+};
+
+#define USDPAA_IOCTL_DMA_MAP_COMPAT \
+ _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map_compat)
+#endif
+
+
+#define USDPAA_IOCTL_DMA_MAP \
+ _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map)
+/* munmap() does not remove the DMA map, just the user-space mapping to it.
+ * This ioctl will do both (though you can munmap() before calling the ioctl
+ * too). */
+#define USDPAA_IOCTL_DMA_UNMAP \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x04, unsigned char)
+/* We implement a cross-process locking scheme per DMA map. Call this ioctl()
+ * with a mmap()'d address, and the process will (interruptible) sleep if the
+ * lock is already held by another process. Process destruction will
+ * automatically clean up any held locks. */
+#define USDPAA_IOCTL_DMA_LOCK \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x05, unsigned char)
+#define USDPAA_IOCTL_DMA_UNLOCK \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x06, unsigned char)
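+
+/*
+ * Illustrative user-space sketch (assumptions: the USDPAA device node is
+ * "/dev/fsl-usdpaa"; headers and error handling are elided): create a 1MB
+ * shared map named "shmem". 1MB is a power-of-4 multiple of a 4KB page size.
+ * A subsequent mmap() on 'fd', at the page offset this ioctl() grants, makes
+ * the region usable, as described above.
+ *
+ *    struct usdpaa_ioctl_dma_map map = {
+ *        .len = 1024 * 1024,
+ *        .flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE,
+ *        .name = "shmem",
+ *    };
+ *    int fd = open("/dev/fsl-usdpaa", O_RDWR);
+ *
+ *    ioctl(fd, USDPAA_IOCTL_DMA_MAP, &map);
+ */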
+
+/***************************************/
+/* Mapping and using QMan/BMan portals */
+/***************************************/
+enum usdpaa_portal_type {
+ usdpaa_portal_qman,
+ usdpaa_portal_bman,
+};
+
+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
+
+struct usdpaa_ioctl_portal_map {
+ /* Input parameter, is a qman or bman portal required. */
+ enum usdpaa_portal_type type;
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ for don't care. The portal index will be populated by the
+ driver when the ioctl() successfully completes */
+ uint32_t index;
+
+ /* Return value if the map succeeds, this gives the mapped
+ * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
+ struct usdpaa_portal_map {
+ void *cinh;
+ void *cena;
+ } addr;
+ /* Qman-specific return values */
+ uint16_t channel;
+ uint32_t pools;
+};
+
+#ifdef CONFIG_COMPAT
+struct compat_usdpaa_ioctl_portal_map {
+ /* Input parameter, is a qman or bman portal required. */
+ enum usdpaa_portal_type type;
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ for don't care. The portal index will be populated by the
+ driver when the ioctl() successfully completes */
+ uint32_t index;
+ /* Return value if the map succeeds, this gives the mapped
+ * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
+ struct usdpaa_portal_map_compat {
+ compat_uptr_t cinh;
+ compat_uptr_t cena;
+ } addr;
+ /* Qman-specific return values */
+ uint16_t channel;
+ uint32_t pools;
+};
+#define USDPAA_IOCTL_PORTAL_MAP_COMPAT \
+ _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct compat_usdpaa_ioctl_portal_map)
+#define USDPAA_IOCTL_PORTAL_UNMAP_COMPAT \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map_compat)
+#endif
+
+#define USDPAA_IOCTL_PORTAL_MAP \
+ _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct usdpaa_ioctl_portal_map)
+#define USDPAA_IOCTL_PORTAL_UNMAP \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map)
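+
+/*
+ * Illustrative user-space sketch: map any available QMan portal and read
+ * back which index the driver chose. 'fd' is assumed to be an open USDPAA
+ * file descriptor as in the DMA-map example above; error handling is elided.
+ *
+ *    struct usdpaa_ioctl_portal_map pmap = {
+ *        .type = usdpaa_portal_qman,
+ *        .index = QBMAN_ANY_PORTAL_IDX,
+ *    };
+ *
+ *    ioctl(fd, USDPAA_IOCTL_PORTAL_MAP, &pmap);
+ *    ... pmap.index, pmap.addr.cinh and pmap.addr.cena are now valid ...
+ */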
+
+struct usdpaa_ioctl_irq_map {
+ enum usdpaa_portal_type type; /* Type of portal to map */
+ int fd; /* File descriptor that contains the portal */
+ void *portal_cinh; /* Cache inhibited area to identify the portal */
+};
+
+#define USDPAA_IOCTL_PORTAL_IRQ_MAP \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct usdpaa_ioctl_irq_map)
+
+#ifdef CONFIG_COMPAT
+
+struct compat_ioctl_irq_map {
+ enum usdpaa_portal_type type; /* Type of portal to map */
+ compat_int_t fd; /* File descriptor that contains the portal */
+ compat_uptr_t portal_cinh; /* Cache inhibited area to identify the portal */
+};
+
+#define USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct compat_ioctl_irq_map)
+#endif
+
+/* ioctl to query the amount of DMA memory used in the system */
+struct usdpaa_ioctl_dma_used {
+ uint64_t free_bytes;
+ uint64_t total_bytes;
+};
+#define USDPAA_IOCTL_DMA_USED \
+ _IOR(USDPAA_IOCTL_MAGIC, 0x0B, struct usdpaa_ioctl_dma_used)
+
+/* ioctl to allocate a raw portal */
+struct usdpaa_ioctl_raw_portal {
+ /* inputs */
+ enum usdpaa_portal_type type; /* Type of portal to allocate */
+
+ /* set to non zero to turn on stashing */
+ uint8_t enable_stash;
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+#define USDPAA_IOCTL_ALLOC_RAW_PORTAL \
+ _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct usdpaa_ioctl_raw_portal)
+
+#define USDPAA_IOCTL_FREE_RAW_PORTAL \
+ _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct usdpaa_ioctl_raw_portal)
+
+#ifdef CONFIG_COMPAT
+
+struct compat_ioctl_raw_portal {
+ /* inputs */
+ enum usdpaa_portal_type type; /* Type of portal to allocate */
+
+ /* set to non zero to turn on stashing */
+ uint8_t enable_stash;
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+#define USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT \
+ _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct compat_ioctl_raw_portal)
+
+#define USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT \
+ _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct compat_ioctl_raw_portal)
+
+#endif
+
+#ifdef __KERNEL__
+
+/* Early-boot hook */
+int __init fsl_usdpaa_init_early(void);
+
+/* Fault-handling in arch/powerpc/mm/mem.c gives USDPAA an opportunity to detect
+ * faults within its ranges via this hook. */
+int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size);
+
+#endif /* __KERNEL__ */
+
+#endif /* CONFIG_FSL_USDPAA */
+
+#ifdef __KERNEL__
+/* This interface is needed in a few places and though it's not specific to
+ * USDPAA as such, creating a new header for it doesn't make any sense. The
+ * qbman kernel driver implements this interface and uses it as the backend for
+ * both the FQID and BPID allocators. The fsl_usdpaa driver also uses this
+ * interface for tracking per-process allocations handed out to user-space. */
+struct dpa_alloc {
+ struct list_head free;
+ spinlock_t lock;
+ struct list_head used;
+};
+#define DECLARE_DPA_ALLOC(name) \
+ struct dpa_alloc name = { \
+ .free = { \
+ .prev = &name.free, \
+ .next = &name.free \
+ }, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .used = { \
+ .prev = &name.used, \
+ .next = &name.used \
+ } \
+ }
+static inline void dpa_alloc_init(struct dpa_alloc *alloc)
+{
+ INIT_LIST_HEAD(&alloc->free);
+ INIT_LIST_HEAD(&alloc->used);
+ spin_lock_init(&alloc->lock);
+}
+int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
+ int partial);
+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count);
+void dpa_alloc_seed(struct dpa_alloc *alloc, u32 fqid, u32 count);
+
+/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
+ * desired range is not available, or 0 for success. */
+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count);
+/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when
+ * 'alloc' is empty. */
+int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count);
+/* Returns 1 if the specified id is alloced, 0 otherwise */
+int dpa_alloc_check(struct dpa_alloc *list, u32 id);
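+
+/*
+ * Illustrative sketch: seed an allocator with a range of IDs and allocate an
+ * aligned block from it. The range 0x100/0x40 is a placeholder, and the
+ * assumption here is only that a negative return from dpa_alloc_new()
+ * indicates failure.
+ *
+ *    DECLARE_DPA_ALLOC(my_alloc);
+ *    u32 base;
+ *
+ *    dpa_alloc_seed(&my_alloc, 0x100, 0x40);
+ *    if (dpa_alloc_new(&my_alloc, &base, 8, 8, 0) < 0)
+ *        ... allocation failed ...
+ *    ... else 8 contiguous IDs, 8-aligned, start at 'base' ...
+ */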
+#endif /* __KERNEL__ */
+
+
+/************************************
+ * Link Status support for user space
+ * interface
+ ************************************/
+#define IF_NAME_MAX_LEN 16
+#define NODE_NAME_LEN 32
+
+struct usdpaa_ioctl_link_status {
+ /* network device node name */
+ char if_name[IF_NAME_MAX_LEN];
+ /* Eventfd value */
+ uint32_t efd;
+};
+
+#define USDPAA_IOCTL_ENABLE_LINK_STATUS_INTERRUPT \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x0E, struct usdpaa_ioctl_link_status)
+
+#define USDPAA_IOCTL_DISABLE_LINK_STATUS_INTERRUPT \
+ _IOW(USDPAA_IOCTL_MAGIC, 0x0F, char *)
+
+struct usdpaa_ioctl_link_status_args {
+ /* network device node name */
+ char if_name[IF_NAME_MAX_LEN];
+ /* link status(UP/DOWN) */
+ int link_status;
+};
+
+#define USDPAA_IOCTL_GET_LINK_STATUS \
+ _IOWR(USDPAA_IOCTL_MAGIC, 0x10, struct usdpaa_ioctl_link_status_args)
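+
+/*
+ * Illustrative user-space sketch: query the current link status of a network
+ * device node. The name "fm1-mac1" is a placeholder, 'fd' is an open USDPAA
+ * file descriptor, and error handling is elided.
+ *
+ *    struct usdpaa_ioctl_link_status_args args;
+ *
+ *    strncpy(args.if_name, "fm1-mac1", IF_NAME_MAX_LEN);
+ *    ioctl(fd, USDPAA_IOCTL_GET_LINK_STATUS, &args);
+ *    ... args.link_status now holds the UP/DOWN state ...
+ */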
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FSL_USDPAA_H */
diff --git a/include/linux/hantrodec.h b/include/linux/hantrodec.h
new file mode 100644
index 000000000000..f423ea82a04e
--- /dev/null
+++ b/include/linux/hantrodec.h
@@ -0,0 +1,29 @@
+/*****************************************************************************
+*
+* The GPL License (GPL)
+*
+* Copyright (c) 2015-2017, VeriSilicon Inc.
+* Copyright (c) 2011-2014, Google Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version 2
+* of the License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+*
+*****************************************************************************/
+
+#ifndef _HANTRODEC_H_
+#define _HANTRODEC_H_
+
+#include <uapi/linux/hantrodec.h>
+
+#endif /* !_HANTRODEC_H_ */
diff --git a/include/linux/hx280enc.h b/include/linux/hx280enc.h
new file mode 100755
index 000000000000..186a00e3446f
--- /dev/null
+++ b/include/linux/hx280enc.h
@@ -0,0 +1,31 @@
+/*****************************************************************************
+ * Encoder device driver (kernel module header)
+ *
+ * Copyright (C) 2012 Google Finland Oy.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+--------------------------------------------------------------------------------
+--
+-- Abstract : 6280/7280/8270/8290/H1 Encoder device driver (kernel module)
+--
+*****************************************************************************/
+
+#ifndef _HX280ENC_H_
+#define _HX280ENC_H_
+
+#include <uapi/linux/hx280enc.h>
+
+#endif /* !_HX280ENC_H_ */
diff --git a/include/linux/imx_mic_epf.h b/include/linux/imx_mic_epf.h
new file mode 100644
index 000000000000..c2ec7be5029e
--- /dev/null
+++ b/include/linux/imx_mic_epf.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright 2020 NXP
+
+#ifndef IMX_MIC_EPF_H
+#define IMX_MIC_EPF_H
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+#define SWIOTLB_REGION_ADDR 0xf8000000
+
+struct imx_mic_mw {
+ phys_addr_t pci_pa;
+ void __iomem *pci_va;
+ resource_size_t pci_len;
+ phys_addr_t rc_shmem_pa;
+ phys_addr_t rc_map_addr;
+};
+
+struct imx_mic_epf {
+ struct device *dev;
+ struct pci_epf *epf;
+ void *reg[6];
+ const struct pci_epc_features *epc_features;
+ struct delayed_work bar0_handler;
+ struct imx_mic_mw aper;
+};
+
+void imx_mic_get_doorbell_info(u32 *doorbell_reg_base, u32 *doorbell_reg_size);
+int imx_mic_probe(struct imx_mic_epf *mic_epf);
+void imx_mic_remove(void);
+#endif
diff --git a/include/linux/imx_rpmsg.h b/include/linux/imx_rpmsg.h
new file mode 100644
index 000000000000..e0d5e979a3e7
--- /dev/null
+++ b/include/linux/imx_rpmsg.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 NXP.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * @file linux/imx_rpmsg.h
+ *
+ * @brief Global header file for iMX RPMSG
+ *
+ * @ingroup RPMSG
+ */
+#ifndef __LINUX_IMX_RPMSG_H__
+#define __LINUX_IMX_RPMSG_H__
+
+/* Category define */
+#define IMX_RMPSG_LIFECYCLE 1
+#define IMX_RPMSG_PMIC 2
+#define IMX_RPMSG_AUDIO 3
+#define IMX_RPMSG_KEY 4
+#define IMX_RPMSG_GPIO 5
+#define IMX_RPMSG_RTC 6
+#define IMX_RPMSG_SENSOR 7
+/* rpmsg version */
+#define IMX_RMPSG_MAJOR 1
+#define IMX_RMPSG_MINOR 0
+
+#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
+
+struct imx_rpmsg_head {
+ u8 cate;
+ u8 major;
+ u8 minor;
+ u8 type;
+ u8 cmd;
+ u8 reserved[5];
+} __packed;
+
+#endif /* __LINUX_IMX_RPMSG_H__ */
diff --git a/include/linux/imx_sema4.h b/include/linux/imx_sema4.h
new file mode 100644
index 000000000000..19850ae7742b
--- /dev/null
+++ b/include/linux/imx_sema4.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IMX_SEMA4_H__
+#define __LINUX_IMX_SEMA4_H__
+
+#define SEMA4_NUM_DEVICES 1
+#define SEMA4_NUM_GATES 16
+
+#define SEMA4_UNLOCK 0x00
+#define SEMA4_A9_LOCK 0x01
+#define SEMA4_GATE_MASK 0x03
+
+#define CORE_MUTEX_VALID (('c'<<24)|('m'<<16)|('t'<<8)|'x')
+
+/*
+ * The enumerations
+ */
+enum {
+ /* sema4 registers offset */
+ SEMA4_CP0INE = 0x40,
+ SEMA4_CP1INE = 0x48,
+ SEMA4_CP0NTF = 0x80,
+ SEMA4_CP1NTF = 0x88,
+};
+
+static const unsigned int idx_sema4[SEMA4_NUM_GATES] = {
+ 1 << 7, 1 << 6, 1 << 5, 1 << 4,
+ 1 << 3, 1 << 2, 1 << 1, 1 << 0,
+ 1 << 15, 1 << 14, 1 << 13, 1 << 12,
+ 1 << 11, 1 << 10, 1 << 9, 1 << 8,
+};
+
+struct imx_sema4_mutex {
+ u32 valid;
+ u32 gate_num;
+ unsigned char gate_val;
+ wait_queue_head_t wait_q;
+};
+
+struct imx_sema4_mutex_device {
+ struct device *dev;
+ u16 cpntf_val;
+ u16 cpine_val;
+ void __iomem *ioaddr; /* Mapped address */
+ spinlock_t lock; /* Mutex */
+ int irq;
+
+ u16 alloced;
+ struct imx_sema4_mutex *mutex_ptr[SEMA4_NUM_GATES];
+};
+
+struct imx_sema4_mutex *
+ imx_sema4_mutex_create(u32 dev_num, u32 mutex_num);
+#ifdef CONFIG_IMX_SEMA4
+int imx_sema4_mutex_destroy(struct imx_sema4_mutex *mutex_ptr);
+int imx_sema4_mutex_trylock(struct imx_sema4_mutex *mutex_ptr);
+int imx_sema4_mutex_lock(struct imx_sema4_mutex *mutex_ptr);
+int imx_sema4_mutex_unlock(struct imx_sema4_mutex *mutex_ptr);
+#else
+static inline int imx_sema4_mutex_destroy(struct imx_sema4_mutex *mutex_ptr)
+{
+ return 0;
+}
+static inline int imx_sema4_mutex_trylock(struct imx_sema4_mutex *mutex_ptr)
+{
+ return 0;
+}
+static inline int imx_sema4_mutex_lock(struct imx_sema4_mutex *mutex_ptr)
+{
+ return 0;
+}
+static inline int imx_sema4_mutex_unlock(struct imx_sema4_mutex *mutex_ptr)
+{
+ return 0;
+}
+#endif
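+
+/*
+ * Illustrative sketch (device and gate numbers are placeholders): create a
+ * gate-0 mutex on SEMA4 device 0 and take/release it around a critical
+ * section shared with the remote core. IS_ERR_OR_NULL() is used defensively
+ * since the failure convention of imx_sema4_mutex_create() is an assumption.
+ *
+ *    struct imx_sema4_mutex *mu = imx_sema4_mutex_create(0, 0);
+ *
+ *    if (!IS_ERR_OR_NULL(mu)) {
+ *        imx_sema4_mutex_lock(mu);
+ *        ... critical section ...
+ *        imx_sema4_mutex_unlock(mu);
+ *    }
+ */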
+#endif /* __LINUX_IMX_SEMA4_H__ */
diff --git a/include/linux/ipu-v3-pre.h b/include/linux/ipu-v3-pre.h
new file mode 100644
index 000000000000..f5a26e5eadbf
--- /dev/null
+++ b/include/linux/ipu-v3-pre.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef __LINUX_IPU_V3_PRE_H_
+#define __LINUX_IPU_V3_PRE_H_
+
+#define IPU_PRE_MAX_WIDTH 1920
+#define IPU_PRE_MAX_BPP 4
+#define IPU_PRE_SMALL_LINE 9 /* to work around erratum ERR009624 */
+
+struct ipu_rect {
+ int left;
+ int top;
+ int width;
+ int height;
+};
+
+struct ipu_pre_context {
+ bool repeat;
+ bool vflip;
+ bool handshake_en;
+ bool hsk_abort_en;
+ unsigned int hsk_line_num;
+ bool sdw_update;
+ unsigned int block_size;
+ unsigned int interlaced;
+ unsigned int prefetch_mode;
+
+ unsigned long cur_buf;
+ unsigned long next_buf;
+
+ unsigned int tile_fmt;
+
+ unsigned int read_burst;
+ unsigned int prefetch_input_bpp;
+ unsigned int prefetch_input_pixel_fmt;
+ unsigned int prefetch_shift_offset;
+ unsigned int prefetch_shift_width;
+ bool shift_bypass;
+ bool field_inverse;
+ bool tpr_coor_offset_en;
+ /* the output of prefetch is
+ * also the input of store
+ */
+ struct ipu_rect prefetch_output_size;
+ unsigned int prefetch_input_active_width;
+ unsigned int prefetch_input_width;
+ unsigned int prefetch_input_height;
+ unsigned int store_pitch;
+ int interlace_offset;
+
+ bool store_en;
+ unsigned int write_burst;
+ unsigned int store_output_bpp;
+
+ unsigned int sec_buf_off;
+ unsigned int trd_buf_off;
+
+ /* return for IPU fb caller */
+ unsigned long store_addr;
+};
+
+/*
+ * To work around the PRE SoC bug recorded in errata ERR009624, software
+ * must not write the PRE_CTRL register while the PRE is writing it
+ * automatically to set the ENABLE bit (bit 0) in repeat mode.
+ * The software mechanism for setting PRE_CTRL therefore differs between
+ * Y resolutions above 9 lines and those of 9 lines or fewer.
+ * Use this helper to check the Y resolution.
+ */
+static inline bool ipu_pre_yres_is_small(unsigned int yres)
+{
+ return yres <= IPU_PRE_SMALL_LINE;
+}
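+
+/*
+ * Illustrative branch on the helper above (a sketch; the concrete
+ * PRE_CTRL update mechanisms live in the PRE driver itself):
+ *
+ *	if (ipu_pre_yres_is_small(yres))
+ *		... take the small-Y-resolution PRE_CTRL update path ...
+ *	else
+ *		... take the regular PRE_CTRL update path ...
+ */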
+
+#ifdef CONFIG_MXC_IPU_V3_PRE
+int ipu_pre_alloc(int ipu_id, ipu_channel_t ipu_ch);
+void ipu_pre_free(unsigned int *id);
+unsigned long ipu_pre_alloc_double_buffer(unsigned int id, unsigned int size);
+void ipu_pre_free_double_buffer(unsigned int id);
+int ipu_pre_config(int id, struct ipu_pre_context *config);
+int ipu_pre_set_ctrl(unsigned int id, struct ipu_pre_context *config);
+int ipu_pre_enable(int id);
+void ipu_pre_disable(int id);
+int ipu_pre_set_fb_buffer(int id, bool resolve,
+ unsigned long fb_paddr,
+ unsigned int y_res,
+ unsigned int x_crop,
+ unsigned int y_crop,
+ unsigned int sec_buf_off,
+ unsigned int trd_buf_off);
+int ipu_pre_sdw_update(int id);
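+
+/*
+ * Typical call order (an illustrative sketch; identifiers other than
+ * the API names above are assumptions, and error handling is elided):
+ *
+ *	id = ipu_pre_alloc(ipu_id, channel);
+ *	store_addr = ipu_pre_alloc_double_buffer(id, size);
+ *	ipu_pre_config(id, &ctx);
+ *	ipu_pre_enable(id);
+ *	...
+ *	ipu_pre_disable(id);
+ *	ipu_pre_free_double_buffer(id);
+ *	ipu_pre_free(&id);
+ */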
+#else
+/*
+ * Stubs for builds without CONFIG_MXC_IPU_V3_PRE; static inline avoids
+ * multiple-definition link errors when this header is included from
+ * several translation units.
+ */
+static inline int ipu_pre_alloc(int ipu_id, ipu_channel_t ipu_ch)
+{
+	return -ENODEV;
+}
+
+static inline void ipu_pre_free(unsigned int *id)
+{
+}
+
+static inline unsigned long ipu_pre_alloc_double_buffer(unsigned int id,
+							unsigned int size)
+{
+	return -ENODEV;
+}
+
+static inline void ipu_pre_free_double_buffer(unsigned int id)
+{
+}
+
+static inline int ipu_pre_config(int id, struct ipu_pre_context *config)
+{
+	return -ENODEV;
+}
+
+static inline int ipu_pre_set_ctrl(unsigned int id,
+				   struct ipu_pre_context *config)
+{
+	return -ENODEV;
+}
+
+static inline int ipu_pre_enable(int id)
+{
+	return -ENODEV;
+}
+
+static inline void ipu_pre_disable(int id)
+{
+}
+
+static inline int ipu_pre_set_fb_buffer(int id, bool resolve,
+					unsigned long fb_paddr,
+					unsigned int y_res,
+					unsigned int x_crop,
+					unsigned int y_crop,
+					unsigned int sec_buf_off,
+					unsigned int trd_buf_off)
+{
+	return -ENODEV;
+}
+
+static inline int ipu_pre_sdw_update(int id)
+{
+	return -ENODEV;
+}
+#endif
+#endif /* __LINUX_IPU_V3_PRE_H_ */
diff --git a/include/linux/ipu-v3-prg.h b/include/linux/ipu-v3-prg.h
new file mode 100644
index 000000000000..2ab803a6699f
--- /dev/null
+++ b/include/linux/ipu-v3-prg.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef __LINUX_IPU_V3_PRG_H_
+#define __LINUX_IPU_V3_PRG_H_
+
+#include <linux/ipu-v3.h>
+
+#define PRG_SO_INTERLACE 1
+#define PRG_SO_PROGRESSIVE 0
+#define PRG_BLOCK_MODE 1
+#define PRG_SCAN_MODE 0
+
+struct ipu_prg_config {
+ unsigned int id;
+ unsigned int pre_num;
+ ipu_channel_t ipu_ch;
+ unsigned int stride;
+ unsigned int height;
+ unsigned int ipu_height;
+ unsigned int crop_line;
+ unsigned int so;
+ unsigned int ilo;
+ unsigned int block_mode;
+ bool vflip;
+ u32 baddr;
+ u32 offset;
+};
+
+#ifdef CONFIG_MXC_IPU_V3_PRG
+int ipu_prg_config(struct ipu_prg_config *config);
+int ipu_prg_disable(unsigned int ipu_id, unsigned int pre_num);
+int ipu_prg_wait_buf_ready(unsigned int ipu_id, unsigned int pre_num,
+ unsigned int hsk_line_num,
+ int pre_store_out_height);
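+
+/*
+ * A minimal configuration sketch (illustrative; the field values below
+ * are assumptions for one progressive, linearly scanned-out plane):
+ *
+ *	struct ipu_prg_config cfg = {
+ *		.id = 0,
+ *		.pre_num = pre_num,
+ *		.ipu_ch = MEM_BG_SYNC,
+ *		.stride = fb_stride,
+ *		.height = fb_height,
+ *		.ipu_height = fb_height,
+ *		.so = PRG_SO_PROGRESSIVE,
+ *		.block_mode = PRG_SCAN_MODE,
+ *		.baddr = fb_paddr,
+ *	};
+ *
+ *	ret = ipu_prg_config(&cfg);
+ */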
+#else
+/*
+ * Stubs for builds without CONFIG_MXC_IPU_V3_PRG; static inline avoids
+ * multiple-definition link errors across translation units.
+ */
+static inline int ipu_prg_config(struct ipu_prg_config *config)
+{
+	return -ENODEV;
+}
+
+static inline int ipu_prg_disable(unsigned int ipu_id, unsigned int pre_num)
+{
+	return -ENODEV;
+}
+
+static inline int ipu_prg_wait_buf_ready(unsigned int ipu_id,
+					 unsigned int pre_num,
+					 unsigned int hsk_line_num,
+					 int pre_store_out_height)
+{
+	return -ENODEV;
+}
+#endif
+#endif /* __LINUX_IPU_V3_PRG_H_ */
diff --git a/include/linux/ipu-v3.h b/include/linux/ipu-v3.h
new file mode 100644
index 000000000000..ae09614a2257
--- /dev/null
+++ b/include/linux/ipu-v3.h
@@ -0,0 +1,770 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2011-2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __LINUX_IPU_V3_H_
+#define __LINUX_IPU_V3_H_
+
+#include <linux/ipu.h>
+
+/* IPU Driver channels definitions. */
+/* Note these are different from IDMA channels */
+#define IPU_MAX_CH 32
+#define _MAKE_CHAN(num, v_in, g_in, a_in, out) \
+ ((num << 24) | (v_in << 18) | (g_in << 12) | (a_in << 6) | out)
+#define _MAKE_ALT_CHAN(ch) (ch | (IPU_MAX_CH << 24))
+#define IPU_CHAN_ID(ch) (ch >> 24)
+#define IPU_CHAN_ALT(ch) (ch & 0x02000000)
+#define IPU_CHAN_ALPHA_IN_DMA(ch) ((uint32_t) (ch >> 6) & 0x3F)
+#define IPU_CHAN_GRAPH_IN_DMA(ch) ((uint32_t) (ch >> 12) & 0x3F)
+#define IPU_CHAN_VIDEO_IN_DMA(ch) ((uint32_t) (ch >> 18) & 0x3F)
+#define IPU_CHAN_OUT_DMA(ch) ((uint32_t) (ch & 0x3F))
+#define NO_DMA 0x3F
+#define ALT 1
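+
+/*
+ * Example (informative): MEM_PP_MEM below is _MAKE_CHAN(6, 11, 15, 18, 22),
+ * so IPU_CHAN_ID() yields 6, IPU_CHAN_VIDEO_IN_DMA() yields IDMA channel 11,
+ * IPU_CHAN_GRAPH_IN_DMA() yields 15, IPU_CHAN_ALPHA_IN_DMA() yields 18 and
+ * IPU_CHAN_OUT_DMA() yields 22.
+ */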
+/*!
+ * Enumeration of IPU logical channels. An IPU logical channel is defined as a
+ * combination of an input (memory to IPU), output (IPU to memory), and/or
+ * secondary input IDMA channels and in some cases an Image Converter task.
+ * Some channels consist of only an input or output.
+ */
+typedef enum {
+ CHAN_NONE = -1,
+ MEM_ROT_ENC_MEM = _MAKE_CHAN(1, 45, NO_DMA, NO_DMA, 48),
+ MEM_ROT_VF_MEM = _MAKE_CHAN(2, 46, NO_DMA, NO_DMA, 49),
+ MEM_ROT_PP_MEM = _MAKE_CHAN(3, 47, NO_DMA, NO_DMA, 50),
+
+ MEM_PRP_ENC_MEM = _MAKE_CHAN(4, 12, 14, 17, 20),
+ MEM_PRP_VF_MEM = _MAKE_CHAN(5, 12, 14, 17, 21),
+ MEM_PP_MEM = _MAKE_CHAN(6, 11, 15, 18, 22),
+
+ MEM_DC_SYNC = _MAKE_CHAN(7, 28, NO_DMA, NO_DMA, NO_DMA),
+ MEM_DC_ASYNC = _MAKE_CHAN(8, 41, NO_DMA, NO_DMA, NO_DMA),
+ MEM_BG_SYNC = _MAKE_CHAN(9, 23, NO_DMA, 51, NO_DMA),
+ MEM_FG_SYNC = _MAKE_CHAN(10, 27, NO_DMA, 31, NO_DMA),
+
+ MEM_BG_ASYNC0 = _MAKE_CHAN(11, 24, NO_DMA, 52, NO_DMA),
+ MEM_FG_ASYNC0 = _MAKE_CHAN(12, 29, NO_DMA, 33, NO_DMA),
+ MEM_BG_ASYNC1 = _MAKE_ALT_CHAN(MEM_BG_ASYNC0),
+ MEM_FG_ASYNC1 = _MAKE_ALT_CHAN(MEM_FG_ASYNC0),
+
+ DIRECT_ASYNC0 = _MAKE_CHAN(13, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
+ DIRECT_ASYNC1 = _MAKE_CHAN(14, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
+
+ CSI_MEM0 = _MAKE_CHAN(15, NO_DMA, NO_DMA, NO_DMA, 0),
+ CSI_MEM1 = _MAKE_CHAN(16, NO_DMA, NO_DMA, NO_DMA, 1),
+ CSI_MEM2 = _MAKE_CHAN(17, NO_DMA, NO_DMA, NO_DMA, 2),
+ CSI_MEM3 = _MAKE_CHAN(18, NO_DMA, NO_DMA, NO_DMA, 3),
+
+ CSI_MEM = CSI_MEM0,
+
+ CSI_PRP_ENC_MEM = _MAKE_CHAN(19, NO_DMA, NO_DMA, NO_DMA, 20),
+ CSI_PRP_VF_MEM = _MAKE_CHAN(20, NO_DMA, NO_DMA, NO_DMA, 21),
+
+	/* for VDI mem->vdi->ic->mem, add graphics plane and alpha */
+ MEM_VDI_PRP_VF_MEM_P = _MAKE_CHAN(21, 8, 14, 17, 21),
+ MEM_VDI_PRP_VF_MEM = _MAKE_CHAN(22, 9, 14, 17, 21),
+ MEM_VDI_PRP_VF_MEM_N = _MAKE_CHAN(23, 10, 14, 17, 21),
+
+ /* for vdi mem->vdi->mem */
+ MEM_VDI_MEM_P = _MAKE_CHAN(24, 8, NO_DMA, NO_DMA, 5),
+ MEM_VDI_MEM = _MAKE_CHAN(25, 9, NO_DMA, NO_DMA, 5),
+ MEM_VDI_MEM_N = _MAKE_CHAN(26, 10, NO_DMA, NO_DMA, 5),
+
+ /* fake channel for vdoa to link with IPU */
+ MEM_VDOA_MEM = _MAKE_CHAN(27, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
+
+ MEM_PP_ADC = CHAN_NONE,
+ ADC_SYS2 = CHAN_NONE,
+
+} ipu_channel_t;
+
+/*!
+ * Enumeration of types of buffers for a logical channel.
+ */
+typedef enum {
+ IPU_OUTPUT_BUFFER = 0, /*!< Buffer for output from IPU */
+ IPU_ALPHA_IN_BUFFER = 1, /*!< Buffer for input to IPU */
+ IPU_GRAPH_IN_BUFFER = 2, /*!< Buffer for input to IPU */
+ IPU_VIDEO_IN_BUFFER = 3, /*!< Buffer for input to IPU */
+ IPU_INPUT_BUFFER = IPU_VIDEO_IN_BUFFER,
+ IPU_SEC_INPUT_BUFFER = IPU_GRAPH_IN_BUFFER,
+} ipu_buffer_t;
+
+#define IPU_PANEL_SERIAL 1
+#define IPU_PANEL_PARALLEL 2
+
+/*!
+ * Enumeration of ADC channel operation mode.
+ */
+typedef enum {
+ Disable,
+ WriteTemplateNonSeq,
+ ReadTemplateNonSeq,
+ WriteTemplateUnCon,
+ ReadTemplateUnCon,
+ WriteDataWithRS,
+ WriteDataWoRS,
+ WriteCmd
+} mcu_mode_t;
+
+/*!
+ * Enumeration of ADC channel addressing mode.
+ */
+typedef enum {
+ FullWoBE,
+ FullWithBE,
+ XY
+} display_addressing_t;
+
+/*!
+ * Union of initialization parameters for a logical channel.
+ */
+typedef union {
+ struct {
+ uint32_t csi;
+ uint32_t mipi_id;
+ uint32_t mipi_vc;
+ bool mipi_en;
+ bool interlaced;
+ } csi_mem;
+ struct {
+ uint32_t in_width;
+ uint32_t in_height;
+ uint32_t in_pixel_fmt;
+ uint32_t out_width;
+ uint32_t out_height;
+ uint32_t out_pixel_fmt;
+ uint32_t outh_resize_ratio;
+ uint32_t outv_resize_ratio;
+ uint32_t csi;
+ uint32_t mipi_id;
+ uint32_t mipi_vc;
+ bool mipi_en;
+ } csi_prp_enc_mem;
+ struct {
+ uint32_t in_width;
+ uint32_t in_height;
+ uint32_t in_pixel_fmt;
+ uint32_t out_width;
+ uint32_t out_height;
+ uint32_t out_pixel_fmt;
+ uint32_t outh_resize_ratio;
+ uint32_t outv_resize_ratio;
+ } mem_prp_enc_mem;
+ struct {
+ uint32_t in_width;
+ uint32_t in_height;
+ uint32_t in_pixel_fmt;
+ uint32_t out_width;
+ uint32_t out_height;
+ uint32_t out_pixel_fmt;
+ } mem_rot_enc_mem;
+ struct {
+ uint32_t in_width;
+ uint32_t in_height;
+ uint32_t in_pixel_fmt;
+ uint32_t out_width;
+ uint32_t out_height;
+ uint32_t out_pixel_fmt;
+ uint32_t outh_resize_ratio;
+ uint32_t outv_resize_ratio;
+ bool graphics_combine_en;
+ bool global_alpha_en;
+ bool key_color_en;
+ uint32_t in_g_pixel_fmt;
+ uint8_t alpha;
+ uint32_t key_color;
+ bool alpha_chan_en;
+ ipu_motion_sel motion_sel;
+ enum v4l2_field field_fmt;
+ uint32_t csi;
+ uint32_t mipi_id;
+ uint32_t mipi_vc;
+ bool mipi_en;
+ } csi_prp_vf_mem;
+ struct {
+ uint32_t in_width;
+ uint32_t in_height;
+ uint32_t in_pixel_fmt;
+ uint32_t out_width;
+ uint32_t out_height;
+ uint32_t out_pixel_fmt;
+ bool graphics_combine_en;
+ bool global_alpha_en;
+ bool key_color_en;
+ display_port_t disp;
+ uint32_t out_left;
+ uint32_t out_top;
+ } csi_prp_vf_adc;
+ struct {
+ uint32_t in_width;
+ uint32_t in_height;
+ uint32_t in_pixel_fmt;
+ uint32_t out_width;
+ uint32_t out_height;
+ uint32_t out_pixel_fmt;
+ uint32_t outh_resize_ratio;
+ uint32_t outv_resize_ratio;
+ bool graphics_combine_en;
+ bool global_alpha_en;
+ bool key_color_en;
+ uint32_t in_g_pixel_fmt;
+ uint8_t alpha;
+ uint32_t key_color;
+ bool alpha_chan_en;
+ ipu_motion_sel motion_sel;
+ enum v4l2_field field_fmt;
+ } mem_prp_vf_mem;
+ struct {
+ uint32_t temp;
+ } mem_prp_vf_adc;
+ struct {
+ uint32_t temp;
+ } mem_rot_vf_mem;
+ struct {
+ uint32_t in_width;
+ uint32_t in_height;
+ uint32_t in_pixel_fmt;
+ uint32_t out_width;
+ uint32_t out_height;
+ uint32_t out_pixel_fmt;
+ uint32_t outh_resize_ratio;
+ uint32_t outv_resize_ratio;
+ bool graphics_combine_en;
+ bool global_alpha_en;
+ bool key_color_en;
+ uint32_t in_g_pixel_fmt;
+ uint8_t alpha;
+ uint32_t key_color;
+ bool alpha_chan_en;
+ } mem_pp_mem;
+ struct {
+ uint32_t temp;
+ } mem_rot_mem;
+ struct {
+ uint32_t in_width;
+ uint32_t in_height;
+ uint32_t in_pixel_fmt;
+ uint32_t out_width;
+ uint32_t out_height;
+ uint32_t out_pixel_fmt;
+ bool graphics_combine_en;
+ bool global_alpha_en;
+ bool key_color_en;
+ display_port_t disp;
+ uint32_t out_left;
+ uint32_t out_top;
+ } mem_pp_adc;
+ struct {
+ uint32_t di;
+ bool interlaced;
+ uint32_t in_pixel_fmt;
+ uint32_t out_pixel_fmt;
+ } mem_dc_sync;
+ struct {
+ uint32_t temp;
+ } mem_sdc_fg;
+ struct {
+ uint32_t di;
+ bool interlaced;
+ uint32_t in_pixel_fmt;
+ uint32_t out_pixel_fmt;
+ bool alpha_chan_en;
+ } mem_dp_bg_sync;
+ struct {
+ uint32_t temp;
+ } mem_sdc_bg;
+ struct {
+ uint32_t di;
+ bool interlaced;
+ uint32_t in_pixel_fmt;
+ uint32_t out_pixel_fmt;
+ bool alpha_chan_en;
+ } mem_dp_fg_sync;
+ struct {
+ uint32_t di;
+ } direct_async;
+ struct {
+ display_port_t disp;
+ mcu_mode_t ch_mode;
+ uint32_t out_left;
+ uint32_t out_top;
+ } adc_sys1;
+ struct {
+ display_port_t disp;
+ mcu_mode_t ch_mode;
+ uint32_t out_left;
+ uint32_t out_top;
+ } adc_sys2;
+} ipu_channel_params_t;
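+
+/*
+ * Initialization sketch (illustrative; the geometry and pixel formats
+ * below are assumptions):
+ *
+ *	ipu_channel_params_t params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.mem_pp_mem.in_width = 320;
+ *	params.mem_pp_mem.in_height = 240;
+ *	params.mem_pp_mem.in_pixel_fmt = V4L2_PIX_FMT_YUV420;
+ *	params.mem_pp_mem.out_width = 640;
+ *	params.mem_pp_mem.out_height = 480;
+ *	params.mem_pp_mem.out_pixel_fmt = V4L2_PIX_FMT_RGB565;
+ *	ret = ipu_init_channel(ipu, MEM_PP_MEM, &params);
+ */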
+
+/*
+ * IPU_IRQF_ONESHOT - the interrupt is not re-enabled after the IRQ handler finishes.
+ */
+#define IPU_IRQF_NONE 0x00000000
+#define IPU_IRQF_ONESHOT 0x00000001
+
+/*!
+ * Enumeration of IPU interrupt sources.
+ */
+enum ipu_irq_line {
+ IPU_IRQ_CSI0_OUT_EOF = 0,
+ IPU_IRQ_CSI1_OUT_EOF = 1,
+ IPU_IRQ_CSI2_OUT_EOF = 2,
+ IPU_IRQ_CSI3_OUT_EOF = 3,
+ IPU_IRQ_VDIC_OUT_EOF = 5,
+ IPU_IRQ_VDI_P_IN_EOF = 8,
+ IPU_IRQ_VDI_C_IN_EOF = 9,
+ IPU_IRQ_VDI_N_IN_EOF = 10,
+ IPU_IRQ_PP_IN_EOF = 11,
+ IPU_IRQ_PRP_IN_EOF = 12,
+ IPU_IRQ_PRP_GRAPH_IN_EOF = 14,
+ IPU_IRQ_PP_GRAPH_IN_EOF = 15,
+ IPU_IRQ_PRP_ALPHA_IN_EOF = 17,
+ IPU_IRQ_PP_ALPHA_IN_EOF = 18,
+ IPU_IRQ_PRP_ENC_OUT_EOF = 20,
+ IPU_IRQ_PRP_VF_OUT_EOF = 21,
+ IPU_IRQ_PP_OUT_EOF = 22,
+ IPU_IRQ_BG_SYNC_EOF = 23,
+ IPU_IRQ_BG_ASYNC_EOF = 24,
+ IPU_IRQ_FG_SYNC_EOF = 27,
+ IPU_IRQ_DC_SYNC_EOF = 28,
+ IPU_IRQ_FG_ASYNC_EOF = 29,
+ IPU_IRQ_FG_ALPHA_SYNC_EOF = 31,
+
+ IPU_IRQ_FG_ALPHA_ASYNC_EOF = 33,
+ IPU_IRQ_DC_READ_EOF = 40,
+ IPU_IRQ_DC_ASYNC_EOF = 41,
+ IPU_IRQ_DC_CMD1_EOF = 42,
+ IPU_IRQ_DC_CMD2_EOF = 43,
+ IPU_IRQ_DC_MASK_EOF = 44,
+ IPU_IRQ_PRP_ENC_ROT_IN_EOF = 45,
+ IPU_IRQ_PRP_VF_ROT_IN_EOF = 46,
+ IPU_IRQ_PP_ROT_IN_EOF = 47,
+ IPU_IRQ_PRP_ENC_ROT_OUT_EOF = 48,
+ IPU_IRQ_PRP_VF_ROT_OUT_EOF = 49,
+ IPU_IRQ_PP_ROT_OUT_EOF = 50,
+ IPU_IRQ_BG_ALPHA_SYNC_EOF = 51,
+ IPU_IRQ_BG_ALPHA_ASYNC_EOF = 52,
+
+ IPU_IRQ_BG_SYNC_NFACK = 64 + 23,
+ IPU_IRQ_FG_SYNC_NFACK = 64 + 27,
+ IPU_IRQ_DC_SYNC_NFACK = 64 + 28,
+
+ IPU_IRQ_DP_SF_START = 448 + 2,
+ IPU_IRQ_DP_SF_END = 448 + 3,
+ IPU_IRQ_BG_SF_END = IPU_IRQ_DP_SF_END,
+ IPU_IRQ_DC_FC_0 = 448 + 8,
+ IPU_IRQ_DC_FC_1 = 448 + 9,
+ IPU_IRQ_DC_FC_2 = 448 + 10,
+ IPU_IRQ_DC_FC_3 = 448 + 11,
+ IPU_IRQ_DC_FC_4 = 448 + 12,
+ IPU_IRQ_DC_FC_6 = 448 + 13,
+ IPU_IRQ_VSYNC_PRE_0 = 448 + 14,
+ IPU_IRQ_VSYNC_PRE_1 = 448 + 15,
+
+ IPU_IRQ_COUNT
+};
+
+/*!
+ * Bitfield of Display Interface signal polarities.
+ */
+typedef struct {
+ unsigned datamask_en:1;
+ unsigned int_clk:1;
+ unsigned interlaced:1;
+ unsigned odd_field_first:1;
+ unsigned clksel_en:1;
+ unsigned clkidle_en:1;
+ unsigned data_pol:1; /* true = inverted */
+ unsigned clk_pol:1; /* true = rising edge */
+ unsigned enable_pol:1;
+ unsigned Hsync_pol:1; /* true = active high */
+ unsigned Vsync_pol:1;
+} ipu_di_signal_cfg_t;
+
+/*!
+ * Bitfield of CSI signal polarities and modes.
+ */
+
+typedef struct {
+ unsigned data_width:4;
+ unsigned clk_mode:3;
+ unsigned ext_vsync:1;
+ unsigned Vsync_pol:1;
+ unsigned Hsync_pol:1;
+ unsigned pixclk_pol:1;
+ unsigned data_pol:1;
+ unsigned sens_clksrc:1;
+ unsigned pack_tight:1;
+ unsigned force_eof:1;
+ unsigned data_en_pol:1;
+ unsigned data_fmt;
+ unsigned csi;
+ unsigned mclk;
+} ipu_csi_signal_cfg_t;
+
+/*!
+ * Enumeration of CSI data bus widths.
+ */
+enum {
+ IPU_CSI_DATA_WIDTH_4 = 0,
+ IPU_CSI_DATA_WIDTH_8 = 1,
+ IPU_CSI_DATA_WIDTH_10 = 3,
+ IPU_CSI_DATA_WIDTH_16 = 9,
+};
+
+/*!
+ * Enumeration of CSI clock modes.
+ */
+enum {
+ IPU_CSI_CLK_MODE_GATED_CLK,
+ IPU_CSI_CLK_MODE_NONGATED_CLK,
+ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
+ IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
+};
+
+enum {
+ IPU_CSI_MIPI_DI0,
+ IPU_CSI_MIPI_DI1,
+ IPU_CSI_MIPI_DI2,
+ IPU_CSI_MIPI_DI3,
+};
+
+typedef enum {
+ RGB,
+ YCbCr,
+ YUV
+} ipu_color_space_t;
+
+/*!
+ * Enumeration of ADC vertical sync mode.
+ */
+typedef enum {
+ VsyncNone,
+ VsyncInternal,
+ VsyncCSI,
+ VsyncExternal
+} vsync_t;
+
+typedef enum {
+ DAT,
+ CMD
+} cmddata_t;
+
+/*!
+ * Enumeration of ADC display update mode.
+ */
+typedef enum {
+ IPU_ADC_REFRESH_NONE,
+ IPU_ADC_AUTO_REFRESH,
+ IPU_ADC_AUTO_REFRESH_SNOOP,
+ IPU_ADC_SNOOPING,
+} ipu_adc_update_mode_t;
+
+/*!
+ * Enumeration of ADC display interface types (serial or parallel).
+ */
+enum {
+ IPU_ADC_IFC_MODE_SYS80_TYPE1,
+ IPU_ADC_IFC_MODE_SYS80_TYPE2,
+ IPU_ADC_IFC_MODE_SYS68K_TYPE1,
+ IPU_ADC_IFC_MODE_SYS68K_TYPE2,
+ IPU_ADC_IFC_MODE_3WIRE_SERIAL,
+ IPU_ADC_IFC_MODE_4WIRE_SERIAL,
+ IPU_ADC_IFC_MODE_5WIRE_SERIAL_CLK,
+ IPU_ADC_IFC_MODE_5WIRE_SERIAL_CS,
+};
+
+enum {
+ IPU_ADC_IFC_WIDTH_8,
+ IPU_ADC_IFC_WIDTH_16,
+};
+
+/*!
+ * Enumeration of ADC display interface burst mode.
+ */
+enum {
+ IPU_ADC_BURST_WCS,
+ IPU_ADC_BURST_WBLCK,
+ IPU_ADC_BURST_NONE,
+ IPU_ADC_BURST_SERIAL,
+};
+
+/*!
+ * Enumeration of ADC display interface RW signal timing modes.
+ */
+enum {
+ IPU_ADC_SER_NO_RW,
+ IPU_ADC_SER_RW_BEFORE_RS,
+ IPU_ADC_SER_RW_AFTER_RS,
+};
+
+/*!
+ * Bitfield of ADC signal polarities and modes.
+ */
+typedef struct {
+ unsigned data_pol:1;
+ unsigned clk_pol:1;
+ unsigned cs_pol:1;
+ unsigned rs_pol:1;
+ unsigned addr_pol:1;
+ unsigned read_pol:1;
+ unsigned write_pol:1;
+ unsigned Vsync_pol:1;
+ unsigned burst_pol:1;
+ unsigned burst_mode:2;
+ unsigned ifc_mode:3;
+ unsigned ifc_width:5;
+ unsigned ser_preamble_len:4;
+ unsigned ser_preamble:8;
+ unsigned ser_rw_mode:2;
+} ipu_adc_sig_cfg_t;
+
+/*!
+ * Enumeration of ADC template commands.
+ */
+enum {
+ RD_DATA,
+ RD_ACK,
+ RD_WAIT,
+ WR_XADDR,
+ WR_YADDR,
+ WR_ADDR,
+ WR_CMND,
+ WR_DATA,
+};
+
+/*!
+ * Enumeration of ADC template command flow control.
+ */
+enum {
+ SINGLE_STEP,
+ PAUSE,
+ STOP,
+};
+
+/* Define template constants */
+#define ATM_ADDR_RANGE 0x20 /* offset address of DISP */
+#define TEMPLATE_BUF_SIZE 0x20 /* size of template */
+
+/*!
+ * Define to create ADC template command entry.
+ */
+#define ipu_adc_template_gen(oc, rs, fc, dat) (((rs) << 29) | ((fc) << 27) | \
+ ((oc) << 24) | (dat))
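+
+/*
+ * Example (informative): ipu_adc_template_gen(WR_DATA, 1, SINGLE_STEP, 0xff)
+ * places RS = 1 in bit 29, the flow control in bits 28:27, the opcode in
+ * bits 26:24 and the data byte in the low bits.
+ */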
+
+typedef struct {
+ u32 reg;
+ u32 value;
+} ipu_lpmc_reg_t;
+
+#define IPU_LPMC_REG_READ 0x80000000L
+
+#define CSI_MCLK_VF 1
+#define CSI_MCLK_ENC 2
+#define CSI_MCLK_RAW 4
+#define CSI_MCLK_I2C 8
+
+struct ipu_soc;
+/* Common IPU API */
+struct ipu_soc *ipu_get_soc(int id);
+int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params);
+void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel);
+void ipu_disable_hsp_clk(struct ipu_soc *ipu);
+
+static inline bool ipu_can_rotate_in_place(ipu_rotate_mode_t rot)
+{
+#ifdef CONFIG_MXC_IPU_V3D
+ return (rot < IPU_ROTATE_HORIZ_FLIP);
+#else
+ return (rot < IPU_ROTATE_90_RIGHT);
+#endif
+}
+
+int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t pixel_fmt,
+ uint16_t width, uint16_t height,
+ uint32_t stride,
+ ipu_rotate_mode_t rot_mode,
+ dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
+ dma_addr_t phyaddr_2,
+ uint32_t u_offset, uint32_t v_offset);
+
+int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t bufNum, dma_addr_t phyaddr);
+
+int32_t ipu_update_channel_offset(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t pixel_fmt,
+ uint16_t width, uint16_t height,
+ uint32_t stride,
+ uint32_t u, uint32_t v,
+ uint32_t vertical_offset, uint32_t horizontal_offset);
+
+int32_t ipu_get_channel_offset(uint32_t pixel_fmt,
+ uint16_t width, uint16_t height,
+ uint32_t stride,
+ uint32_t u, uint32_t v,
+ uint32_t vertical_offset, uint32_t horizontal_offset,
+ uint32_t *u_offset, uint32_t *v_offset);
+
+int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
+ ipu_buffer_t type, uint32_t bufNum);
+int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum);
+
+int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch);
+int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch);
+
+int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel);
+int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t bufNum);
+void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
+ uint32_t bufNum);
+uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type);
+int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel);
+int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop);
+int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch);
+uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel);
+
+int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi);
+int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi);
+
+int ipu_lowpwr_display_enable(void);
+int ipu_lowpwr_display_disable(void);
+
+int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq);
+void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq);
+void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq);
+int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
+ irqreturn_t(*handler) (int, void *),
+ uint32_t irq_flags, const char *devname, void *dev_id);
+void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id);
+bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq);
+void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3]);
+int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
+ ipu_buffer_t type, uint32_t band_height);
+
+/* two stripe calculations */
+struct stripe_param {
+	unsigned int input_width; /* width of the input stripe */
+	unsigned int output_width; /* width of the output stripe */
+	unsigned int input_column; /* the first column on the input stripe */
+	unsigned int output_column; /* the first column on the output stripe */
+	unsigned int idr;
+	/* inverse downsizing ratio parameter; expressed as a power of 2 */
+	unsigned int irr;
+	/* inverse resizing ratio parameter; expressed as a multiple of 2^-13 */
+};
+int ipu_calc_stripes_sizes(const unsigned int input_frame_width,
+ unsigned int output_frame_width,
+ const unsigned int maximal_stripe_width,
+ const unsigned long long cirr,
+ const unsigned int equal_stripes,
+ u32 input_pixelformat,
+ u32 output_pixelformat,
+ struct stripe_param *left,
+ struct stripe_param *right);
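+
+/*
+ * Usage sketch (illustrative; the widths, ratio and pixel formats below
+ * are placeholder assumptions):
+ *
+ *	struct stripe_param left, right;
+ *
+ *	ret = ipu_calc_stripes_sizes(4096, 2048, 2048, 1 << 13, 1,
+ *				     V4L2_PIX_FMT_YUV420,
+ *				     V4L2_PIX_FMT_RGB565, &left, &right);
+ */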
+
+/* SDC API */
+int32_t ipu_init_sync_panel(struct ipu_soc *ipu, int disp,
+ uint32_t pixel_clk,
+ uint16_t width, uint16_t height,
+ uint32_t pixel_fmt,
+ uint16_t h_start_width, uint16_t h_sync_width,
+ uint16_t h_end_width, uint16_t v_start_width,
+ uint16_t v_sync_width, uint16_t v_end_width,
+ uint32_t v_to_h_sync, ipu_di_signal_cfg_t sig);
+
+void ipu_uninit_sync_panel(struct ipu_soc *ipu, int disp);
+
+int32_t ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel, int16_t x_pos,
+ int16_t y_pos);
+int32_t ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel, int16_t *x_pos,
+ int16_t *y_pos);
+int32_t ipu_disp_set_global_alpha(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
+ uint8_t alpha);
+int32_t ipu_disp_set_color_key(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
+ uint32_t colorKey);
+int32_t ipu_disp_set_gamma_correction(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
+ int constk[], int slopek[]);
+
+int ipu_init_async_panel(struct ipu_soc *ipu, int disp, int type, uint32_t cycle_time,
+ uint32_t pixel_fmt, ipu_adc_sig_cfg_t sig);
+void ipu_reset_disp_panel(struct ipu_soc *ipu);
+
+/* CMOS Sensor Interface API */
+int32_t ipu_csi_init_interface(struct ipu_soc *ipu, uint16_t width, uint16_t height,
+ uint32_t pixel_fmt, ipu_csi_signal_cfg_t sig);
+
+int32_t ipu_csi_get_sensor_protocol(struct ipu_soc *ipu, uint32_t csi);
+
+int32_t ipu_csi_enable_mclk(struct ipu_soc *ipu, int src, bool flag, bool wait);
+
+static inline int32_t ipu_csi_enable_mclk_if(struct ipu_soc *ipu, int src, uint32_t csi,
+ bool flag, bool wait)
+{
+ return ipu_csi_enable_mclk(ipu, csi, flag, wait);
+}
+
+int ipu_csi_read_mclk_flag(void);
+
+void ipu_csi_flash_strobe(bool flag);
+
+void ipu_csi_get_window_size(struct ipu_soc *ipu, uint32_t *width, uint32_t *height, uint32_t csi);
+
+void ipu_csi_set_window_size(struct ipu_soc *ipu, uint32_t width, uint32_t height, uint32_t csi);
+
+void ipu_csi_set_window_pos(struct ipu_soc *ipu, uint32_t left, uint32_t top, uint32_t csi);
+
+uint32_t bytes_per_pixel(uint32_t fmt);
+
+bool ipu_ch_param_bad_alpha_pos(uint32_t fmt);
+int ipu_ch_param_get_axi_id(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type);
+ipu_color_space_t format_to_colorspace(uint32_t fmt);
+bool ipu_pixel_format_is_gpu_tile(uint32_t fmt);
+bool ipu_pixel_format_is_split_gpu_tile(uint32_t fmt);
+bool ipu_pixel_format_is_pre_yuv(uint32_t fmt);
+bool ipu_pixel_format_is_multiplanar_yuv(uint32_t fmt);
+
+struct ipuv3_fb_platform_data {
+ char disp_dev[32];
+ u32 interface_pix_fmt;
+ char *mode_str;
+ int default_bpp;
+ bool int_clk;
+
+ /* reserved mem */
+ resource_size_t res_base[2];
+ resource_size_t res_size[2];
+
+	/*
+	 * Late init to avoid the display channel being re-initialized,
+	 * as the bootloader has probably already set up the channel.
+	 */
+ bool late_init;
+
+ /* Enable prefetch engine or not? */
+ bool prefetch;
+
+ /* Enable the PRE resolve engine or not? */
+ bool resolve;
+};
+
+#endif /* __LINUX_IPU_V3_H_ */
diff --git a/include/linux/ipu.h b/include/linux/ipu.h
new file mode 100644
index 000000000000..1090ac65f24b
--- /dev/null
+++ b/include/linux/ipu.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2005-2015 Freescale Semiconductor, Inc.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU Lesser General
+ * Public License. You may obtain a copy of the GNU Lesser General
+ * Public License Version 2.1 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/lgpl-license.html
+ * http://www.gnu.org/copyleft/lgpl.html
+ */
+
+/*!
+ * @defgroup IPU MXC Image Processing Unit (IPU) Driver
+ */
+/*!
+ * @file linux/ipu.h
+ *
+ * @brief This file contains the IPU driver API declarations.
+ *
+ * @ingroup IPU
+ */
+
+#ifndef __LINUX_IPU_H__
+#define __LINUX_IPU_H__
+
+#include <linux/interrupt.h>
+#include <uapi/linux/ipu.h>
+
+unsigned int fmt_to_bpp(unsigned int pixelformat);
+cs_t colorspaceofpixel(int fmt);
+int need_csc(int ifmt, int ofmt);
+
+int ipu_queue_task(struct ipu_task *task);
+int ipu_check_task(struct ipu_task *task);
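+
+/*
+ * Usage sketch (illustrative only): callers fill a struct ipu_task from
+ * uapi/linux/ipu.h, validate it, then queue it.
+ *
+ *	struct ipu_task task;
+ *
+ *	memset(&task, 0, sizeof(task));
+ *	... fill task.input and task.output ...
+ *	ret = ipu_check_task(&task);
+ *	if (ret == IPU_CHECK_OK)
+ *		ret = ipu_queue_task(&task);
+ */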
+
+#endif
diff --git a/include/linux/ivshmem.h b/include/linux/ivshmem.h
new file mode 100644
index 000000000000..bad8547f071b
--- /dev/null
+++ b/include/linux/ivshmem.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_IVSHMEM_H
+#define _LINUX_IVSHMEM_H
+
+#include <linux/types.h>
+
+#define IVSHM_PROTO_UNDEFINED 0x0000
+#define IVSHM_PROTO_NET 0x0001
+#define IVSHM_PROTO_VIRTIO_FRONT 0x8000
+#define IVSHM_PROTO_VIRTIO_BACK 0xc000
+#define IVSHM_PROTO_VIRTIO_DEVID_MASK 0x7fff
+
+#define IVSHM_CFG_PRIV_CNTL 0x03
+# define IVSHM_PRIV_CNTL_ONESHOT_INT BIT(0)
+#define IVSHM_CFG_STATE_TAB_SZ 0x04
+#define IVSHM_CFG_RW_SECTION_SZ 0x08
+#define IVSHM_CFG_OUTPUT_SECTION_SZ 0x10
+#define IVSHM_CFG_ADDRESS 0x18
+
+struct ivshm_regs {
+ u32 id;
+ u32 max_peers;
+ u32 int_control;
+ u32 doorbell;
+ u32 state;
+};
+
+#define IVSHM_INT_ENABLE BIT(0)
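+
+/*
+ * Illustrative doorbell write (a sketch; "regs" is an assumed ioremap'ed
+ * pointer to struct ivshm_regs, and the peer-ID/vector encoding of the
+ * doorbell value is an assumption taken from the ivshmem v2 convention):
+ *
+ *	writel(IVSHM_INT_ENABLE, &regs->int_control);
+ *	writel((target_id << 16) | vector, &regs->doorbell);
+ */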
+
+#endif /* _LINUX_IVSHMEM_H */
diff --git a/include/linux/mfd/max17135.h b/include/linux/mfd/max17135.h
new file mode 100644
index 000000000000..f41ba1245ebf
--- /dev/null
+++ b/include/linux/mfd/max17135.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2019 NXP
+ */
+#ifndef __LINUX_REGULATOR_MAX17135_H_
+#define __LINUX_REGULATOR_MAX17135_H_
+
+/*
+ * PMIC Register Addresses
+ */
+enum {
+ REG_MAX17135_EXT_TEMP = 0x0,
+ REG_MAX17135_CONFIG,
+ REG_MAX17135_INT_TEMP = 0x4,
+ REG_MAX17135_STATUS,
+ REG_MAX17135_PRODUCT_REV,
+ REG_MAX17135_PRODUCT_ID,
+ REG_MAX17135_DVR,
+ REG_MAX17135_ENABLE,
+ REG_MAX17135_FAULT, /*0x0A*/
+ REG_MAX17135_HVINP,
+ REG_MAX17135_PRGM_CTRL,
+ REG_MAX17135_TIMING1 = 0x10, /* Timing regs base address is 0x10 */
+ REG_MAX17135_TIMING2,
+ REG_MAX17135_TIMING3,
+ REG_MAX17135_TIMING4,
+ REG_MAX17135_TIMING5,
+ REG_MAX17135_TIMING6,
+ REG_MAX17135_TIMING7,
+ REG_MAX17135_TIMING8,
+};
+#define MAX17135_REG_NUM 21
+#define MAX17135_MAX_REGISTER 0xFF
+
+/*
+ * Bitfield macros that rely on bitfield width/shift information.
+ */
+#define BITFMASK(field) (((1U << (field ## _WID)) - 1) << (field ## _LSH))
+#define BITFVAL(field, val) ((val) << (field ## _LSH))
+#define BITFEXT(var, bit) ((var & BITFMASK(bit)) >> (bit ## _LSH))
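+
+/*
+ * Example (informative): with FAULT_POK_LSH = 7 and FAULT_POK_WID = 1,
+ * BITFMASK(FAULT_POK) is 0x80, BITFVAL(FAULT_POK, 1) is 0x80, and
+ * BITFEXT(val, FAULT_POK) extracts bit 7 of a FAULT register value.
+ */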
+
+/*
+ * Shift and width values for each register bitfield
+ */
+#define EXT_TEMP_LSH 7
+#define EXT_TEMP_WID 9
+
+#define THERMAL_SHUTDOWN_LSH 0
+#define THERMAL_SHUTDOWN_WID 1
+
+#define INT_TEMP_LSH 7
+#define INT_TEMP_WID 9
+
+#define STAT_BUSY_LSH 0
+#define STAT_BUSY_WID 1
+#define STAT_OPEN_LSH 1
+#define STAT_OPEN_WID 1
+#define STAT_SHRT_LSH 2
+#define STAT_SHRT_WID 1
+
+#define PROD_REV_LSH 0
+#define PROD_REV_WID 8
+
+#define PROD_ID_LSH 0
+#define PROD_ID_WID 8
+
+#define DVR_LSH 0
+#define DVR_WID 8
+
+#define ENABLE_LSH 0
+#define ENABLE_WID 1
+#define VCOM_ENABLE_LSH 1
+#define VCOM_ENABLE_WID 1
+
+#define FAULT_FBPG_LSH 0
+#define FAULT_FBPG_WID 1
+#define FAULT_HVINP_LSH 1
+#define FAULT_HVINP_WID 1
+#define FAULT_HVINN_LSH 2
+#define FAULT_HVINN_WID 1
+#define FAULT_FBNG_LSH 3
+#define FAULT_FBNG_WID 1
+#define FAULT_HVINPSC_LSH 4
+#define FAULT_HVINPSC_WID 1
+#define FAULT_HVINNSC_LSH 5
+#define FAULT_HVINNSC_WID 1
+#define FAULT_OT_LSH 6
+#define FAULT_OT_WID 1
+#define FAULT_POK_LSH 7
+#define FAULT_POK_WID 1
+
+#define HVINP_LSH 0
+#define HVINP_WID 4
+
+#define CTRL_DVR_LSH 0
+#define CTRL_DVR_WID 1
+#define CTRL_TIMING_LSH 1
+#define CTRL_TIMING_WID 1
+
+#define TIMING1_LSH 0
+#define TIMING1_WID 8
+#define TIMING2_LSH 0
+#define TIMING2_WID 8
+#define TIMING3_LSH 0
+#define TIMING3_WID 8
+#define TIMING4_LSH 0
+#define TIMING4_WID 8
+#define TIMING5_LSH 0
+#define TIMING5_WID 8
+#define TIMING6_LSH 0
+#define TIMING6_WID 8
+#define TIMING7_LSH 0
+#define TIMING7_WID 8
+#define TIMING8_LSH 0
+#define TIMING8_WID 8
+
+struct max17135 {
+ /* chip revision */
+ int rev;
+
+ struct device *dev;
+ struct max17135_platform_data *pdata;
+
+ /* Platform connection */
+ struct i2c_client *i2c_client;
+
+ /* Timings */
+ unsigned int gvee_pwrup;
+ unsigned int vneg_pwrup;
+ unsigned int vpos_pwrup;
+ unsigned int gvdd_pwrup;
+ unsigned int gvdd_pwrdn;
+ unsigned int vpos_pwrdn;
+ unsigned int vneg_pwrdn;
+ unsigned int gvee_pwrdn;
+
+ /* GPIOs */
+ int gpio_pmic_pwrgood;
+ int gpio_pmic_vcom_ctrl;
+ int gpio_pmic_wakeup;
+ int gpio_pmic_v3p3;
+ int gpio_pmic_intr;
+
+ /* MAX17135 part variables */
+ int pass_num;
+ int vcom_uV;
+
+ /* One-time VCOM setup marker */
+ bool vcom_setup;
+
+ /* powerup/powerdown wait time */
+ int max_wait;
+};
+
+enum {
+ /* In alphabetical order */
+ MAX17135_DISPLAY, /* virtual master enable */
+ MAX17135_GVDD,
+ MAX17135_GVEE,
+ MAX17135_HVINN,
+ MAX17135_HVINP,
+ MAX17135_VCOM,
+ MAX17135_VNEG,
+ MAX17135_VPOS,
+ MAX17135_V3P3,
+ MAX17135_NUM_REGULATORS,
+};
+
+/*
+ * Declarations
+ */
+struct regulator_init_data;
+struct max17135_regulator_data;
+
+struct max17135_platform_data {
+ unsigned int gvee_pwrup;
+ unsigned int vneg_pwrup;
+ unsigned int vpos_pwrup;
+ unsigned int gvdd_pwrup;
+ unsigned int gvdd_pwrdn;
+ unsigned int vpos_pwrdn;
+ unsigned int vneg_pwrdn;
+ unsigned int gvee_pwrdn;
+ int gpio_pmic_pwrgood;
+ int gpio_pmic_vcom_ctrl;
+ int gpio_pmic_wakeup;
+ int gpio_pmic_v3p3;
+ int gpio_pmic_intr;
+ int pass_num;
+ int vcom_uV;
+
+ /* PMIC */
+ struct max17135_regulator_data *regulators;
+ int num_regulators;
+};
+
+struct max17135_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+ struct device_node *reg_node;
+};
+
+int max17135_reg_read(int reg_num, unsigned int *reg_val);
+int max17135_reg_write(int reg_num, const unsigned int reg_val);
+
+#endif
diff --git a/include/linux/mfd/mxc-hdmi-core.h b/include/linux/mfd/mxc-hdmi-core.h
new file mode 100644
index 000000000000..b2696b951f43
--- /dev/null
+++ b/include/linux/mfd/mxc-hdmi-core.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef __LINUX_MXC_HDMI_CORE_H_
+#define __LINUX_MXC_HDMI_CORE_H_
+
+#include <video/mxc_edid.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define IRQ_DISABLE_SUCCEED 0
+#define IRQ_DISABLE_FAIL 1
+
+bool hdmi_check_overflow(void);
+
+u8 hdmi_readb(unsigned int reg);
+void hdmi_writeb(u8 value, unsigned int reg);
+void hdmi_mask_writeb(u8 data, unsigned int addr, u8 shift, u8 mask);
+unsigned int hdmi_read4(unsigned int reg);
+void hdmi_write4(unsigned int value, unsigned int reg);
+
+void hdmi_irq_init(void);
+void hdmi_irq_enable(int irq);
+unsigned int hdmi_irq_disable(int irq);
+
+void hdmi_set_sample_rate(unsigned int rate);
+void hdmi_set_dma_mode(unsigned int dma_running);
+void hdmi_init_clk_regenerator(void);
+void hdmi_clk_regenerator_update_pixel_clock(u32 pixclock);
+
+void hdmi_set_edid_cfg(struct mxc_edid_cfg *cfg);
+void hdmi_get_edid_cfg(struct mxc_edid_cfg *cfg);
+
+extern int mxc_hdmi_ipu_id;
+extern int mxc_hdmi_disp_id;
+
+void hdmi_set_registered(int registered);
+int hdmi_get_registered(void);
+int mxc_hdmi_abort_stream(void);
+int mxc_hdmi_register_audio(struct snd_pcm_substream *substream);
+void mxc_hdmi_unregister_audio(struct snd_pcm_substream *substream);
+unsigned int hdmi_set_cable_state(unsigned int state);
+unsigned int hdmi_set_blank_state(unsigned int state);
+int check_hdmi_state(void);
+
+#endif
diff --git a/include/linux/mfd/pca9450.h b/include/linux/mfd/pca9450.h
new file mode 100644
index 000000000000..80e45c5e0472
--- /dev/null
+++ b/include/linux/mfd/pca9450.h
@@ -0,0 +1,355 @@
+/**
+ * @file pca9450.h NXP PCA9450 header file
+ *
+ * Copyright 2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_PCA9450_H
+#define __LINUX_MFD_PCA9450_H
+
+#include <linux/regmap.h>
+
+enum {
+ PCA9450_BUCK1 = 0,
+ PCA9450_BUCK2,
+ PCA9450_BUCK3,
+ PCA9450_BUCK4,
+ PCA9450_BUCK5,
+ PCA9450_BUCK6,
+ PCA9450_LDO1,
+ PCA9450_LDO2,
+ PCA9450_LDO3,
+ PCA9450_LDO4,
+ PCA9450_LDO5,
+ PCA9450_REGULATOR_CNT,
+};
+
+#define PCA9450_SUPPLY_STATE_ENABLED 0x1
+
+#define PCA9450_BUCK1_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK2_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK3_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK4_VOLTAGE_NUM 0x80
+
+#define PCA9450_BUCK5_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK6_VOLTAGE_NUM 0x80
+
+#define PCA9450_LDO1_VOLTAGE_NUM 0x08
+#define PCA9450_LDO2_VOLTAGE_NUM 0x08
+#define PCA9450_LDO3_VOLTAGE_NUM 0x20
+#define PCA9450_LDO4_VOLTAGE_NUM 0x20
+#define PCA9450_LDO5_VOLTAGE_NUM 0x10
+
+enum {
+ PCA9450_REG_DEV_ID = 0x00,
+ PCA9450_INT1 = 0x01,
+ PCA9450_INT1_MSK = 0x02,
+ PCA9450_STATUS1 = 0x03,
+ PCA9450_STATUS2 = 0x04,
+ PCA9450_PWRON_STAT = 0x05,
+ PCA9450_SW_RST = 0x06,
+ PCA9450_PWR_CTRL = 0x07,
+ PCA9450_RESET_CTRL = 0x08,
+ PCA9450_CONFIG1 = 0x09,
+ PCA9450_CONFIG2 = 0x0A,
+ PCA9450_BUCK123_DVS = 0x0C,
+ PCA9450_BUCK1OUT_LIMIT = 0x0D,
+ PCA9450_BUCK2OUT_LIMIT = 0x0E,
+ PCA9450_BUCK3OUT_LIMIT = 0x0F,
+ PCA9450_BUCK1CTRL = 0x10,
+ PCA9450_BUCK1OUT_DVS0 = 0x11,
+ PCA9450_BUCK1OUT_DVS1 = 0x12,
+ PCA9450_BUCK2CTRL = 0x13,
+ PCA9450_BUCK2OUT_DVS0 = 0x14,
+ PCA9450_BUCK2OUT_DVS1 = 0x15,
+ PCA9450_BUCK3CTRL = 0x16,
+ PCA9450_BUCK3OUT_DVS0 = 0x17,
+ PCA9450_BUCK3OUT_DVS1 = 0x18,
+ PCA9450_BUCK4CTRL = 0x19,
+ PCA9450_BUCK4OUT = 0x1A,
+ PCA9450_BUCK5CTRL = 0x1B,
+ PCA9450_BUCK5OUT = 0x1C,
+ PCA9450_BUCK6CTRL = 0x1D,
+ PCA9450_BUCK6OUT = 0x1E,
+ PCA9450_LDO_AD_CTRL = 0x20,
+ PCA9450_LDO1CTRL = 0x21,
+ PCA9450_LDO2CTRL = 0x22,
+ PCA9450_LDO3CTRL = 0x23,
+ PCA9450_LDO4CTRL = 0x24,
+ PCA9450_LDO5CTRL_L = 0x25,
+ PCA9450_LDO5CTRL_H = 0x26,
+ PCA9450_LOADSW_CTRL = 0x2A,
+ PCA9450_VRFLT1_STS = 0x2B,
+ PCA9450_VRFLT2_STS = 0x2C,
+ PCA9450_VRFLT1_MASK = 0x2D,
+ PCA9450_VRFLT2_MASK = 0x2E,
+ PCA9450_MAX_REGISTER = 0x2F,
+};
+
+/* PCA9450 BUCK ENMODE bits */
+#define BUCK_ENMODE_OFF 0x00
+#define BUCK_ENMODE_ONREQ 0x01
+#define BUCK_ENMODE_ONREQ_STBYREQ 0x02
+#define BUCK_ENMODE_ON 0x03
+
+/* PCA9450_REG_BUCK1_CTRL bits */
+#define BUCK1_RAMP_MASK 0xC0
+#define BUCK1_RAMP_25MV 0x0
+#define BUCK1_RAMP_12P5MV 0x1
+#define BUCK1_RAMP_6P25MV 0x2
+#define BUCK1_RAMP_3P125MV 0x3
+#define BUCK1_DVS_CTRL 0x10
+#define BUCK1_AD 0x08
+#define BUCK1_FPWM 0x04
+#define BUCK1_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK2_CTRL bits */
+#define BUCK2_RAMP_MASK 0xC0
+#define BUCK2_RAMP_25MV 0x0
+#define BUCK2_RAMP_12P5MV 0x1
+#define BUCK2_RAMP_6P25MV 0x2
+#define BUCK2_RAMP_3P125MV 0x3
+#define BUCK2_DVS_CTRL 0x10
+#define BUCK2_AD 0x08
+#define BUCK2_FPWM 0x04
+#define BUCK2_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK3_CTRL bits */
+#define BUCK3_RAMP_MASK 0xC0
+#define BUCK3_RAMP_25MV 0x0
+#define BUCK3_RAMP_12P5MV 0x1
+#define BUCK3_RAMP_6P25MV 0x2
+#define BUCK3_RAMP_3P125MV 0x3
+#define BUCK3_DVS_CTRL 0x10
+#define BUCK3_AD 0x08
+#define BUCK3_FPWM 0x04
+#define BUCK3_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK4_CTRL bits */
+#define BUCK4_AD 0x08
+#define BUCK4_FPWM 0x04
+#define BUCK4_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK5_CTRL bits */
+#define BUCK5_AD 0x08
+#define BUCK5_FPWM 0x04
+#define BUCK5_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK6_CTRL bits */
+#define BUCK6_AD 0x08
+#define BUCK6_FPWM 0x04
+#define BUCK6_ENMODE_MASK 0x03
+
+/* PCA9450_BUCK1OUT_DVS0 bits */
+#define BUCK1OUT_DVS0_MASK 0x7F
+#define BUCK1OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK1OUT_DVS1 bits */
+#define BUCK1OUT_DVS1_MASK 0x7F
+#define BUCK1OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_BUCK2OUT_DVS0 bits */
+#define BUCK2OUT_DVS0_MASK 0x7F
+#define BUCK2OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK2OUT_DVS1 bits */
+#define BUCK2OUT_DVS1_MASK 0x7F
+#define BUCK2OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_BUCK3OUT_DVS0 bits */
+#define BUCK3OUT_DVS0_MASK 0x7F
+#define BUCK3OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK3OUT_DVS1 bits */
+#define BUCK3OUT_DVS1_MASK 0x7F
+#define BUCK3OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_REG_BUCK4OUT bits */
+#define BUCK4OUT_MASK 0x7F
+#define BUCK4OUT_DEFAULT 0x6C
+
+/* PCA9450_REG_BUCK5OUT bits */
+#define BUCK5OUT_MASK 0x7F
+#define BUCK5OUT_DEFAULT 0x30
+
+/* PCA9450_REG_BUCK6OUT bits */
+#define BUCK6OUT_MASK 0x7F
+#define BUCK6OUT_DEFAULT 0x14
+
+/* PCA9450_REG_IRQ bits */
+#define IRQ_PWRON 0x80
+#define IRQ_WDOGB 0x40
+#define IRQ_VR_FLT1 0x10
+#define IRQ_VR_FLT2 0x08
+#define IRQ_LOWVSYS 0x04
+#define IRQ_THERM_105 0x02
+#define IRQ_THERM_125 0x01
+
+/* PCA9450 interrupt masks */
+enum {
+ PCA9450_INT_MASK = 0xDF,
+};
+/* PCA9450 interrupt irqs */
+enum {
+ PCA9450_IRQ = 0x0,
+};
+
+/* PCA9450_REG_LDO1_VOLT bits */
+#define LDO1_EN_MASK 0xC0
+#define LDO1OUT_MASK 0x07
+
+/* PCA9450_REG_LDO2_VOLT bits */
+#define LDO2_EN_MASK 0xC0
+#define LDO2OUT_MASK 0x07
+
+/* PCA9450_REG_LDO3_VOLT bits */
+#define LDO3_EN_MASK 0xC0
+#define LDO3OUT_MASK 0x1F
+
+/* PCA9450_REG_LDO4_VOLT bits */
+#define LDO4_EN_MASK 0xC0
+#define LDO4OUT_MASK 0x1F
+
+/* PCA9450_REG_LDO5_VOLT bits */
+#define LDO5L_EN_MASK 0xC0
+#define LDO5LOUT_MASK 0x0F
+
+#define LDO5H_EN_MASK 0xC0
+#define LDO5HOUT_MASK 0x0F
+
+/*
+ * @brief Board platform data may be used to initialize regulators.
+ */
+
+struct pca9450_board {
+ struct regulator_init_data *init_data[PCA9450_REGULATOR_CNT];
+ int gpio_intr;
+ int irq_base;
+};
+
+/*
+ * @brief pca9450 sub-driver chip access routines
+ */
+
+struct pca9450 {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct regmap *regmap;
+ struct mutex io_mutex;
+ unsigned int id;
+
+ /* IRQ Handling */
+ int chip_irq;
+ struct regmap_irq_chip_data *irq_data;
+
+ /* Client devices */
+ struct pca9450_pmic *pmic;
+ struct pca9450_power *power;
+
+ struct pca9450_board *of_plat_data;
+};
+
+static inline int pca9450_chip_id(struct pca9450 *pca9450)
+{
+ return pca9450->id;
+}
+
+/*
+ * @brief pca9450_reg_read
+ * read single register's value of pca9450
+ * @param pca9450 device to read
+ * @param reg register address
+ * @return register value if success
+ * error number if fail
+ */
+static inline int pca9450_reg_read(struct pca9450 *pca9450, u8 reg)
+{
+	unsigned int val;
+	int r;
+
+	r = regmap_read(pca9450->regmap, reg, &val);
+	if (r < 0)
+		return r;
+
+	return val;
+}
+
+/*
+ * @brief pca9450_reg_write
+ * write single register of pca9450
+ * @param pca9450 device to write
+ * @param reg register address
+ * @param val value to write
+ * @retval 0 if success
+ * @retval negative error number if fail
+ */
+
+static inline int pca9450_reg_write(struct pca9450 *pca9450, u8 reg,
+ unsigned int val)
+{
+ return regmap_write(pca9450->regmap, reg, val);
+}
+
+/*
+ * @brief pca9450_set_bits
+ * set bits in one register of pca9450
+ * @param pca9450 device to read
+ * @param reg register address
+ * @param mask mask bits
+ * @retval 0 if success
+ * @retval negative error number if fail
+ */
+static inline int pca9450_set_bits(struct pca9450 *pca9450, u8 reg, u8 mask)
+{
+ return regmap_update_bits(pca9450->regmap, reg, mask, mask);
+}
+
+/*
+ * @brief pca9450_clear_bits
+ * clear bits in one register of pca9450
+ * @param pca9450 device to read
+ * @param reg register address
+ * @param mask mask bits
+ * @retval 0 if success
+ * @retval negative error number if fail
+ */
+
+static inline int pca9450_clear_bits(struct pca9450 *pca9450, u8 reg,
+ u8 mask)
+{
+ return regmap_update_bits(pca9450->regmap, reg, mask, 0);
+}
+
+/*
+ * @brief pca9450_update_bits
+ * update bits in one register of pca9450
+ * @param pca9450 device to read
+ * @param reg register address
+ * @param mask mask bits
+ * @param val value to update
+ * @retval 0 if success
+ * @retval negative error number if fail
+ */
+
+static inline int pca9450_update_bits(struct pca9450 *pca9450, u8 reg,
+ u8 mask, u8 val)
+{
+ return regmap_update_bits(pca9450->regmap, reg, mask, val);
+}
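+
+/*
+ * Usage sketch (illustrative; "pca" is an assumed driver handle):
+ * force BUCK1 into forced-PWM mode, then read the control register back.
+ *
+ *	ret = pca9450_set_bits(pca, PCA9450_BUCK1CTRL, BUCK1_FPWM);
+ *	if (!ret)
+ *		val = pca9450_reg_read(pca, PCA9450_BUCK1CTRL);
+ */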
+
+/*
+ * @brief pca9450 platform data type
+ */
+struct pca9450_gpo_plat_data {
+ u32 drv;
+ int gpio_base;
+};
+
+u8 ext_pca9450_reg_read8(u8 reg);
+int ext_pca9450_reg_write8(int reg, u8 val);
+#endif /* __LINUX_MFD_PCA9450_H */
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
index 4708c2b8512a..e591c050dce1 100644
--- a/include/linux/mfd/si476x-core.h
+++ b/include/linux/mfd/si476x-core.h
@@ -484,6 +484,8 @@ enum si476x_common_receiver_properties {
SI476X_PROP_DIGITAL_IO_OUTPUT_SAMPLE_RATE = 0x0202,
SI476X_PROP_DIGITAL_IO_OUTPUT_FORMAT = 0x0203,
+ SI476X_PROP_AUDIO_MUTE = 0x0301,
+
SI476X_PROP_SEEK_BAND_BOTTOM = 0x1100,
SI476X_PROP_SEEK_BAND_TOP = 0x1101,
SI476X_PROP_SEEK_FREQUENCY_SPACING = 0x1102,
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d4b5e527a7a3..71ef2acb3c57 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -240,8 +240,25 @@
#define IMX6Q_GPR4_IPU_RD_CACHE_CTL BIT(0)
#define IMX6Q_GPR5_L2_CLK_STOP BIT(8)
+#define IMX6Q_GPR5_ENET_TX_CLK_SEL BIT(9)
#define IMX6Q_GPR5_SATA_SW_PD BIT(10)
#define IMX6Q_GPR5_SATA_SW_RST BIT(11)
+#define IMX6Q_GPR5_PRE_PRG_SEL0_MASK (0x3 << 12)
+#define IMX6Q_GPR5_PRE_PRG_SEL0_SHIFT 12
+#define IMX6Q_GPR5_PRE_PRG_SEL0_MSB 13
+#define IMX6Q_GPR5_PRE_PRG_SEL0_LSB 12
+#define IMX6Q_GPR5_PRE_PRG_SEL0_PRE1_PRG0_CHAN1 (0x0 << 12)
+#define IMX6Q_GPR5_PRE_PRG_SEL0_PRE1_PRG0_CHAN2 (0x1 << 12)
+#define IMX6Q_GPR5_PRE_PRG_SEL0_PRE1_PRG1_CHAN1 (0x2 << 12)
+#define IMX6Q_GPR5_PRE_PRG_SEL0_PRE1_PRG1_CHAN2 (0x3 << 12)
+#define IMX6Q_GPR5_PRE_PRG_SEL1_MASK (0x3 << 14)
+#define IMX6Q_GPR5_PRE_PRG_SEL1_SHIFT 14
+#define IMX6Q_GPR5_PRE_PRG_SEL1_MSB 15
+#define IMX6Q_GPR5_PRE_PRG_SEL1_LSB 14
+#define IMX6Q_GPR5_PRE_PRG_SEL1_PRE2_PRG0_CHAN1 (0x0 << 14)
+#define IMX6Q_GPR5_PRE_PRG_SEL1_PRE2_PRG0_CHAN2 (0x1 << 14)
+#define IMX6Q_GPR5_PRE_PRG_SEL1_PRE2_PRG1_CHAN1 (0x2 << 14)
+#define IMX6Q_GPR5_PRE_PRG_SEL1_PRE2_PRG1_CHAN2 (0x3 << 14)
#define IMX6Q_GPR6_IPU1_ID00_WR_QOS_MASK (0xf << 0)
#define IMX6Q_GPR6_IPU1_ID01_WR_QOS_MASK (0xf << 4)
@@ -285,15 +302,15 @@
#define IMX6Q_GPR10_OCRAM_TZ_ADDR_MASK (0x3f << 5)
#define IMX6Q_GPR10_OCRAM_TZ_EN_MASK BIT(4)
#define IMX6Q_GPR10_DCIC2_MUX_CTL_MASK (0x3 << 2)
-#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI0 (0x0 << 2)
-#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI1 (0x1 << 2)
-#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU2_DI0 (0x2 << 2)
-#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU2_DI1 (0x3 << 2)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI1 (0x0 << 2)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_LVDS0 (0x1 << 2)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_LVDS1 (0x2 << 2)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_MIPI (0x3 << 2)
#define IMX6Q_GPR10_DCIC1_MUX_CTL_MASK (0x3 << 0)
#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI0 (0x0 << 0)
-#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI1 (0x1 << 0)
-#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU2_DI0 (0x2 << 0)
-#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU2_DI1 (0x3 << 0)
+#define IMX6Q_GPR10_DCIC1_MUX_CTL_LVDS0 (0x1 << 0)
+#define IMX6Q_GPR10_DCIC1_MUX_CTL_LVDS1 (0x2 << 0)
+#define IMX6Q_GPR10_DCIC1_MUX_CTL_HDMI (0x3 << 0)
#define IMX6Q_GPR12_ARMP_IPG_CLK_EN BIT(27)
#define IMX6Q_GPR12_ARMP_AHB_CLK_EN BIT(26)
@@ -419,6 +436,15 @@
#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
+#define IMX6SX_GPR2_MQS_OVERSAMPLE_MASK (0x1 << 26)
+#define IMX6SX_GPR2_MQS_OVERSAMPLE_SHIFT (26)
+#define IMX6SX_GPR2_MQS_EN_MASK (0x1 << 25)
+#define IMX6SX_GPR2_MQS_EN_SHIFT (25)
+#define IMX6SX_GPR2_MQS_SW_RST_MASK (0x1 << 24)
+#define IMX6SX_GPR2_MQS_SW_RST_SHIFT (24)
+#define IMX6SX_GPR2_MQS_CLK_DIV_MASK (0xFF << 16)
+#define IMX6SX_GPR2_MQS_CLK_DIV_SHIFT (16)
+
#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3)
#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3)
#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3)
@@ -450,6 +476,16 @@
#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0)
#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
+/* For imx6dl iomux gpr register field definitions */
+#define IMX6DL_GPR3_LVDS1_MUX_CTL_MASK (0x3 << 8)
+#define IMX6DL_GPR3_LVDS1_MUX_CTL_IPU1_DI0 (0x0 << 8)
+#define IMX6DL_GPR3_LVDS1_MUX_CTL_IPU1_DI1 (0x1 << 8)
+#define IMX6DL_GPR3_LVDS1_MUX_CTL_LCDIF (0x2 << 8)
+#define IMX6DL_GPR3_LVDS0_MUX_CTL_MASK (0x3 << 6)
+#define IMX6DL_GPR3_LVDS0_MUX_CTL_IPU1_DI0 (0x0 << 6)
+#define IMX6DL_GPR3_LVDS0_MUX_CTL_IPU1_DI1 (0x1 << 6)
+#define IMX6DL_GPR3_LVDS0_MUX_CTL_LCDIF (0x2 << 6)
+
/* For imx6ul iomux gpr register field define */
#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
index 3d46907bab89..4b9797c78395 100644
--- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -31,6 +31,8 @@
#define IOMUXC_GPR22 0x58
/* For imx7d iomux gpr register field define */
+#define IMX7D_GPR0_ENET_MDIO_OPEN_DRAIN_MASK (0x3 << 7)
+
#define IMX7D_GPR1_IRQ_MASK (0x1 << 12)
#define IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK (0x1 << 13)
#define IMX7D_GPR1_ENET2_TX_CLK_SEL_MASK (0x1 << 14)
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 4ce8901a1af6..18c6208f56fc 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -373,6 +373,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
}
/**
+ * mii_lpa_mod_linkmode_lpa_sgmii
+ * @lp_advertising: pointer to destination link mode.
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to
+ * linkmode advertisement settings for SGMII.
+ * Leaves other bits unchanged.
+ */
+static inline void
+mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa)
+{
+ u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_1000HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_1000FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_100HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_100FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_10HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_10FULL);
+}
+
+/**
+ * mii_lpa_to_linkmode_lpa_sgmii
+ * @lp_advertising: pointer to destination link mode.
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to linkmode
+ * link-partner advertisement settings when in SGMII mode.
+ * Clears the old value of lp_advertising.
+ */
+static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ linkmode_zero(lp_advertising);
+
+ mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa);
+}
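+
+/*
+ * Example (illustrative): a PCS or PHY driver that has read the SGMII
+ * link-partner word from MII_LPA would typically do
+ *
+ *	lpa = mdiobus_read(bus, addr, MII_LPA);
+ *	mii_lpa_to_linkmode_lpa_sgmii(state->lp_advertising, lpa);
+ */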
+
+/**
* mii_adv_mod_linkmode_adv_t
* @advertising:pointer to destination link mode.
* @adv: value of the MII_ADVERTISE register
diff --git a/include/linux/mipi_csi2.h b/include/linux/mipi_csi2.h
new file mode 100644
index 000000000000..d750ed9b02d5
--- /dev/null
+++ b/include/linux/mipi_csi2.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2019 NXP
+ */
+
+#ifndef __INCLUDE_MIPI_CSI2_H
+#define __INCLUDE_MIPI_CSI2_H
+
+/* MIPI CSI2 registers */
+#define MIPI_CSI2_REG(offset) (offset)
+
+#define MIPI_CSI2_VERSION MIPI_CSI2_REG(0x000)
+#define MIPI_CSI2_N_LANES MIPI_CSI2_REG(0x004)
+#define MIPI_CSI2_PHY_SHUTDOWNZ MIPI_CSI2_REG(0x008)
+#define MIPI_CSI2_DPHY_RSTZ MIPI_CSI2_REG(0x00c)
+#define MIPI_CSI2_CSI2_RESETN MIPI_CSI2_REG(0x010)
+#define MIPI_CSI2_PHY_STATE MIPI_CSI2_REG(0x014)
+#define MIPI_CSI2_DATA_IDS_1 MIPI_CSI2_REG(0x018)
+#define MIPI_CSI2_DATA_IDS_2 MIPI_CSI2_REG(0x01c)
+#define MIPI_CSI2_ERR1 MIPI_CSI2_REG(0x020)
+#define MIPI_CSI2_ERR2 MIPI_CSI2_REG(0x024)
+#define MIPI_CSI2_MASK1 MIPI_CSI2_REG(0x028)
+#define MIPI_CSI2_MASK2 MIPI_CSI2_REG(0x02c)
+#define MIPI_CSI2_PHY_TST_CTRL0 MIPI_CSI2_REG(0x030)
+#define MIPI_CSI2_PHY_TST_CTRL1 MIPI_CSI2_REG(0x034)
+#define MIPI_CSI2_SFT_RESET MIPI_CSI2_REG(0xf00)
+
+/* mipi data type */
+#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */
+#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */
+#define MIPI_DT_YUV422 0x1e /* UYVY... */
+#define MIPI_DT_RGB444 0x20
+#define MIPI_DT_RGB555 0x21
+#define MIPI_DT_RGB565 0x22
+#define MIPI_DT_RGB666 0x23
+#define MIPI_DT_RGB888 0x24
+#define MIPI_DT_RAW6 0x28
+#define MIPI_DT_RAW7 0x29
+#define MIPI_DT_RAW8 0x2a
+#define MIPI_DT_RAW10 0x2b
+#define MIPI_DT_RAW12 0x2c
+#define MIPI_DT_RAW14 0x2d
+
+struct mipi_csi2_info;
+/* mipi csi2 API */
+struct mipi_csi2_info *mipi_csi2_get_info(void);
+
+bool mipi_csi2_enable(struct mipi_csi2_info *info);
+
+bool mipi_csi2_disable(struct mipi_csi2_info *info);
+
+bool mipi_csi2_get_status(struct mipi_csi2_info *info);
+
+int mipi_csi2_get_bind_ipu(struct mipi_csi2_info *info);
+
+unsigned int mipi_csi2_get_bind_csi(struct mipi_csi2_info *info);
+
+unsigned int mipi_csi2_get_virtual_channel(struct mipi_csi2_info *info);
+
+unsigned int mipi_csi2_set_lanes(struct mipi_csi2_info *info);
+
+unsigned int mipi_csi2_set_datatype(struct mipi_csi2_info *info,
+ unsigned int datatype);
+
+unsigned int mipi_csi2_get_datatype(struct mipi_csi2_info *info);
+
+unsigned int mipi_csi2_dphy_status(struct mipi_csi2_info *info);
+
+unsigned int mipi_csi2_get_error1(struct mipi_csi2_info *info);
+
+unsigned int mipi_csi2_get_error2(struct mipi_csi2_info *info);
+
+int mipi_csi2_pixelclk_enable(struct mipi_csi2_info *info);
+
+void mipi_csi2_pixelclk_disable(struct mipi_csi2_info *info);
+
+int mipi_csi2_reset(struct mipi_csi2_info *info);
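+
+/*
+ * Typical bring-up order (an illustrative sketch; error handling and
+ * the exact ordering in a real capture driver may differ):
+ *
+ *	info = mipi_csi2_get_info();
+ *	mipi_csi2_enable(info);
+ *	mipi_csi2_set_lanes(info);
+ *	mipi_csi2_set_datatype(info, MIPI_DT_YUV422);
+ *	mipi_csi2_reset(info);
+ *	mipi_csi2_pixelclk_enable(info);
+ */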
+
+#endif
diff --git a/include/linux/mipi_dsi.h b/include/linux/mipi_dsi.h
new file mode 100644
index 000000000000..dbc249a38f68
--- /dev/null
+++ b/include/linux/mipi_dsi.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2011-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef __INCLUDE_MIPI_DSI_H
+#define __INCLUDE_MIPI_DSI_H
+
+#define MIPI_DSI_VERSION (0x000)
+#define MIPI_DSI_PWR_UP (0x004)
+#define MIPI_DSI_CLKMGR_CFG (0x008)
+#define MIPI_DSI_DPI_CFG (0x00c)
+#define MIPI_DSI_DBI_CFG (0x010)
+#define MIPI_DSI_DBIS_CMDSIZE (0x014)
+#define MIPI_DSI_PCKHDL_CFG (0x018)
+#define MIPI_DSI_VID_MODE_CFG (0x01c)
+#define MIPI_DSI_VID_PKT_CFG (0x020)
+#define MIPI_DSI_CMD_MODE_CFG (0x024)
+#define MIPI_DSI_TMR_LINE_CFG (0x028)
+#define MIPI_DSI_VTIMING_CFG (0x02c)
+#define MIPI_DSI_PHY_TMR_CFG (0x030)
+#define MIPI_DSI_GEN_HDR (0x034)
+#define MIPI_DSI_GEN_PLD_DATA (0x038)
+#define MIPI_DSI_CMD_PKT_STATUS (0x03c)
+#define MIPI_DSI_TO_CNT_CFG (0x040)
+#define MIPI_DSI_ERROR_ST0 (0x044)
+#define MIPI_DSI_ERROR_ST1 (0x048)
+#define MIPI_DSI_ERROR_MSK0 (0x04c)
+#define MIPI_DSI_ERROR_MSK1 (0x050)
+#define MIPI_DSI_PHY_RSTZ (0x054)
+#define MIPI_DSI_PHY_IF_CFG (0x058)
+#define MIPI_DSI_PHY_IF_CTRL (0x05c)
+#define MIPI_DSI_PHY_STATUS (0x060)
+#define MIPI_DSI_PHY_TST_CTRL0 (0x064)
+#define MIPI_DSI_PHY_TST_CTRL1 (0x068)
+
+#define DSI_PWRUP_RESET (0x0 << 0)
+#define DSI_PWRUP_POWERUP (0x1 << 0)
+
+#define DSI_DPI_CFG_VID_SHIFT (0)
+#define DSI_DPI_CFG_VID_MASK (0x3)
+#define DSI_DPI_CFG_COLORCODE_SHIFT (2)
+#define DSI_DPI_CFG_COLORCODE_MASK (0x7)
+#define DSI_DPI_CFG_DATAEN_ACT_LOW (0x1 << 5)
+#define DSI_DPI_CFG_DATAEN_ACT_HIGH (0x0 << 5)
+#define DSI_DPI_CFG_VSYNC_ACT_LOW (0x1 << 6)
+#define DSI_DPI_CFG_VSYNC_ACT_HIGH (0x0 << 6)
+#define DSI_DPI_CFG_HSYNC_ACT_LOW (0x1 << 7)
+#define DSI_DPI_CFG_HSYNC_ACT_HIGH (0x0 << 7)
+#define DSI_DPI_CFG_SHUTD_ACT_LOW (0x1 << 8)
+#define DSI_DPI_CFG_SHUTD_ACT_HIGH (0x0 << 8)
+#define DSI_DPI_CFG_COLORMODE_ACT_LOW (0x1 << 9)
+#define DSI_DPI_CFG_COLORMODE_ACT_HIGH (0x0 << 9)
+#define DSI_DPI_CFG_EN18LOOSELY (0x1 << 10)
+
+#define DSI_PCKHDL_CFG_EN_EOTP_TX (0x1 << 0)
+#define DSI_PCKHDL_CFG_EN_EOTP_RX (0x1 << 1)
+#define DSI_PCKHDL_CFG_EN_BTA (0x1 << 2)
+#define DSI_PCKHDL_CFG_EN_ECC_RX (0x1 << 3)
+#define DSI_PCKHDL_CFG_EN_CRC_RX (0x1 << 4)
+#define DSI_PCKHDL_CFG_GEN_VID_RX_MASK (0x3)
+#define DSI_PCKHDL_CFG_GEN_VID_RX_SHIFT (5)
+
+#define DSI_VID_MODE_CFG_EN (0x1 << 0)
+#define DSI_VID_MODE_CFG_EN_BURSTMODE (0x3 << 1)
+#define DSI_VID_MODE_CFG_TYPE_MASK (0x3)
+#define DSI_VID_MODE_CFG_TYPE_SHIFT (1)
+#define DSI_VID_MODE_CFG_EN_LP_VSA (0x1 << 3)
+#define DSI_VID_MODE_CFG_EN_LP_VBP (0x1 << 4)
+#define DSI_VID_MODE_CFG_EN_LP_VFP (0x1 << 5)
+#define DSI_VID_MODE_CFG_EN_LP_VACT (0x1 << 6)
+#define DSI_VID_MODE_CFG_EN_LP_HBP (0x1 << 7)
+#define DSI_VID_MODE_CFG_EN_LP_HFP (0x1 << 8)
+#define DSI_VID_MODE_CFG_EN_MULTI_PKT (0x1 << 9)
+#define DSI_VID_MODE_CFG_EN_NULL_PKT (0x1 << 10)
+#define DSI_VID_MODE_CFG_EN_FRAME_ACK (0x1 << 11)
+#define DSI_VID_MODE_CFG_EN_LP_MODE (DSI_VID_MODE_CFG_EN_LP_VSA | \
+ DSI_VID_MODE_CFG_EN_LP_VBP | \
+ DSI_VID_MODE_CFG_EN_LP_VFP | \
+ DSI_VID_MODE_CFG_EN_LP_HFP | \
+ DSI_VID_MODE_CFG_EN_LP_HBP | \
+ DSI_VID_MODE_CFG_EN_LP_VACT)
+
+#define DSI_VID_PKT_CFG_VID_PKT_SZ_MASK (0x7ff)
+#define DSI_VID_PKT_CFG_VID_PKT_SZ_SHIFT (0)
+#define DSI_VID_PKT_CFG_NUM_CHUNKS_MASK (0x3ff)
+#define DSI_VID_PKT_CFG_NUM_CHUNKS_SHIFT (11)
+#define DSI_VID_PKT_CFG_NULL_PKT_SZ_MASK (0x3ff)
+#define DSI_VID_PKT_CFG_NULL_PKT_SZ_SHIFT (21)
+
+#define MIPI_DSI_CMD_MODE_CFG_EN_LOWPOWER (0x1FFF)
+#define MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE (0x1 << 0)
+
+#define DSI_TME_LINE_CFG_HSA_TIME_MASK (0x1ff)
+#define DSI_TME_LINE_CFG_HSA_TIME_SHIFT (0)
+#define DSI_TME_LINE_CFG_HBP_TIME_MASK (0x1ff)
+#define DSI_TME_LINE_CFG_HBP_TIME_SHIFT (9)
+#define DSI_TME_LINE_CFG_HLINE_TIME_MASK (0x3fff)
+#define DSI_TME_LINE_CFG_HLINE_TIME_SHIFT (18)
+
+#define DSI_VTIMING_CFG_VSA_LINES_MASK (0xf)
+#define DSI_VTIMING_CFG_VSA_LINES_SHIFT (0)
+#define DSI_VTIMING_CFG_VBP_LINES_MASK (0x3f)
+#define DSI_VTIMING_CFG_VBP_LINES_SHIFT (4)
+#define DSI_VTIMING_CFG_VFP_LINES_MASK (0x3f)
+#define DSI_VTIMING_CFG_VFP_LINES_SHIFT (10)
+#define DSI_VTIMING_CFG_V_ACT_LINES_MASK (0x7ff)
+#define DSI_VTIMING_CFG_V_ACT_LINES_SHIFT (16)
+
+#define DSI_PHY_TMR_CFG_BTA_TIME_MASK (0xfff)
+#define DSI_PHY_TMR_CFG_BTA_TIME_SHIFT (0)
+#define DSI_PHY_TMR_CFG_LP2HS_TIME_MASK (0xff)
+#define DSI_PHY_TMR_CFG_LP2HS_TIME_SHIFT (12)
+#define DSI_PHY_TMR_CFG_HS2LP_TIME_MASK (0xff)
+#define DSI_PHY_TMR_CFG_HS2LP_TIME_SHIFT (20)
+
+#define DSI_PHY_IF_CFG_N_LANES_MASK (0x3)
+#define DSI_PHY_IF_CFG_N_LANES_SHIFT (0)
+#define DSI_PHY_IF_CFG_WAIT_TIME_MASK (0xff)
+#define DSI_PHY_IF_CFG_WAIT_TIME_SHIFT (2)
+
+#define DSI_PHY_RSTZ_EN_CLK (0x1 << 2)
+#define DSI_PHY_RSTZ_DISABLE_RST (0x1 << 1)
+#define DSI_PHY_RSTZ_DISABLE_SHUTDOWN (0x1 << 0)
+#define DSI_PHY_RSTZ_RST (0x0)
+
+#define DSI_PHY_STATUS_LOCK (0x1 << 0)
+#define DSI_PHY_STATUS_STOPSTATE_CLK_LANE (0x1 << 2)
+
+#define DSI_GEN_HDR_TYPE_MASK (0xff)
+#define DSI_GEN_HDR_TYPE_SHIFT (0)
+#define DSI_GEN_HDR_DATA_MASK (0xffff)
+#define DSI_GEN_HDR_DATA_SHIFT (8)
+
+#define DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY (0x1 << 0)
+#define DSI_CMD_PKT_STATUS_GEN_CMD_FULL (0x1 << 1)
+#define DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY (0x1 << 2)
+#define DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL (0x1 << 3)
+#define DSI_CMD_PKT_STATUS_GEN_PLD_R_EMPTY (0x1 << 4)
+#define DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY (0x1 << 6)
+
+#define DSI_ERROR_MSK0_ALL_MASK (0x1fffff)
+#define DSI_ERROR_MSK1_ALL_MASK (0x3ffff)
+
+#define DSI_PHY_IF_CTRL_RESET (0x0)
+#define DSI_PHY_IF_CTRL_TX_REQ_CLK_HS (0x1 << 0)
+#define DSI_PHY_IF_CTRL_TX_REQ_CLK_ULPS (0x1 << 1)
+#define DSI_PHY_IF_CTRL_TX_EXIT_CLK_ULPS (0x1 << 2)
+#define DSI_PHY_IF_CTRL_TX_REQ_DATA_ULPS (0x1 << 3)
+#define DSI_PHY_IF_CTRL_TX_EXIT_DATA_ULPS (0x1 << 4)
+#define DSI_PHY_IF_CTRL_TX_TRIG_MASK (0xF)
+#define DSI_PHY_IF_CTRL_TX_TRIG_SHIFT (5)
+
+#define DSI_PHY_CLK_INIT_COMMAND (0x44)
+#define DSI_GEN_PLD_DATA_BUF_SIZE (0x4)
+#endif
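
The generic packet registers above are enough to sketch a short-packet write: poll MIPI_DSI_CMD_PKT_STATUS until the command FIFO has room, then pack the data type and payload into MIPI_DSI_GEN_HDR. A minimal sketch, not the driver's actual code; the helper name, base pointer, and timeout policy are assumptions:

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Hypothetical helper: transmit one generic short packet. */
static int dsi_send_short_pkt(void __iomem *base, u8 type, u16 data)
{
	unsigned int retries = 1000;

	/* Wait for room in the generic command FIFO. */
	while (readl(base + MIPI_DSI_CMD_PKT_STATUS) &
	       DSI_CMD_PKT_STATUS_GEN_CMD_FULL) {
		if (!--retries)
			return -ETIMEDOUT;
		udelay(10);
	}

	/* Header layout: type in bits [7:0], data in bits [23:8]. */
	writel(((data & DSI_GEN_HDR_DATA_MASK) << DSI_GEN_HDR_DATA_SHIFT) |
	       ((type & DSI_GEN_HDR_TYPE_MASK) << DSI_GEN_HDR_TYPE_SHIFT),
	       base + MIPI_DSI_GEN_HDR);
	return 0;
}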
diff --git a/include/linux/mipi_dsi_northwest.h b/include/linux/mipi_dsi_northwest.h
new file mode 100644
index 000000000000..8cb379c85ef5
--- /dev/null
+++ b/include/linux/mipi_dsi_northwest.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef __INCLUDE_MIPI_DSI_NORTHWEST_H
+#define __INCLUDE_MIPI_DSI_NORTHWEST_H
+
+/* ---------------------------- register offsets --------------------------- */
+
+/* sim */
+#define SIM_SOPT1 0x0
+#define MIPI_ISO_DISABLE 0x8
+
+#define SIM_SOPT1CFG 0x4
+#define DSI_RST_DPI_N 0x80000000
+#define DSI_RST_ESC_N 0x40000000
+#define DSI_RST_BYTE_N 0x20000000
+#define DSI_SD 0x200
+#define DSI_CM 0x100
+#define DSI_PLL_EN 0x80
+
+/* SRC */
+#define SRC_MIPIPHY_RCR 0x28
+#define MIPI_DSI_RESET_BYTE_N 0x2
+#define MIPI_DSI_RESET_N 0x4
+#define MIPI_DSI_DPI_RESET_N 0x8
+#define MIPI_DSI_ESC_RESET_N 0x10
+#define MIPI_DSI_PCLK_RESET_N 0x20
+
+/* GPR */
+#define IOMUXC_GPR_GPR13 0x34
+#define GPR_MIPI_MUX_SEL 0x4
+
+/* dphy */
+#define DPHY_PD_DPHY 0x300
+#define DPHY_M_PRG_HS_PREPARE 0x304
+#define DPHY_MC_PRG_HS_PREPARE 0x308
+#define DPHY_M_PRG_HS_ZERO 0x30c
+#define DPHY_MC_PRG_HS_ZERO 0x310
+#define DPHY_M_PRG_HS_TRAIL 0x314
+#define DPHY_MC_PRG_HS_TRAIL 0x318
+#define DPHY_PD_PLL 0x31c
+#define DPHY_TST 0x320
+#define DPHY_CN 0x324
+#define DPHY_CM 0x328
+#define DPHY_CO 0x32c
+#define DPHY_LOCK 0x330
+#define DPHY_LOCK_BYP 0x334
+#define DPHY_RTERM_SEL 0x338
+#define DPHY_AUTO_PD_EN 0x33c
+#define DPHY_RXLPRP 0x340
+#define DPHY_RXCDRP 0x344
+
+/* host */
+#define HOST_CFG_NUM_LANES 0x0
+#define HOST_CFG_NONCONTINUOUS_CLK 0x4
+#define HOST_CFG_T_PRE 0x8
+#define HOST_CFG_T_POST 0xc
+#define HOST_CFG_TX_GAP 0x10
+#define HOST_CFG_AUTOINSERT_EOTP 0x14
+#define HOST_CFG_EXTRA_CMDS_AFTER_EOTP 0x18
+#define HOST_CFG_HTX_TO_COUNT 0x1c
+#define HOST_CFG_LRX_H_TO_COUNT 0x20
+#define HOST_CFG_BTA_H_TO_COUNT 0x24
+#define HOST_CFG_TWAKEUP 0x28
+#define HOST_CFG_STATUS_OUT 0x2c
+#define HOST_RX_ERROR_STATUS 0x30
+
+/* dpi */
+#define DPI_PIXEL_PAYLOAD_SIZE 0x200
+#define DPI_PIXEL_FIFO_SEND_LEVEL 0x204
+#define DPI_INTERFACE_COLOR_CODING 0x208
+#define DPI_PIXEL_FORMAT 0x20c
+#define DPI_VSYNC_POLARITY 0x210
+#define DPI_HSYNC_POLARITY 0x214
+#define DPI_VIDEO_MODE 0x218
+#define DPI_HFP 0x21c
+#define DPI_HBP 0x220
+#define DPI_HSA 0x224
+#define DPI_ENABLE_MULT_PKTS 0x228
+#define DPI_VBP 0x22c
+#define DPI_VFP 0x230
+#define DPI_BLLP_MODE 0x234
+#define DPI_USE_NULL_PKT_BLLP 0x238
+#define DPI_VACTIVE 0x23c
+#define DPI_VC 0x240
+
+/* apb pkt */
+#define HOST_TX_PAYLOAD 0x280
+
+#define HOST_PKT_CONTROL 0x284
+#define HOST_PKT_CONTROL_WC(x) (((x) & 0xffff) << 0)
+#define HOST_PKT_CONTROL_VC(x) (((x) & 0x3) << 16)
+#define HOST_PKT_CONTROL_DT(x) (((x) & 0x3f) << 18)
+#define HOST_PKT_CONTROL_HS_SEL(x) (((x) & 0x1) << 24)
+#define HOST_PKT_CONTROL_BTA_TX(x) (((x) & 0x1) << 25)
+#define HOST_PKT_CONTROL_BTA_NO_TX(x) (((x) & 0x1) << 26)
+
+#define HOST_SEND_PACKET 0x288
+#define HOST_PKT_STATUS 0x28c
+#define HOST_PKT_FIFO_WR_LEVEL 0x290
+#define HOST_PKT_FIFO_RD_LEVEL 0x294
+#define HOST_PKT_RX_PAYLOAD 0x298
+
+#define HOST_PKT_RX_PKT_HEADER 0x29c
+#define HOST_PKT_RX_PKT_HEADER_WC(x) (((x) & 0xffff) << 0)
+#define HOST_PKT_RX_PKT_HEADER_DT(x) (((x) & 0x3f) << 16)
+#define HOST_PKT_RX_PKT_HEADER_VC(x) (((x) & 0x3) << 22)
+
+#define HOST_IRQ_STATUS 0x2a0
+#define HOST_IRQ_STATUS_SM_NOT_IDLE (1 << 0)
+#define HOST_IRQ_STATUS_TX_PKT_DONE (1 << 1)
+#define HOST_IRQ_STATUS_DPHY_DIRECTION (1 << 2)
+#define HOST_IRQ_STATUS_TX_FIFO_OVFLW (1 << 3)
+#define HOST_IRQ_STATUS_TX_FIFO_UDFLW (1 << 4)
+#define HOST_IRQ_STATUS_RX_FIFO_OVFLW (1 << 5)
+#define HOST_IRQ_STATUS_RX_FIFO_UDFLW (1 << 6)
+#define HOST_IRQ_STATUS_RX_PKT_HDR_RCVD (1 << 7)
+#define HOST_IRQ_STATUS_RX_PKT_PAYLOAD_DATA_RCVD (1 << 8)
+#define HOST_IRQ_STATUS_HOST_BTA_TIMEOUT (1 << 29)
+#define HOST_IRQ_STATUS_LP_RX_TIMEOUT (1 << 30)
+#define HOST_IRQ_STATUS_HS_TX_TIMEOUT (1 << 31)
+
+#define HOST_IRQ_STATUS2 0x2a4
+#define HOST_IRQ_STATUS2_SINGLE_BIT_ECC_ERR (1 << 0)
+#define HOST_IRQ_STATUS2_MULTI_BIT_ECC_ERR (1 << 1)
+#define HOST_IRQ_STATUS2_CRC_ERR (1 << 2)
+
+#define HOST_IRQ_MASK 0x2a8
+#define HOST_IRQ_MASK_SM_NOT_IDLE_MASK (1 << 0)
+#define HOST_IRQ_MASK_TX_PKT_DONE_MASK (1 << 1)
+#define HOST_IRQ_MASK_DPHY_DIRECTION_MASK (1 << 2)
+#define HOST_IRQ_MASK_TX_FIFO_OVFLW_MASK (1 << 3)
+#define HOST_IRQ_MASK_TX_FIFO_UDFLW_MASK (1 << 4)
+#define HOST_IRQ_MASK_RX_FIFO_OVFLW_MASK (1 << 5)
+#define HOST_IRQ_MASK_RX_FIFO_UDFLW_MASK (1 << 6)
+#define HOST_IRQ_MASK_RX_PKT_HDR_RCVD_MASK (1 << 7)
+#define HOST_IRQ_MASK_RX_PKT_PAYLOAD_DATA_RCVD_MASK (1 << 8)
+#define HOST_IRQ_MASK_HOST_BTA_TIMEOUT_MASK (1 << 29)
+#define HOST_IRQ_MASK_LP_RX_TIMEOUT_MASK (1 << 30)
+#define HOST_IRQ_MASK_HS_TX_TIMEOUT_MASK (1 << 31)
+
+#define HOST_IRQ_MASK2 0x2ac
+#define HOST_IRQ_MASK2_SINGLE_BIT_ECC_ERR_MASK (1 << 0)
+#define HOST_IRQ_MASK2_MULTI_BIT_ECC_ERR_MASK (1 << 1)
+#define HOST_IRQ_MASK2_CRC_ERR_MASK (1 << 2)
+
+/* ------------------------------------- end -------------------------------- */
+
+#endif
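
The HOST_PKT_CONTROL field encoders compose a full control word in one expression, so a short-packet transmit reduces to one control write plus a kick of HOST_SEND_PACKET. A minimal sketch, assuming a mapped controller base; the helper name and the value written to HOST_SEND_PACKET are assumptions, not guaranteed by this header:

/* Sketch: queue a short packet on virtual channel 0 in high-speed mode. */
static void nwl_dsi_send_short(void __iomem *base, u8 data_type, u16 word_count)
{
	u32 ctrl = HOST_PKT_CONTROL_WC(word_count) |
		   HOST_PKT_CONTROL_VC(0) |
		   HOST_PKT_CONTROL_DT(data_type) |
		   HOST_PKT_CONTROL_HS_SEL(1);	/* high-speed, not LP */

	writel(ctrl, base + HOST_PKT_CONTROL);
	writel(1, base + HOST_SEND_PACKET);	/* trigger value assumed */
}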
diff --git a/include/linux/mipi_dsi_samsung.h b/include/linux/mipi_dsi_samsung.h
new file mode 100644
index 000000000000..e58da9541cfb
--- /dev/null
+++ b/include/linux/mipi_dsi_samsung.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef __INCLUDE_MIPI_DSI_SAMSUNG_H
+#define __INCLUDE_MIPI_DSI_SAMSUNG_H
+
+#define MIPI_DSI_VERSION (0x000)
+#define MIPI_DSI_STATUS (0x004)
+#define MIPI_DSI_RGB_STATUS (0x008)
+#define MIPI_DSI_SWRST (0x00c)
+#define MIPI_DSI_CLKCTRL (0x010)
+#define MIPI_DSI_TIMEOUT (0x014)
+#define MIPI_DSI_CONFIG (0x018)
+#define MIPI_DSI_ESCMODE (0x01c)
+#define MIPI_DSI_MDRESOL (0x020)
+#define MIPI_DSI_MVPORCH (0x024)
+#define MIPI_DSI_MHPORCH (0x028)
+#define MIPI_DSI_MSYNC (0x02c)
+#define MIPI_DSI_SDRESOL (0x030)
+#define MIPI_DSI_INTSRC (0x034)
+#define MIPI_DSI_INTMSK (0x038)
+#define MIPI_DSI_PKTHDR (0x03c)
+#define MIPI_DSI_PAYLOAD (0x040)
+#define MIPI_DSI_RXFIFO (0x044)
+#define MIPI_DSI_FIFOTHLD (0x048)
+#define MIPI_DSI_FIFOCTRL (0x04c)
+#define MIPI_DSI_MEMACCHR (0x050)
+#define MIPI_DSI_MULTI_PKT (0x078)
+#define MIPI_DSI_PLLCTRL_1G (0x090)
+#define MIPI_DSI_PLLCTRL (0x094)
+#define MIPI_DSI_PLLCTRL1 (0x098)
+#define MIPI_DSI_PLLCTRL2 (0x09c)
+#define MIPI_DSI_PLLTMR (0x0a0)
+#define MIPI_DSI_PHYCTRL_B1 (0x0a4)
+#define MIPI_DSI_PHYCTRL_B2 (0x0a8)
+#define MIPI_DSI_PHYCTRL_M1 (0x0a8)
+#define MIPI_DSI_PHYCTRL_M2 (0x0ac)
+#define MIPI_DSI_PHYTIMING (0x0b4)
+#define MIPI_DSI_PHYTIMING1 (0x0b8)
+#define MIPI_DSI_PHYTIMING2 (0x0bc)
+
+#define MIPI_DSI_SWRST_SWRST (0x1 << 0)
+#define MIPI_DSI_SWRST_FUNCRST (0x1 << 16)
+#define MIPI_DSI_MAIN_HRESOL(x) (((x) & 0x7ff) << 0)
+#define MIPI_DSI_MAIN_VRESOL(x) (((x) & 0x7ff) << 16)
+#define MIPI_DSI_MAIN_STANDBY(x) (((x) & 0x1) << 31)
+#define MIPI_DSI_MAIN_VBP(x) (((x) & 0x7ff) << 0)
+#define MIPI_DSI_STABLE_VFP(x) (((x) & 0x7ff) << 16)
+#define MIPI_DSI_CMDALLOW(x) (((x) & 0xf) << 28)
+#define MIPI_DSI_MAIN_HBP(x) (((x) & 0xffff) << 0)
+#define MIPI_DSI_MAIN_HFP(x) (((x) & 0xffff) << 16)
+#define MIPI_DSI_MAIN_VSA(x) (((x) & 0x3ff) << 22)
+#define MIPI_DSI_MAIN_HSA(x) (((x) & 0xffff) << 0)
+
+#define MIPI_DSI_LANE_EN(x) (((x) & 0x1f) << 0)
+#define MIPI_DSI_NUM_OF_DATALANE(x) (((x) & 0x3) << 5)
+#define MIPI_DSI_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
+#define MIPI_DSI_MAIN_PIX_FORMAT(x) (((x) & 0x7) << 12)
+#define MIPI_DSI_SUB_VC(x) (((x) & 0x3) << 16)
+#define MIPI_DSI_MAIN_VC(x) (((x) & 0x3) << 18)
+#define MIPI_DSI_HSA_DISABLE_MODE(x) (((x) & 0x1) << 20)
+#define MIPI_DSI_HBP_DISABLE_MODE(x) (((x) & 0x1) << 21)
+#define MIPI_DSI_HFP_DISABLE_MODE(x) (((x) & 0x1) << 22)
+#define MIPI_DSI_HSE_DISABLE_MODE(x) (((x) & 0x1) << 23)
+#define MIPI_DSI_AUTO_MODE(x) (((x) & 0x1) << 24)
+#define MIPI_DSI_VIDEO_MODE(x) (((x) & 0x1) << 25)
+#define MIPI_DSI_BURST_MODE(x) (((x) & 0x1) << 26)
+#define MIPI_DSI_SYNC_IN_FORM(x) (((x) & 0x1) << 27)
+#define MIPI_DSI_EOT_R03(x) (((x) & 0x1) << 28)
+#define MIPI_DSI_MFLUSH_VS(x) (((x) & 0x1) << 29)
+
+#define MIPI_DSI_DP_DN_SWAP_DATA (0x1 << 24)
+#define MIPI_DSI_PLL_EN(x) (((x) & 0x1) << 23)
+#define MIPI_DSI_PMS(x) (((x) & 0x7ffff) << 1)
+
+#define MIPI_DSI_TX_REQUEST_HSCLK(x) (((x) & 0x1) << 31)
+#define MIPI_DSI_DPHY_SEL(x) (((x) & 0x1) << 29)
+#define MIPI_DSI_ESC_CLK_EN(x) (((x) & 0x1) << 28)
+#define MIPI_DSI_PLL_BYPASS(x) (((x) & 0x1) << 27)
+#define MIPI_DSI_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define MIPI_DSI_BYTE_CLK_EN(x) (((x) & 0x1) << 24)
+#define MIPI_DSI_LANE_ESC_CLK_EN(x) (((x) & 0x1f) << 19)
+
+#define MIPI_DSI_FORCE_STOP_STATE(x) (((x) & 0x1) << 20)
+
+#define MIPI_DSI_M_TLPXCTL(x) (((x) & 0xff) << 8)
+#define MIPI_DSI_M_THSEXITCTL(x) (((x) & 0xff) << 0)
+
+#define MIPI_DSI_M_TCLKPRPRCTL(x) (((x) & 0xff) << 24)
+#define MIPI_DSI_M_TCLKZEROCTL(x) (((x) & 0xff) << 16)
+#define MIPI_DSI_M_TCLKPOSTCTL(x) (((x) & 0xff) << 8)
+#define MIPI_DSI_M_TCLKTRAILCTL(x) (((x) & 0xff) << 0)
+
+#define MIPI_DSI_M_THSPRPRCTL(x) (((x) & 0xff) << 16)
+#define MIPI_DSI_M_THSZEROCTL(x) (((x) & 0xff) << 8)
+#define MIPI_DSI_M_THSTRAILCTL(x) (((x) & 0xff) << 0)
+
+#define MIPI_DSI_PLL_STABLE(x) (((x) & 0x1) << 31)
+#define MIPI_DSI_TX_READY_HS_CLK(x) (((x) & 0x1) << 10)
+#define MIPI_DSI_ULPS_CLK(x) (((x) & 0x1) << 9)
+#define MIPI_DSI_STOP_STATE_CLK(x) (((x) & 0x1) << 8)
+#define MIPI_DSI_ULPS_DAT(x) (((x) & 0xf) << 4)
+#define MIPI_DSI_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
+
+#define INTSRC_SFR_PL_FIFO_EMPTY (0x1 << 29)
+#define INTSRC_SFR_PH_FIFO_EMPTY (0x1 << 28)
+#define INTSRC_RX_DATA_DONE (0x1 << 18)
+#define INTMSK_SFR_PL_FIFO_EMPTY (0x1 << 29)
+#define INTMSK_SFR_PH_FIFO_EMPTY (0x1 << 28)
+#define INTMSK_RX_DATA_DONE (0x1 << 18)
+
+#define MIPI_DSI_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
+#define MIPI_DSI_CMD_LPDT (0x1 << 7)
+#define MIPI_DSI_TX_LPDT (0x1 << 6)
+
+#endif
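
Since the MIPI_DSI_MAIN_* helpers are plain shift-and-mask encoders, programming the main display resolution is a single write of MIPI_DSI_MDRESOL. A minimal sketch, assuming a mapped register base; the helper name is illustrative:

/* Sketch: set the main video resolution and release standby. */
static void samsung_dsim_set_main_resol(void __iomem *base,
					u32 hactive, u32 vactive)
{
	writel(MIPI_DSI_MAIN_HRESOL(hactive) |
	       MIPI_DSI_MAIN_VRESOL(vactive) |
	       MIPI_DSI_MAIN_STANDBY(1),
	       base + MIPI_DSI_MDRESOL);
}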
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index 3549f8045784..1d554b858086 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -23,5 +23,6 @@ typedef unsigned int mmc_pm_flag_t;
#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */
#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY (1 << 2) /* ignore mmc pm notify */
#endif /* LINUX_MMC_PM_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 08b25c02b5a1..d28769db1ecc 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -36,6 +36,7 @@
#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
+#define SDIO_DEVICE_ID_BROADCOM_43428 0xa9a4
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 4c56404e53a7..d12d053c98a5 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -827,4 +827,19 @@ struct wmi_device_id {
const void *context;
};
+/* vop */
+struct vop_device_id {
+ __u32 device;
+ __u32 vendor;
+};
+#define VOP_DEV_ANY_ID 0xffffffff
+
+/* cosm */
+#define COSM_NAME_SIZE 32
+#define COSM_MODULE_PREFIX "cosm:"
+
+struct cosm_device_id {
+ char name[COSM_NAME_SIZE];
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
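
A vop driver would publish a match table of these IDs much like a PCI driver, using VOP_DEV_ANY_ID as a wildcard. A hypothetical table, shown only to illustrate the struct layout and the zero-terminated convention assumed here:

/* Hypothetical: match every vop device; terminated by a zeroed entry. */
static const struct vop_device_id vop_match_any[] = {
	{ .device = VOP_DEV_ANY_ID, .vendor = VOP_DEV_ANY_ID },
	{ 0 },
};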
diff --git a/include/linux/mx8_mu.h b/include/linux/mx8_mu.h
new file mode 100644
index 000000000000..b31e52693db8
--- /dev/null
+++ b/include/linux/mx8_mu.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#define MU_ATR0_OFFSET1 0x0
+#define MU_ARR0_OFFSET1 0x10
+#define MU_ASR_OFFSET1 0x20
+#define MU_ACR_OFFSET1 0x24
+
+/* Registers offsets of the MU Version 1.0 */
+#define MU_V10_VER_OFFSET1 0x0
+#define MU_V10_ATR0_OFFSET1 0x20
+#define MU_V10_ARR0_OFFSET1 0x40
+#define MU_V10_ASR_OFFSET1 0x60
+#define MU_V10_ACR_OFFSET1 0x64
+#define MU_VER_ID_V10 0x0100 /* Version 1.0 */
+
+#define MU_TR_COUNT1 4
+#define MU_RR_COUNT1 4
+
+#define MU_CR_GIEn_MASK1 (0xF << 28)
+#define MU_CR_RIEn_MASK1 (0xF << 24)
+#define MU_CR_TIEn_MASK1 (0xF << 20)
+#define MU_CR_GIRn_MASK1 (0xF << 16)
+#define MU_CR_NMI_MASK1 (1 << 3)
+#define MU_CR_Fn_MASK1 0x7
+
+#define MU_SR_TE0_MASK1 (1 << 23)
+#define MU_SR_RF0_MASK1 (1 << 27)
+#define MU_CR_RIE0_MASK1 (1 << 27)
+#define MU_CR_GIE0_MASK1 (1 << 31)
+
+#define MU_TR_COUNT 4
+#define MU_RR_COUNT 4
+
+void MU_Init(void __iomem *base);
+void MU_SendMessage(void __iomem *base, uint32_t regIndex, uint32_t msg);
+void MU_SendMessageTimeout(void __iomem *base, uint32_t regIndex, uint32_t msg, uint32_t t);
+void MU_ReceiveMsg(void __iomem *base, uint32_t regIndex, uint32_t *msg);
+void MU_EnableGeneralInt(void __iomem *base, uint32_t index);
+void MU_EnableRxFullInt(void __iomem *base, uint32_t index);
+uint32_t MU_ReadStatus(void __iomem *base);
+int32_t MU_SetFn(void __iomem *base, uint32_t Fn);
+
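
The MU_* helpers above give a complete mailbox round trip: initialize the unit, then exchange one word per transmit/receive register. A minimal sketch, assuming the MU block is already mapped; MU_BASE_ADDR and the use of register index 0 are illustrative, not defined by this header:

#include <linux/io.h>
#include <linux/sizes.h>

void __iomem *mu_base = ioremap(MU_BASE_ADDR, SZ_4K);	/* MU_BASE_ADDR assumed */
uint32_t reply;

MU_Init(mu_base);
MU_EnableRxFullInt(mu_base, 0);		/* interrupt when RR0 fills */
MU_SendMessage(mu_base, 0, 0xdeadbeef);	/* write into TR0 */
MU_ReceiveMsg(mu_base, 0, &reply);	/* read back from RR0 */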
diff --git a/include/linux/mxc_dcic.h b/include/linux/mxc_dcic.h
new file mode 100644
index 000000000000..8e330bd466da
--- /dev/null
+++ b/include/linux/mxc_dcic.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+/*!
+ * @file linux/mxc_dcic.h
+ *
+ * @brief Global header file for the MXC DCIC driver
+ *
+ * @ingroup MXC DCIC
+ */
+
+#ifndef __LINUX_DCIC_H__
+#define __LINUX_DCIC_H__
+
+#include <uapi/linux/mxc_dcic.h>
+
+#define DCICC_IC_ENABLE 0x1
+#define DCICC_IC_DISABLE 0x0
+#define DCICC_IC_MASK 0x1
+#define DCICC_DE_ACTIVE_HIGH 0
+#define DCICC_DE_ACTIVE_LOW (0x1 << 4)
+#define DCICC_DE_ACTIVE_MASK (0x1 << 4)
+#define DCICC_HSYNC_POL_ACTIVE_HIGH 0
+#define DCICC_HSYNC_POL_ACTIVE_LOW (0x1 << 5)
+#define DCICC_HSYNC_POL_ACTIVE_MASK (0x1 << 5)
+#define DCICC_VSYNC_POL_ACTIVE_HIGH 0
+#define DCICC_VSYNC_POL_ACTIVE_LOW (0x1 << 6)
+#define DCICC_VSYNC_POL_ACTIVE_MASK (0x1 << 6)
+#define DCICC_CLK_POL_NO_INVERTED 0
+#define DCICC_CLK_POL_INVERTED (0x1 << 7)
+#define DCICC_CLK_POL_INVERTED_MASK (0x1 << 7)
+
+#define DCICIC_ERROR_INT_DISABLE 1
+#define DCICIC_ERROR_INT_ENABLE 0
+#define DCICIC_ERROR_INT_MASK_MASK 1
+#define DCICIC_FUN_INT_DISABLE (0x1 << 1)
+#define DCICIC_FUN_INT_ENABLE 0
+#define DCICIC_FUN_INT_MASK (0x1 << 1)
+#define DCICIC_FREEZE_MASK_CHANGED 0
+#define DCICIC_FREEZE_MASK_FORZEN (0x1 << 3)
+#define DCICIC_FREEZE_MASK_MASK (0x1 << 3)
+#define DCICIC_EXT_SIG_EX_DISABLE 0
+#define DCICIC_EXT_SIG_EN_ENABLE (0x1 << 16)
+#define DCICIC_EXT_SIG_EN_MASK (0x1 << 16)
+
+#define DCICS_ROI_MATCH_STAT_MASK 0xFFFF
+#define DCICS_EI_STAT_PENDING (0x1 << 16)
+#define DCICS_EI_STAT_NO_PENDING 0
+#define DCICS_FI_STAT_PENDING (0x1 << 17)
+#define DCICS_FI_STAT_NO_PENDING 0
+
+#define DCICRC_ROI_START_OFFSET_X_MASK 0x1FFF
+#define DCICRC_ROI_START_OFFSET_X_SHIFT 0
+#define DCICRC_ROI_START_OFFSET_Y_MASK (0xFFF << 16)
+#define DCICRC_ROI_START_OFFSET_Y_SHIFT 16
+#define DCICRC_ROI_CHANGED 0
+#define DCICRC_ROI_FROZEN (0x1 << 30)
+#define DCICRC_ROI_ENABLE (0x1 << 31)
+#define DCICRC_ROI_DISABLE 0
+
+#define DCICRS_ROI_END_OFFSET_X_MASK 0x1FFF
+#define DCICRS_ROI_END_OFFSET_X_SHIFT 0
+#define DCICRS_ROI_END_OFFSET_Y_MASK (0xFFF << 16)
+#define DCICRS_ROI_END_OFFSET_Y_SHIFT 16
+
+struct roi_regs {
+ u32 dcicrc;
+ u32 dcicrs;
+ u32 dcicrrs;
+ u32 dcicrcs;
+};
+
+struct dcic_regs {
+ u32 dcicc;
+ u32 dcicic;
+ u32 dcics;
+ u32 dcic_reserved;
+ struct roi_regs ROI[16];
+};
+
+struct dcic_mux {
+ char dcic[16];
+ u32 val;
+};
+
+struct bus_mux {
+ char name[16];
+ int reg;
+ int shift;
+ int mask;
+ int dcic_mux_num;
+ const struct dcic_mux *dcics;
+};
+
+struct dcic_info {
+ int bus_mux_num;
+ const struct bus_mux *buses;
+};
+
+struct dcic_data {
+ struct regmap *regmap;
+ struct device *dev;
+ struct dcic_regs *regs;
+ const struct bus_mux *buses;
+ u32 bus_n;
+ u32 mux_n;
+ struct clk *disp_axi_clk;
+ struct clk *dcic_clk;
+ struct mutex lock;
+ struct completion roi_crc_comp;
+ struct class *class;
+ int major;
+ struct cdev cdev; /* Char device structure */
+ dev_t devt;
+ unsigned int result;
+};
+#endif
diff --git a/include/linux/mxc_mlb.h b/include/linux/mxc_mlb.h
new file mode 100644
index 000000000000..d7c792a2bee4
--- /dev/null
+++ b/include/linux/mxc_mlb.h
@@ -0,0 +1,55 @@
+/*
+ * mxc_mlb.h
+ *
+ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef _MXC_MLB_H
+#define _MXC_MLB_H
+
+/* define IOCTL command */
+#define MLB_DBG_RUNTIME _IO('S', 0x09)
+#define MLB_SET_FPS _IOW('S', 0x10, unsigned int)
+#define MLB_GET_VER _IOR('S', 0x11, unsigned long)
+#define MLB_SET_DEVADDR _IOR('S', 0x12, unsigned char)
+
+/*!
+ * Set the channel address for each logical channel:
+ * the MSB 16 bits carry the tx channel address, the LSB 16 bits the rx
+ * channel address.
+ */
+#define MLB_CHAN_SETADDR _IOW('S', 0x13, unsigned int)
+#define MLB_CHAN_STARTUP _IO('S', 0x14)
+#define MLB_CHAN_SHUTDOWN _IO('S', 0x15)
+#define MLB_CHAN_GETEVENT _IOR('S', 0x16, unsigned long)
+
+#define MLB_SET_ISOC_BLKSIZE_188 _IO('S', 0x17)
+#define MLB_SET_ISOC_BLKSIZE_196 _IO('S', 0x18)
+#define MLB_SET_SYNC_QUAD _IOW('S', 0x19, unsigned int)
+#define MLB_IRQ_ENABLE _IO('S', 0x20)
+#define MLB_IRQ_DISABLE _IO('S', 0x21)
+
+/*!
+ * MLB event define
+ */
+enum {
+ MLB_EVT_TX_PROTO_ERR_CUR = 1 << 0,
+ MLB_EVT_TX_BRK_DETECT_CUR = 1 << 1,
+ MLB_EVT_TX_PROTO_ERR_PREV = 1 << 8,
+ MLB_EVT_TX_BRK_DETECT_PREV = 1 << 9,
+ MLB_EVT_RX_PROTO_ERR_CUR = 1 << 16,
+ MLB_EVT_RX_BRK_DETECT_CUR = 1 << 17,
+ MLB_EVT_RX_PROTO_ERR_PREV = 1 << 24,
+ MLB_EVT_RX_BRK_DETECT_PREV = 1 << 25,
+};
+
+#endif /* _MXC_MLB_H */
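
Per the MLB_CHAN_SETADDR comment, both directions pack into one 32-bit word: the tx channel address in the upper 16 bits and the rx channel address in the lower 16. A minimal user-space sketch; the open file descriptor and error handling are assumed to come from the caller:

#include <sys/ioctl.h>

/* Sketch: program both channel addresses, then start the channel. */
static int mlb_chan_setup(int fd, unsigned short tx_addr, unsigned short rx_addr)
{
	unsigned int packed = ((unsigned int)tx_addr << 16) | rx_addr;

	if (ioctl(fd, MLB_CHAN_SETADDR, &packed) < 0)
		return -1;
	return ioctl(fd, MLB_CHAN_STARTUP);
}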
diff --git a/include/linux/mxc_sim_interface.h b/include/linux/mxc_sim_interface.h
new file mode 100644
index 000000000000..5eae53a59075
--- /dev/null
+++ b/include/linux/mxc_sim_interface.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef MXC_SIM_INTERFACE_H
+#define MXC_SIM_INTERFACE_H
+
+#define SIM_ATR_LENGTH_MAX 32
+
+/* Raw ATR SIM_IOCTL_GET_ATR */
+typedef struct {
+	unsigned int size;	/* length of ATR received */
+	unsigned char *atr_buffer;	/* raw ATR string received */
+	int errval;	/* error value reported to user space after ATR completion */
+} sim_atr_t;
+
+/* ISO7816-3 protocols */
+#define SIM_PROTOCOL_T0 1
+#define SIM_PROTOCOL_T1 2
+
+/* Transfer types for SIM_IOCTL_XFER */
+#define SIM_XFER_TYPE_TPDU 1
+#define SIM_XFER_TYPE_PTS 2
+
+typedef struct {
+ unsigned int wwt;
+ unsigned int cwt;
+ unsigned int bwt;
+ unsigned int bgt;
+ unsigned int cgt;
+} sim_timing_t;
+
+/* Transfer data for SIM_IOCTL_XFER */
+typedef struct {
+ unsigned char *xmt_buffer; /* transmit buffer pointer */
+	int xmt_length;	/* transmit buffer length */
+	int timeout;	/* transfer timeout in milliseconds */
+	int errval;	/* error value reported to user space after transmit completes */
+} sim_xmt_t;
+
+typedef struct {
+ unsigned char *rcv_buffer; /* receive buffer pointer */
+ int rcv_length; /* receive buffer length */
+	int timeout;	/* transfer timeout in milliseconds */
+	int errval;	/* error value reported to user space after receive completes */
+} sim_rcv_t;
+
+typedef struct {
+ unsigned char di;
+ unsigned char fi;
+} sim_baud_t;
+
+/* Interface power states */
+#define SIM_POWER_OFF (0)
+#define SIM_POWER_ON (1)
+
+/* Return values for SIM_IOCTL_GET_PRESENSE */
+#define SIM_PRESENT_REMOVED (0)
+#define SIM_PRESENT_DETECTED (1)
+#define SIM_PRESENT_OPERATIONAL (2)
+
+/* The error value */
+#define SIM_OK (0)
+#define SIM_ERROR_CWT (1 << 0)
+#define SIM_ERROR_BWT (1 << 1)
+#define SIM_ERROR_PARITY (1 << 2)
+#define SIM_ERROR_INVALID_TS (1 << 3)
+#define SIM_ERROR_FRAME (1 << 4)
+#define SIM_ERROR_ATR_TIMEROUT (1 << 5)
+#define SIM_ERROR_NACK_THRESHOLD (1 << 6)
+#define SIM_ERROR_BGT (1 << 7)
+#define SIM_ERROR_ATR_DELAY (1 << 8)
+
+/* Return values for SIM_IOCTL_GET_ERROR */
+#define SIM_E_ACCESS (1)
+#define SIM_E_TPDUSHORT (2)
+#define SIM_E_PTSEMPTY (3)
+#define SIM_E_INVALIDXFERTYPE (4)
+#define SIM_E_INVALIDXMTLENGTH (5)
+#define SIM_E_INVALIDRCVLENGTH (6)
+#define SIM_E_NACK (7)
+#define SIM_E_TIMEOUT (8)
+#define SIM_E_NOCARD (9)
+#define SIM_E_PARAM_FI_INVALID (10)
+#define SIM_E_PARAM_DI_INVALID (11)
+#define SIM_E_PARAM_FBYD_WITHFRACTION (12)
+#define SIM_E_PARAM_FBYD_NOTDIVBY8OR12 (13)
+#define SIM_E_PARAM_DIVISOR_RANGE (14)
+#define SIM_E_MALLOC (15)
+#define SIM_E_IRQ (16)
+#define SIM_E_POWERED_ON (17)
+#define SIM_E_POWERED_OFF (18)
+
+/* ioctl encodings */
+#define SIM_IOCTL_BASE (0xc0)
+#define SIM_IOCTL_GET_PRESENSE _IOR(SIM_IOCTL_BASE, 1, int)
+#define SIM_IOCTL_GET_ATR _IOR(SIM_IOCTL_BASE, 2, sim_atr_t)
+#define SIM_IOCTL_XMT _IOR(SIM_IOCTL_BASE, 3, sim_xmt_t)
+#define SIM_IOCTL_RCV _IOR(SIM_IOCTL_BASE, 4, sim_rcv_t)
+#define SIM_IOCTL_ACTIVATE _IO(SIM_IOCTL_BASE, 5)
+#define SIM_IOCTL_DEACTIVATE _IO(SIM_IOCTL_BASE, 6)
+#define SIM_IOCTL_WARM_RESET _IO(SIM_IOCTL_BASE, 7)
+#define SIM_IOCTL_COLD_RESET _IO(SIM_IOCTL_BASE, 8)
+#define SIM_IOCTL_CARD_LOCK _IO(SIM_IOCTL_BASE, 9)
+#define SIM_IOCTL_CARD_EJECT _IO(SIM_IOCTL_BASE, 10)
+#define SIM_IOCTL_SET_PROTOCOL _IOR(SIM_IOCTL_BASE, 11, unsigned int)
+#define SIM_IOCTL_SET_TIMING _IOR(SIM_IOCTL_BASE, 12, sim_timing_t)
+#define SIM_IOCTL_SET_BAUD _IOR(SIM_IOCTL_BASE, 13, sim_baud_t)
+#define SIM_IOCTL_WAIT _IOR(SIM_IOCTL_BASE, 14, unsigned int)
+
+#endif
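
The ioctl set implies a bring-up order: activate the interface, reset the card, then read the ATR back through a sim_atr_t. A minimal user-space sketch; the device node is assumed to be open on fd, and error handling is elided:

#include <stdio.h>
#include <sys/ioctl.h>

/* Sketch: cold-reset the card and fetch its raw ATR. */
unsigned char buf[SIM_ATR_LENGTH_MAX];
sim_atr_t atr = { .size = 0, .atr_buffer = buf, .errval = 0 };

ioctl(fd, SIM_IOCTL_ACTIVATE);
ioctl(fd, SIM_IOCTL_COLD_RESET);
if (ioctl(fd, SIM_IOCTL_GET_ATR, &atr) == 0 && atr.errval == SIM_OK)
	printf("got %u ATR bytes\n", atr.size);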
diff --git a/include/linux/mxc_vpu.h b/include/linux/mxc_vpu.h
new file mode 100644
index 000000000000..df024698dee7
--- /dev/null
+++ b/include/linux/mxc_vpu.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2004-2013, 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU Lesser General
+ * Public License. You may obtain a copy of the GNU Lesser General
+ * Public License Version 2.1 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/lgpl-license.html
+ * http://www.gnu.org/copyleft/lgpl.html
+ */
+
+/*!
+ * @defgroup VPU Video Processor Unit Driver
+ */
+
+/*!
+ * @file linux/mxc_vpu.h
+ *
+ * @brief VPU system initialization and file operation definition
+ *
+ * @ingroup VPU
+ */
+
+#ifndef __LINUX_MXC_VPU_H__
+#define __LINUX_MXC_VPU_H__
+
+#include <linux/fs.h>
+
+struct mxc_vpu_platform_data {
+ bool iram_enable;
+ int iram_size;
+ void (*reset) (void);
+ void (*pg) (int);
+};
+
+struct vpu_mem_desc {
+ u32 size;
+ dma_addr_t phy_addr;
+ u32 cpu_addr; /* cpu address to free the dma mem */
+ u32 virt_uaddr; /* virtual user space address */
+};
+
+#define VPU_IOC_MAGIC 'V'
+
+#define VPU_IOC_PHYMEM_ALLOC _IO(VPU_IOC_MAGIC, 0)
+#define VPU_IOC_PHYMEM_FREE _IO(VPU_IOC_MAGIC, 1)
+#define VPU_IOC_WAIT4INT _IO(VPU_IOC_MAGIC, 2)
+#define VPU_IOC_PHYMEM_DUMP _IO(VPU_IOC_MAGIC, 3)
+#define VPU_IOC_REG_DUMP _IO(VPU_IOC_MAGIC, 4)
+#define VPU_IOC_IRAM_SETTING _IO(VPU_IOC_MAGIC, 6)
+#define VPU_IOC_CLKGATE_SETTING _IO(VPU_IOC_MAGIC, 7)
+#define VPU_IOC_GET_WORK_ADDR _IO(VPU_IOC_MAGIC, 8)
+#define VPU_IOC_REQ_VSHARE_MEM _IO(VPU_IOC_MAGIC, 9)
+#define VPU_IOC_SYS_SW_RESET _IO(VPU_IOC_MAGIC, 11)
+#define VPU_IOC_GET_SHARE_MEM _IO(VPU_IOC_MAGIC, 12)
+#define VPU_IOC_QUERY_BITWORK_MEM _IO(VPU_IOC_MAGIC, 13)
+#define VPU_IOC_SET_BITWORK_MEM _IO(VPU_IOC_MAGIC, 14)
+#define VPU_IOC_PHYMEM_CHECK _IO(VPU_IOC_MAGIC, 15)
+#define VPU_IOC_LOCK_DEV _IO(VPU_IOC_MAGIC, 16)
+
+#define BIT_CODE_RUN 0x000
+#define BIT_CODE_DOWN 0x004
+#define BIT_INT_CLEAR 0x00C
+#define BIT_INT_STATUS 0x010
+#define BIT_CUR_PC 0x018
+#define BIT_INT_REASON 0x174
+
+#define MJPEG_PIC_STATUS_REG 0x3004
+#define MBC_SET_SUBBLK_EN 0x4A0
+
+#define BIT_WORK_CTRL_BUF_BASE 0x100
+#define BIT_WORK_CTRL_BUF_REG(i) (BIT_WORK_CTRL_BUF_BASE + (i) * 4)
+#define BIT_CODE_BUF_ADDR BIT_WORK_CTRL_BUF_REG(0)
+#define BIT_WORK_BUF_ADDR BIT_WORK_CTRL_BUF_REG(1)
+#define BIT_PARA_BUF_ADDR BIT_WORK_CTRL_BUF_REG(2)
+#define BIT_BIT_STREAM_CTRL BIT_WORK_CTRL_BUF_REG(3)
+#define BIT_FRAME_MEM_CTRL BIT_WORK_CTRL_BUF_REG(4)
+#define BIT_BIT_STREAM_PARAM BIT_WORK_CTRL_BUF_REG(5)
+
+#ifndef CONFIG_SOC_IMX6Q
+#define BIT_RESET_CTRL 0x11C
+#else
+#define BIT_RESET_CTRL 0x128
+#endif
+
+/* i could be 0, 1, 2, 3 */
+#define BIT_RD_PTR_BASE 0x120
+#define BIT_RD_PTR_REG(i) (BIT_RD_PTR_BASE + (i) * 8)
+#define BIT_WR_PTR_REG(i) (BIT_RD_PTR_BASE + (i) * 8 + 4)
+
+/* i could be 0, 1, 2, 3 */
+#define BIT_FRM_DIS_FLG_BASE (cpu_is_mx51() ? 0x150 : 0x140)
+#define BIT_FRM_DIS_FLG_REG(i) (BIT_FRM_DIS_FLG_BASE + (i) * 4)
+
+#define BIT_BUSY_FLAG 0x160
+#define BIT_RUN_COMMAND 0x164
+#define BIT_INT_ENABLE 0x170
+
+#define BITVAL_PIC_RUN 8
+
+#define VPU_SLEEP_REG_VALUE 10
+#define VPU_WAKE_REG_VALUE 11
+
+int vl2cc_init(u32 vl2cc_hw_base);
+void vl2cc_enable(void);
+void vl2cc_flush(void);
+void vl2cc_disable(void);
+void vl2cc_cleanup(void);
+
+#endif
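
The pointer-register macros index per-instance stream pointers at an 8-byte stride: instance i's read pointer sits at BIT_RD_PTR_BASE + i*8 and its write pointer 4 bytes above. A short illustration, assuming a mapped VPU register base:

/* Instance 2 stream pointers: 0x120 + 2*8 = 0x130 and 0x134. */
u32 rd_ptr = readl(vpu_base + BIT_RD_PTR_REG(2));
u32 wr_ptr = readl(vpu_base + BIT_WR_PTR_REG(2));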
diff --git a/include/linux/mxcfb_epdc.h b/include/linux/mxcfb_epdc.h
new file mode 100644
index 000000000000..84fea89e7e33
--- /dev/null
+++ b/include/linux/mxcfb_epdc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2019 NXP
+ */
+#ifndef _MXCFB_EPDC_KERNEL
+#define _MXCFB_EPDC_KERNEL
+
+struct imx_epdc_fb_mode {
+ struct fb_videomode *vmode;
+ int vscan_holdoff;
+ int sdoed_width;
+ int sdoed_delay;
+ int sdoez_width;
+ int sdoez_delay;
+ int gdclk_hp_offs;
+ int gdsp_offs;
+ int gdoe_offs;
+ int gdclk_offs;
+ int num_ce;
+};
+
+struct imx_epdc_fb_platform_data {
+ struct imx_epdc_fb_mode *epdc_mode;
+ int num_modes;
+ int (*get_pins) (void);
+ void (*put_pins) (void);
+ void (*enable_pins) (void);
+ void (*disable_pins) (void);
+};
+
+#endif
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 640e7279f161..88fb37093c63 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -77,6 +77,7 @@ enum {
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */
+ NETIF_F_HW_ACCEL_MQ_BIT, /* Hardware-accelerated multiqueue */
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
@@ -150,6 +151,7 @@ enum {
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
+#define NETIF_F_HW_ACCEL_MQ __NETIF_F(HW_ACCEL_MQ)
/* Finds the next feature with the highest number of the range of start till 0.
*/
diff --git a/include/linux/of.h b/include/linux/of.h
index a7621e2b440a..4c6acfaac075 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -391,6 +391,7 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
+extern int of_alias_max_index(const char *stem);
extern int of_alias_get_alias_list(const struct of_device_id *matches,
const char *stem, unsigned long *bitmap,
unsigned int nbits);
@@ -911,6 +912,11 @@ static inline int of_alias_get_alias_list(const struct of_device_id *matches,
return -ENOSYS;
}
+static inline int of_alias_max_index(const char *stem)
+{
+ return -ENODEV;
+}
+
static inline int of_machine_is_compatible(const char *compat)
{
return 0;
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index acf820e88952..a713e5d156d8 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -23,6 +23,15 @@
struct device_node;
/* For scanning an arbitrary device-tree at any time */
+extern char *of_fdt_get_string(const void *blob, u32 offset);
+extern void *of_fdt_get_property(const void *blob,
+ unsigned long node,
+ const char *name,
+ int *size);
+extern bool of_fdt_is_big_endian(const void *blob,
+ unsigned long node);
+extern int of_fdt_match(const void *blob, unsigned long node,
+ const char *const *compat);
extern void *of_fdt_unflatten_tree(const unsigned long *blob,
struct device_node *dad,
struct device_node **mynodes);
@@ -55,7 +64,9 @@ extern int of_get_flat_dt_subnode_by_name(unsigned long node,
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
int *size);
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
+extern int of_flat_dt_match(unsigned long node, const char *const *matches);
extern unsigned long of_get_flat_dt_root(void);
+extern int of_get_flat_dt_size(void);
extern uint32_t of_get_flat_dt_phandle(unsigned long node);
extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9a937f8b2783..1527b5cd73de 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2022,6 +2022,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) {}
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 42588645478d..d2cd83fb9816 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1481,6 +1481,7 @@
#define PCI_VENDOR_ID_SIEMENS 0x110A
#define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102
+#define PCI_DEVICE_ID_IVSHMEM 0x4106
#define PCI_VENDOR_ID_VORTEX 0x1119
#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 80750783b5b0..67bf6cbe75ab 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -102,6 +102,7 @@ typedef enum {
/* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
PHY_INTERFACE_MODE_10GKR,
PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_2500SGMII,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -179,6 +180,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "10gbase-kr";
case PHY_INTERFACE_MODE_USXGMII:
return "usxgmii";
+ case PHY_INTERFACE_MODE_2500SGMII:
+ return "sgmii-2500";
default:
return "unknown";
}
diff --git a/include/linux/phy/phy-mixel-lvds-combo.h b/include/linux/phy/phy-mixel-lvds-combo.h
new file mode 100644
index 000000000000..16a558fb4074
--- /dev/null
+++ b/include/linux/phy/phy-mixel-lvds-combo.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef PHY_MIXEL_LVDS_COMBO_H_
+#define PHY_MIXEL_LVDS_COMBO_H_
+
+#include "phy.h"
+
+#if IS_ENABLED(CONFIG_PHY_MIXEL_LVDS_COMBO)
+void mixel_phy_combo_lvds_set_phy_speed(struct phy *phy,
+ unsigned long phy_clk_rate);
+void mixel_phy_combo_lvds_set_hsync_pol(struct phy *phy, bool active_high);
+void mixel_phy_combo_lvds_set_vsync_pol(struct phy *phy, bool active_high);
+#else
+static inline void
+mixel_phy_combo_lvds_set_phy_speed(struct phy *phy, unsigned long phy_clk_rate)
+{
+}
+static inline void mixel_phy_combo_lvds_set_hsync_pol(struct phy *phy,
+ bool active_high)
+{
+}
+static inline void mixel_phy_combo_lvds_set_vsync_pol(struct phy *phy,
+ bool active_high)
+{
+}
+#endif
+
+#endif /* PHY_MIXEL_LVDS_COMBO_H_ */
diff --git a/include/linux/phy/phy-mixel-lvds.h b/include/linux/phy/phy-mixel-lvds.h
new file mode 100644
index 000000000000..2ffa4ee28cb0
--- /dev/null
+++ b/include/linux/phy/phy-mixel-lvds.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef PHY_MIXEL_LVDS_H_
+#define PHY_MIXEL_LVDS_H_
+
+#include "phy.h"
+
+#if IS_ENABLED(CONFIG_PHY_MIXEL_LVDS)
+void mixel_phy_lvds_set_phy_speed(struct phy *phy, unsigned long phy_clk_rate);
+void mixel_phy_lvds_set_hsync_pol(struct phy *phy, bool active_high);
+void mixel_phy_lvds_set_vsync_pol(struct phy *phy, bool active_high);
+#else
+static inline void mixel_phy_lvds_set_phy_speed(struct phy *phy,
+ unsigned long phy_clk_rate)
+{
+}
+static inline void mixel_phy_lvds_set_hsync_pol(struct phy *phy,
+ bool active_high)
+{
+}
+static inline void mixel_phy_lvds_set_vsync_pol(struct phy *phy,
+ bool active_high)
+{
+}
+#endif
+
+#endif /* PHY_MIXEL_LVDS_H_ */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 300ecdb6790a..066021f7991c 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -63,10 +63,12 @@ enum phylink_op_type {
* struct phylink_config - PHYLINK configuration structure
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
+ * @pcs_poll: MAC PCS cannot provide link change interrupt
*/
struct phylink_config {
struct device *dev;
enum phylink_op_type type;
+ bool pcs_poll;
};
/**
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index 1d30bf278231..b737c0a8e58d 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -178,6 +178,7 @@ struct brcmfmac_platform_data {
void (*power_off)(void);
char *fw_alternative_path;
int device_count;
+ struct device *dev;
struct brcmfmac_pd_device devices[0];
};
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index 30e676b36b24..e12d2e8c246b 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -20,12 +20,12 @@ struct sdma_script_start_addrs {
s32 per_2_firi_addr;
s32 mcu_2_firi_addr;
s32 uart_2_per_addr;
- s32 uart_2_mcu_addr;
+ s32 uart_2_mcu_ram_addr;
s32 per_2_app_addr;
s32 mcu_2_app_addr;
s32 per_2_per_addr;
s32 uartsh_2_per_addr;
- s32 uartsh_2_mcu_addr;
+ s32 uartsh_2_mcu_ram_addr;
s32 per_2_shp_addr;
s32 mcu_2_shp_addr;
s32 ata_2_mcu_addr;
@@ -52,6 +52,10 @@ struct sdma_script_start_addrs {
s32 zcanfd_2_mcu_addr;
s32 zqspi_2_mcu_addr;
s32 mcu_2_ecspi_addr;
+ s32 mcu_2_sai_addr;
+ s32 sai_2_mcu_addr;
+ s32 uart_2_mcu_addr;
+ s32 uartsh_2_mcu_addr;
/* End of v3 array */
s32 mcu_2_zqspi_addr;
/* End of v4 array */
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 281adbb26e6b..eb19aedb03f0 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2004-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2018 NXP.
*/
#ifndef __ASM_ARCH_MXC_DMA_H__
@@ -39,6 +40,8 @@ enum sdma_peripheral_type {
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
IMX_DMATYPE_SAI, /* SAI */
+ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
+ IMX_DMATYPE_HDMI, /* HDMI Audio */
};
enum imx_dma_prio {
@@ -52,6 +55,9 @@ struct imx_dma_data {
int dma_request2; /* secondary DMA request line */
enum sdma_peripheral_type peripheral_type;
int priority;
+ bool src_dualfifo;
+ bool dst_dualfifo;
+ int done_sel;
};
static inline int imx_dma_is_ipu(struct dma_chan *chan)
@@ -59,6 +65,11 @@ static inline int imx_dma_is_ipu(struct dma_chan *chan)
return !strcmp(dev_name(chan->device->dev), "ipu-core");
}
+static inline int imx_dma_is_pxp(struct dma_chan *chan)
+{
+ return strstr(dev_name(chan->device->dev), "pxp") != NULL;
+}
+
static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
{
return !strcmp(chan->device->dev->driver->name, "imx-sdma") ||
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 6c006078c8a1..0434f68eda86 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -37,5 +37,6 @@ struct esdhc_platform_data {
unsigned int delay_line;
unsigned int tuning_step; /* The delay cell steps in tuning procedure */
unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
+ unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index baf02ff91a31..a7c726413984 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -143,6 +143,7 @@ struct generic_pm_domain {
};
};
+ unsigned int state_idx_saved; /* saved power state for recovery after system suspend/resume */
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 661efa029c96..aa3da6611533 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -63,6 +63,11 @@ struct wakeup_source {
bool autosleep_enabled:1;
};
+#define for_each_wakeup_source(ws) \
+ for ((ws) = wakeup_sources_walk_start(); \
+ (ws); \
+ (ws) = wakeup_sources_walk_next((ws)))
+
#ifdef CONFIG_PM_SLEEP
/*
@@ -92,6 +97,10 @@ extern void wakeup_source_remove(struct wakeup_source *ws);
extern struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name);
extern void wakeup_source_unregister(struct wakeup_source *ws);
+extern int wakeup_sources_read_lock(void);
+extern void wakeup_sources_read_unlock(int idx);
+extern struct wakeup_source *wakeup_sources_walk_start(void);
+extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws);
extern int device_wakeup_enable(struct device *dev);
extern int device_wakeup_disable(struct device *dev);
extern void device_set_wakeup_capable(struct device *dev, bool capable);
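
The iterator added here pairs with the read-lock helpers above it: take the lock, walk the list with for_each_wakeup_source(), and release with the returned cookie. A minimal sketch of that intended usage:

/* Sketch: count registered wakeup sources under the read lock. */
struct wakeup_source *ws;
int idx, count = 0;

idx = wakeup_sources_read_lock();
for_each_wakeup_source(ws)
	count++;
wakeup_sources_read_unlock(idx);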
diff --git a/include/linux/pmic_status.h b/include/linux/pmic_status.h
new file mode 100644
index 000000000000..cbef24194782
--- /dev/null
+++ b/include/linux/pmic_status.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2004-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2019 NXP
+ */
+
+#ifndef __ASM_ARCH_MXC_PMIC_STATUS_H__
+#define __ASM_ARCH_MXC_PMIC_STATUS_H__
+#include <asm-generic/errno-base.h>
+#ifdef __KERNEL__
+#include <asm/uaccess.h> /* copy_{from,to}_user() */
+#endif
+/*!
+ * @file arch-mxc/pmic_status.h
+ * @brief PMIC APIs return code definition.
+ *
+ * @ingroup PMIC_CORE
+ */
+
+/*!
+ * @enum PMIC_STATUS
+ * @brief Define return values for all PMIC APIs.
+ *
+ * These return values are used by all of the PMIC APIs.
+ *
+ * @ingroup PMIC
+ */
+typedef enum {
+ PMIC_SUCCESS = 0, /*!< The requested operation was successfully
+ completed. */
+ PMIC_ERROR = -1, /*!< The requested operation could not be completed
+ due to an error. */
+ PMIC_PARAMETER_ERROR = -2, /*!< The requested operation failed because
+ one or more of the parameters was
+ invalid. */
+ PMIC_NOT_SUPPORTED = -3, /*!< The requested operation could not be
+ completed because the PMIC hardware
+ does not support it. */
+ PMIC_SYSTEM_ERROR_EINTR = -EINTR,
+
+ PMIC_MALLOC_ERROR = -5, /*!< Error in malloc function */
+ PMIC_UNSUBSCRIBE_ERROR = -6, /*!< Error in un-subscribe event */
+ PMIC_EVENT_NOT_SUBSCRIBED = -7, /*!< Event occur and not subscribed */
+ PMIC_EVENT_CALL_BACK = -8, /*!< Error - bad call back */
+ PMIC_CLIENT_NBOVERFLOW = -9, /*!< The requested operation could not be
+ completed because there are too many
+ PMIC client requests */
+} PMIC_STATUS;
+
+/*
+ * Bitfield macros that rely on bitfield width/shift information.
+ */
+#define BITFMASK(field) (((1U << (field ## _WID)) - 1) << (field ## _LSH))
+#define BITFVAL(field, val) ((val) << (field ## _LSH))
+#define BITFEXT(var, bit) ((var & BITFMASK(bit)) >> (bit ## _LSH))
+
+/*
+ * Macros implementing error handling
+ */
+#define CHECK_ERROR(a) \
+do { \
+ int ret = (a); \
+ if (ret != PMIC_SUCCESS) \
+ return ret; \
+} while (0)
+
+#define CHECK_ERROR_KFREE(func, freeptrs) \
+do { \
+ int ret = (func); \
+ if (ret != PMIC_SUCCESS) { \
+ freeptrs; \
+ return ret; \
+ } \
+} while (0)
+
+#endif /* __ASM_ARCH_MXC_PMIC_STATUS_H__ */
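
The BITF* macros expect a pair of companion constants per field, field##_WID (width) and field##_LSH (shift); given those, mask construction, insertion, and extraction are one-liners. An illustration with hypothetical field constants:

/* Hypothetical field: 3 bits wide, starting at bit 4. */
#define CHG_CURRENT_WID 3
#define CHG_CURRENT_LSH 4

unsigned int mask = BITFMASK(CHG_CURRENT);	/* ((1 << 3) - 1) << 4 = 0x70 */
unsigned int val  = BITFVAL(CHG_CURRENT, 5);	/* 5 << 4 = 0x50 */
unsigned int ext  = BITFEXT(0x50, CHG_CURRENT);	/* (0x50 & 0x70) >> 4 = 5 */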
diff --git a/include/linux/power/sabresd_battery.h b/include/linux/power/sabresd_battery.h
new file mode 100644
index 000000000000..10bfa4588864
--- /dev/null
+++ b/include/linux/power/sabresd_battery.h
@@ -0,0 +1,65 @@
+/*
+ * sabresd_battery.h - Maxim 8903 USB/Adapter Charger Driver
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Copyright (C) 2011-2015 Freescale Semiconductor, Inc.
+ * Based on max8903_charger.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __MAX8903_SABRESD_H__
+#define __MAX8903_SABRESD_H__
+
+struct max8903_pdata {
+ /*
+ * GPIOs
+ * cen, chg, flt, and usus are optional.
+	 * dok, dcm, and uok are required or optional depending on the state of
+ * dc_valid and usb_valid.
+ */
+ int cen; /* Charger Enable input */
+ int dok; /* DC(Adapter) Power OK output */
+ int uok; /* USB Power OK output */
+ int chg; /* Charger status output */
+ int flt; /* Fault output */
+ int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
+ int usus; /* USB Suspend Input (1: suspended) */
+	int feature_flag;	/* battery capacity feature (0: enable, 1: disable) */
+
+ /*
+	/*
+	 * DCM wired to logic high: set this true when the DCM pin is
+	 * tied to logic high.
+	 */
+
+ /*
+ * DC(Adapter/TA) is wired
+ * When dc_valid is true,
+ * dok and dcm should be valid.
+ *
+ * At least one of dc_valid or usb_valid should be true.
+ */
+ bool dc_valid;
+ /*
+ * USB is wired
+ * When usb_valid is true,
+ * uok should be valid.
+ */
+ bool usb_valid;
+};
+
+#endif /* __MAX8903_SABRESD_H__ */
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 8ea265a022fd..d5f73e14445b 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -23,6 +23,7 @@ struct platform_pwm_backlight_data {
void (*notify_after)(struct device *dev, int brightness);
void (*exit)(struct device *dev);
int (*check_fb)(struct device *dev, struct fb_info *info);
+ char fb_id[16];
};
#endif
diff --git a/include/linux/pxp_device.h b/include/linux/pxp_device.h
new file mode 100644
index 000000000000..cc42092ec76f
--- /dev/null
+++ b/include/linux/pxp_device.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Copyright 2019 NXP
+ */
+#ifndef _PXP_DEVICE
+#define _PXP_DEVICE
+
+#include <linux/idr.h>
+#include <linux/hash.h>
+#include <uapi/linux/pxp_device.h>
+
+struct pxp_irq_info {
+ wait_queue_head_t waitq;
+ atomic_t irq_pending;
+ int hist_status;
+};
+
+struct pxp_buffer_hash {
+ struct hlist_head *hash_table;
+ u32 order;
+ spinlock_t hash_lock;
+};
+
+struct pxp_buf_obj {
+ uint32_t handle;
+
+ uint32_t size;
+ uint32_t mem_type;
+
+ unsigned long offset;
+ void *virtual;
+
+ struct hlist_node item;
+};
+
+struct pxp_chan_obj {
+ uint32_t handle;
+ struct dma_chan *chan;
+};
+
+/* File private data */
+struct pxp_file {
+ struct file *filp;
+
+ /* record allocated dma buffer */
+ struct idr buffer_idr;
+ spinlock_t buffer_lock;
+
+ /* record allocated dma channel */
+ struct idr channel_idr;
+ spinlock_t channel_lock;
+};
+
+#endif
diff --git a/include/linux/pxp_dma.h b/include/linux/pxp_dma.h
new file mode 100644
index 000000000000..f89e36369713
--- /dev/null
+++ b/include/linux/pxp_dma.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Copyright 2017-2019 NXP
+ */
+#ifndef _PXP_DMA
+#define _PXP_DMA
+
+#include <uapi/linux/pxp_dma.h>
+
+struct pxp_tx_desc {
+ struct dma_async_tx_descriptor txd;
+ struct list_head tx_list;
+ struct list_head list;
+ int len;
+ union {
+ struct pxp_layer_param s0_param;
+ struct pxp_layer_param out_param;
+ struct pxp_layer_param ol_param;
+ struct pxp_layer_param processing_param;
+ } layer_param;
+ struct pxp_proc_data proc_data;
+
+ u32 hist_status; /* Histogram output status */
+
+ struct pxp_tx_desc *next;
+};
+
+struct pxp_channel {
+ struct dma_chan dma_chan;
+ dma_cookie_t completed; /* last completed cookie */
+ enum pxp_channel_status status;
+ void *client; /* Only one client per channel */
+ unsigned int n_tx_desc;
+ struct pxp_tx_desc *desc; /* allocated tx-descriptors */
+ struct list_head active_list; /* active tx-descriptors */
+ struct list_head free_list; /* free tx-descriptors */
+ struct list_head queue; /* queued tx-descriptors */
+ struct list_head list; /* track queued channel number */
+ spinlock_t lock; /* protects sg[0,1], queue */
+ struct mutex chan_mutex; /* protects status, cookie, free_list */
+ int active_buffer;
+ unsigned int eof_irq;
+ char eof_name[16]; /* EOF IRQ name for request_irq() */
+};
+
+#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)
+#define to_pxp_channel(d) container_of(d, struct pxp_channel, dma_chan)
+
+void pxp_txd_ack(struct dma_async_tx_descriptor *txd,
+ struct pxp_channel *pxp_chan);
+
+#ifdef CONFIG_MXC_PXP_CLIENT_DEVICE
+int register_pxp_device(void);
+void unregister_pxp_device(void);
+#else
+static inline int register_pxp_device(void) { return 0; }
+static inline void unregister_pxp_device(void) {}
+#endif
+void pxp_fill(u32 bpp, u32 value, u32 width, u32 height,
+	      u32 output_buffer, u32 output_pitch);
+
+void m4_process(void);
+#endif
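
register_pxp_device()/unregister_pxp_device() follow the usual optional-client pattern: the stubs compile the character-device client away when CONFIG_MXC_PXP_CLIENT_DEVICE is off, so callers need no #ifdefs of their own. A minimal sketch of a probe-side call; the function name is illustrative:

/* Sketch: expose the PXP client device once the DMA engine is ready. */
static int pxp_expose_client(void)
{
	int ret = register_pxp_device();

	if (ret)
		pr_err("pxp: client registration failed: %d\n", ret);
	return ret;
}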
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index d44ce5f18a56..55319943fcc5 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -36,6 +36,7 @@ struct fixed_voltage_config {
const char *input_supply;
int microvolts;
unsigned startup_delay;
+ unsigned int off_on_delay;
unsigned enabled_at_boot:1;
struct regulator_init_data *init_data;
};
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 16ad66683ad0..1c624906eb72 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -383,6 +383,8 @@ struct rproc_ops {
int (*load)(struct rproc *rproc, const struct firmware *fw);
int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
+ void * (*memcpy)(struct rproc *rproc, void *dest,
+ const void *src, size_t count, int flags);
};
/**
@@ -479,6 +481,7 @@ struct rproc_dump_segment {
* @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
* @auto_boot: flag to indicate if remote processor should be auto-started
+ * @skip_fw_load: remote processor has been preloaded before start sequence
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
*/
@@ -512,6 +515,7 @@ struct rproc {
size_t table_sz;
bool has_iommu;
bool auto_boot;
+ bool skip_fw_load;
struct list_head dump_segments;
int nb_vdev;
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b04b5bd43f54..f3344eecd4fb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1024,6 +1024,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __consume_stateless_skb(struct sk_buff *skb);
+void skb_recycle(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index dc60d03c4b60..a15818f3d8a4 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -107,6 +107,18 @@ struct stmmac_axi {
bool axi_rb;
};
+#define EST_GCL 1024
+struct stmmac_est {
+ int enable;
+ u32 btr_offset[2];
+ u32 btr[2];
+ u32 ctr[2];
+ u32 ter;
+ u32 gcl_unaligned[EST_GCL];
+ u32 gcl[EST_GCL];
+ u32 gcl_size;
+};
+
struct stmmac_rxq_cfg {
u8 mode_to_use;
u32 chan;
@@ -137,6 +149,7 @@ struct plat_stmmacenet_data {
struct device_node *phylink_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
+ struct stmmac_est *est;
int clk_csr;
int has_gmac;
int enh_desc;
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index cd15c1b7fae0..1a04477bbcd0 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -47,6 +47,8 @@ struct tee_shm_pool;
* and just return with an error code. It is needed for requests
* that arises from TEE based kernel drivers that should be
* non-blocking in nature.
+ * @cap_memref_null: flag indicating if the TEE Client supports shared
+ * memory buffer with a NULL pointer.
*/
struct tee_context {
struct tee_device *teedev;
@@ -55,6 +57,7 @@ struct tee_context {
struct kref refcount;
bool releasing;
bool supp_nowait;
+ bool cap_memref_null;
};
struct tee_param_memref {
@@ -168,6 +171,22 @@ int tee_device_register(struct tee_device *teedev);
void tee_device_unregister(struct tee_device *teedev);
/**
+ * tee_session_calc_client_uuid() - Calculates client UUID for session
+ * @uuid: Resulting UUID
+ * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
+ * @connectuon_data: Connection data for opening session
+ *
+ * Based on connection method calculates UUIDv5 based client UUID.
+ *
+ * For group based logins verifies that calling process has specified
+ * credentials.
+ *
+ * @return < 0 on failure
+ */
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN]);
+
+/**
* struct tee_shm - shared memory object
* @teedev: device used to allocate the object
* @ctx: context using the object, if NULL the context is gone
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 01081c4726c0..ebfc06e36ca2 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -31,6 +31,7 @@ struct uio_map;
* @offs: offset of device memory within the page
* @size: size of IO (multiple of page size)
* @memtype: type of memory addr points to
+ * @readonly: true if region is read-only
* @internal_addr: ioremap-ped version of addr, for driver internal use
* @map: for use by the UIO core only.
*/
@@ -40,6 +41,7 @@ struct uio_mem {
unsigned long offs;
resource_size_t size;
int memtype;
+ bool readonly;
void __iomem *internal_addr;
struct uio_map *map;
};
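
A minimal sketch of a UIO driver exposing a register window as a read-only mapping via the new readonly field. Resource lookup and registration are elided; the assumption (not stated in the header itself) is that the UIO core rejects writable mmap() requests for such regions.

#include <linux/ioport.h>
#include <linux/uio_driver.h>

static void example_fill_uio_mem(struct uio_info *info, struct resource *res)
{
	info->mem[0].memtype  = UIO_MEM_PHYS;
	info->mem[0].addr     = res->start;
	info->mem[0].size     = resource_size(res);
	info->mem[0].readonly = true;	/* userspace gets read-only access */
}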
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index edd89b7c8f18..17e9b62f4660 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -62,11 +62,15 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
+/* The PHY enters low power mode when the bus is suspended */
+#define CI_HDRC_HOST_SUSP_PHY_LPM BIT(16)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
#define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2
#define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3
+#define CI_HDRC_CONTROLLER_VBUS_EVENT 4
+#define CI_HDRC_NOTIFY_RET_DEFER_EVENT 5
int (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
struct usb_otg_caps ci_otg_caps;
@@ -99,4 +103,6 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
/* Remove ci hdrc device */
void ci_hdrc_remove_device(struct platform_device *pdev);
+/* Get the currently available role */
+enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev);
#endif
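
A one-line sketch of glue-layer code using the new query helper, assuming pdev is the ci_hdrc child device returned by ci_hdrc_add_device(); the policy built on top of it is hypothetical.

#include <linux/usb/chipidea.h>

static bool example_is_host_only(struct platform_device *pdev)
{
	/* USB_DR_MODE_* comes from the standard dr_mode enumeration. */
	return ci_hdrc_query_available_role(pdev) == USB_DR_MODE_HOST;
}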
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 712b2a603645..5e2695d3852c 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -409,7 +409,10 @@ struct hc_driver {
int (*find_raw_port_number)(struct usb_hcd *, int);
/* Call for power on/off the port if necessary */
int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
-
+ /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+ int (*submit_single_step_set_feature)(struct usb_hcd *,
+ struct urb *, int);
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -474,6 +477,14 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
struct platform_device;
extern void usb_hcd_platform_shutdown(struct platform_device *dev);
+#ifdef CONFIG_USB_HCD_TEST_MODE
+extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port);
+#else
+static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ return 0;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
#ifdef CONFIG_USB_PCI
struct pci_dev;
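
A hedged sketch of how a test-mode path might dispatch the EHSET SINGLE_STEP_SET_FEATURE compliance test through the new helper; how 'port' and 'test' are selected is outside this header and purely illustrative. Note the stub above makes the call a no-op when CONFIG_USB_HCD_TEST_MODE is disabled.

#include <linux/usb/hcd.h>

static int example_run_ehset_test(struct usb_hcd *hcd, int port, int test)
{
	if (test == EHSET_TEST_SINGLE_STEP_SET_FEATURE)
		return ehset_single_step_set_feature(hcd, port);

	return -EINVAL;	/* other EHSET test values handled elsewhere */
}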
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index 8ef7d148c149..012a7116782f 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -54,6 +54,7 @@ enum otg_fsm_timer {
A_WAIT_ENUM,
B_DATA_PLS,
B_SSEND_SRP,
+ A_DP_END,
NUM_OTG_FSM_TIMERS,
};
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index e4de6bc1f69b..086f95f7b424 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -63,6 +63,13 @@ enum usb_otg_state {
OTG_STATE_A_VBUS_ERR,
};
+/* The USB role the PHY is to operate in */
+enum usb_current_mode {
+ CUR_USB_MODE_NONE,
+ CUR_USB_MODE_HOST,
+ CUR_USB_MODE_DEVICE,
+};
+
struct usb_phy;
struct usb_otg;
@@ -155,6 +162,15 @@ struct usb_phy {
* manually detect the charger type.
*/
enum usb_charger_type (*charger_detect)(struct usb_phy *x);
+
+ int (*notify_suspend)(struct usb_phy *x,
+ enum usb_device_speed speed);
+ int (*notify_resume)(struct usb_phy *x,
+ enum usb_device_speed speed);
+
+ int (*set_mode)(struct usb_phy *x,
+ enum usb_current_mode mode);
+
};
/* for board-specific init logic */
@@ -213,6 +229,15 @@ usb_phy_vbus_off(struct usb_phy *x)
return x->set_vbus(x, false);
}
+static inline int
+usb_phy_set_mode(struct usb_phy *x, enum usb_current_mode mode)
+{
+ if (!x || !x->set_mode)
+ return 0;
+
+ return x->set_mode(x, mode);
+}
+
/* for usb host and peripheral controller drivers */
#if IS_ENABLED(CONFIG_USB_PHY)
extern struct usb_phy *usb_get_phy(enum usb_phy_type type);
@@ -334,6 +359,24 @@ usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed)
return 0;
}
+static inline int
+usb_phy_notify_suspend(struct usb_phy *x, enum usb_device_speed speed)
+{
+	if (x && x->notify_suspend)
+		return x->notify_suspend(x, speed);
+
+	return 0;
+}
+
+static inline int
+usb_phy_notify_resume(struct usb_phy *x, enum usb_device_speed speed)
+{
+	if (x && x->notify_resume)
+		return x->notify_resume(x, speed);
+
+	return 0;
+}
+
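
A short sketch of a controller driver exercising the new PHY hooks. The wrappers above are NULL-safe, so the calls can be unconditional; where 'speed' and the role transition come from is controller-specific and hypothetical here.

#include <linux/usb/phy.h>

static void example_bus_suspend(struct usb_phy *phy,
				enum usb_device_speed speed)
{
	usb_phy_notify_suspend(phy, speed);	/* PHY may enter low power */
}

static void example_start_host_role(struct usb_phy *phy)
{
	usb_phy_set_mode(phy, CUR_USB_MODE_HOST);
}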
/* notifiers */
static inline int
usb_register_notifier(struct usb_phy *x, struct notifier_block *nb)