summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml19
-rw-r--r--Documentation/devicetree/bindings/pci/layerscape-pci.txt7
-rw-r--r--drivers/pci/controller/dwc/Kconfig19
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c2003
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c117
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c457
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h1
-rw-r--r--include/dt-bindings/soc/imx8_hsio.h31
10 files changed, 2392 insertions, 265 deletions
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
index acea1cd444fd..8d944d02198f 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
@@ -25,6 +25,8 @@ properties:
- fsl,imx6qp-pcie
- fsl,imx7d-pcie
- fsl,imx8mq-pcie
+ - fsl,imx8qm-pcie
+ - fsl,imx8qxp-pcie
reg:
items:
@@ -148,6 +150,23 @@ properties:
the three PCIe PHY powers. This regulator can be supplied by both
1.8v and 3.3v voltage supplies (optional required).
+ hsio-cfg:
+ description: HSIO (high-speed I/O) configuration mode to use when the
+ PCIe node is enabled.
+ mode 1: pciea 2 lanes and one sata ahci port.
+ mode 2: pciea 1 lane, pcieb 1 lane and one sata ahci port.
+ mode 3: pciea 2 lanes, pcieb 1 lane.
+
+ local-addr:
+ description: the local address used in hsio module.
+
+ reset-names:
+ description: Must contain the entry "clkreq".
+
+ l1ss-disabled:
+ description: If present, the L1 substates (L1SS) are forcibly disabled
+ even when they are supported by the chip.
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index a0ce177c9eb8..5b38c2ec1fcd 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -25,6 +25,7 @@ Required properties:
EP mode:
"fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"
"fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep"
+ "fsl,ls1028a-pcie-ep", "fsl,ls-pcie-ep"
"fsl,ls2088a-pcie-ep", "fsl,ls-pcie-ep"
"fsl,lx2160ar2-pcie-ep", "fsl,ls-pcie-ep"
- reg: base addresses and lengths of the PCIe controller register blocks.
@@ -38,12 +39,16 @@ Required properties:
......
- fsl,pcie-scfg: Must include two entries.
The first entry must be a link to the SCFG device node
- The second entry must be '0' or '1' based on physical PCIe controller index.
+ The second entry is the physical PCIe controller index starting from '0'.
This is used to get SCFG PEXN registers
- dma-coherent: Indicates that the hardware IP block can ensure the coherency
of the data transferred from/to the IP block. This can avoid the software
cache flush/invalid actions, and improve the performance significantly.
+Optional properties:
+- big-endian: If the PEX_LUT and PF register block is in big-endian, specify
+ this property.
+
Example:
pcie@3400000 {
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 76c0a63a3f64..6f14aacfedf7 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -94,10 +94,27 @@ config PCI_EXYNOS
functions to implement the driver.
config PCI_IMX6
- bool "Freescale i.MX6/7/8 PCIe controller"
+ tristate "Freescale i.MX6/7/8 PCIe driver"
+
+config PCI_IMX6_HOST
+ bool "Freescale i.MX6/7/8 PCIe controller host mode"
depends on ARCH_MXC || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller host mode in the
+ iMX6/7/8 SoCs to work in host (root complex) mode.
+
+config PCI_IMX6_EP
+ bool "Freescale i.MX6/7/8 PCIe controller endpoint mode"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller endpoint mode in the
+ iMX6/7/8 SoCs to work in endpoint mode.
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 80fc98acf097..efa8b8171109 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -8,6 +8,7 @@
* Author: Sean Cross <xobs@kosagi.com>
*/
+#include <dt-bindings/soc/imx8_hsio.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -17,6 +18,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -28,18 +30,49 @@
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/phy/phy.h>
#include <linux/reset.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/busfreq-imx.h>
+#include "../../pci.h"
#include "pcie-designware.h"
+#define IMX8MQ_PCIE_LINK_CAP_REG_OFFSET 0x7c
+#define IMX8MQ_PCIE_LINK_CAP_L1EL_64US GENMASK(18, 17)
+#define IMX8MQ_PCIE_L1SUB_CTRL1_REG_EN_MASK 0xf
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
+#define IMX8_HSIO_PCIEB_BASE_ADDR 0x5f010000
+#define IMX8MP_GPR_REG0 0x0
+#define IMX8MP_GPR_REG0_CLK_MOD_EN BIT(0)
+#define IMX8MP_GPR_REG0_PHY_APB_RST BIT(4)
+#define IMX8MP_GPR_REG0_PHY_INIT_RST BIT(5)
+#define IMX8MP_GPR_REG1 0x4
+#define IMX8MP_GPR_REG1_PM_EN_CORE_CLK BIT(0)
+#define IMX8MP_GPR_REG1_PLL_LOCK BIT(13)
+#define IMX8MP_GPR_REG2 0x8
+#define IMX8MP_GPR_REG2_P_PLL_MASK GENMASK(5, 0)
+#define IMX8MP_GPR_REG2_M_PLL_MASK GENMASK(15, 6)
+#define IMX8MP_GPR_REG2_S_PLL_MASK GENMASK(18, 16)
+#define IMX8MP_GPR_REG2_P_PLL (0xc << 0)
+#define IMX8MP_GPR_REG2_M_PLL (0x320 << 6)
+#define IMX8MP_GPR_REG2_S_PLL (0x4 << 16)
+#define IMX8MP_GPR_REG3 0xc
+#define IMX8MP_GPR_REG3_PLL_CKE BIT(17)
+#define IMX8MP_GPR_REG3_PLL_RST BIT(31)
+#define IMX8MP_GPR_PCIE_SSC_EN BIT(16)
+#define IMX8MP_GPR_PCIE_PWR_OFF BIT(17)
+#define IMX8MP_GPR_PCIE_CMN_RSTN BIT(18)
+#define IMX8MP_GPR_PCIE_AUX_EN BIT(19)
+#define IMX8MP_GPR_PCIE_REF_SEL_MASK GENMASK(25, 24)
+#define IMX8MP_GPR_PCIE_REF_PLL_SYS GENMASK(25, 24)
+#define IMX8MP_GPR_PCIE_REF_EXT_OSC BIT(25)
#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
@@ -49,46 +82,88 @@ enum imx6_pcie_variants {
IMX6QP,
IMX7D,
IMX8MQ,
+ IMX8MM,
+ IMX8QM,
+ IMX8QXP,
+ IMX8MP,
+ IMX8QXP_EP,
+ IMX8QM_EP,
+ IMX8MQ_EP,
+ IMX8MM_EP,
+ IMX8MP_EP,
+ IMX6SX_EP,
+ IMX7D_EP,
+ IMX6Q_EP,
+ IMX6QP_EP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP BIT(3)
+#define IMX6_PCIE_FLAG_SUPPORTS_L1SS BIT(4)
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
};
struct imx6_pcie {
struct dw_pcie *pci;
+ int clkreq_gpio;
+ int dis_gpio;
int reset_gpio;
bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
+ struct clk *pcie_phy_pclk;
+ struct clk *pcie_per;
+ struct clk *pciex2_per;
struct clk *pcie_inbound_axi;
struct clk *pcie;
struct clk *pcie_aux;
+ struct clk *phy_per;
+ struct clk *misc_per;
struct regmap *iomuxc_gpr;
u32 controller_id;
struct reset_control *pciephy_reset;
+ struct reset_control *pciephy_perst;
struct reset_control *apps_reset;
struct reset_control *turnoff_reset;
+ struct reset_control *clkreq_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
+ u32 hsio_cfg;
+ u32 ext_osc;
+ u32 local_addr;
+ u32 l1ss_clkreq;
+ int link_gen;
struct regulator *vpcie;
struct regulator *vph;
void __iomem *phy_base;
+ void __iomem *hsmix_base;
/* power domain for pcie */
struct device *pd_pcie;
+ /* power domain for pcie csr access */
+ struct device *pd_pcie_per;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
+ /* power domain for hsio gpio used by pcie */
+ struct device *pd_hsio_gpio;
+ struct device_link *pd_link;
+ struct device_link *pd_per_link;
+ struct device_link *pd_phy_link;
+ struct device_link *pd_hsio_link;
+
const struct imx6_pcie_drvdata *drvdata;
+ struct regulator *epdev_on;
+ struct phy *phy;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -108,6 +183,8 @@ struct imx6_pcie {
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK BIT(16)
+#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
#define PCIE_PHY_ATEOVRD_EN BIT(2)
@@ -119,19 +196,11 @@ struct imx6_pcie {
#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
-#define PCIE_PHY_RX_ASIC_OUT 0x100D
-#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
-
/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4 0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29
-#define PCIE_PHY_CMN_REG15 0x54
-#define PCIE_PHY_CMN_REG15_DLY_4 BIT(2)
-#define PCIE_PHY_CMN_REG15_PLL_PD BIT(5)
-#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7)
-
#define PCIE_PHY_CMN_REG24 0x90
#define PCIE_PHY_CMN_REG24_RX_EQ BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3)
@@ -139,10 +208,168 @@ struct imx6_pcie {
#define PCIE_PHY_CMN_REG26 0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
+#define PCIE_PHY_CMN_REG62 0x188
+#define PCIE_PHY_CMN_REG62_PLL_CLK_OUT 0x08
+#define PCIE_PHY_CMN_REG64 0x190
+#define PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM 0x8C
+#define PCIE_PHY_CMN_REG75 0x1D4
+#define PCIE_PHY_CMN_REG75_PLL_DONE 0x3
+#define PCIE_PHY_TRSV_REG5 0x414
+#define PCIE_PHY_TRSV_REG5_GEN1_DEEMP 0x2D
+#define PCIE_PHY_TRSV_REG6 0x418
+#define PCIE_PHY_TRSV_REG6_GEN2_DEEMP 0xF
+
#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
+/* iMX8 HSIO registers */
+#define IMX8QM_PHYX2_LPCG_OFFSET 0x00000
+#define IMX8QM_PHYX2_LPCG_PCLK0_MASK GENMASK(17, 16)
+#define IMX8QM_PHYX2_LPCG_PCLK1_MASK GENMASK(21, 20)
+#define IMX8QM_CSR_PHYX2_OFFSET 0x90000
+#define IMX8QM_CSR_PHYX1_OFFSET 0xA0000
+#define IMX8QM_CSR_PHYX_STTS0_OFFSET 0x4
+#define IMX8QM_CSR_PCIEA_OFFSET 0xB0000
+#define IMX8QM_CSR_PCIEB_OFFSET 0xC0000
+#define IMX8QM_CSR_PCIE_CTRL1_OFFSET 0x4
+#define IMX8QM_CSR_PCIE_CTRL2_OFFSET 0x8
+#define IMX8QM_CSR_PCIE_STTS0_OFFSET 0xC
+#define IMX8QM_CSR_MISC_OFFSET 0xE0000
+
+#define IMX8QM_CTRL_LTSSM_ENABLE BIT(4)
+#define IMX8QM_CTRL_READY_ENTR_L23 BIT(5)
+#define IMX8QM_CTRL_PM_XMT_TURNOFF BIT(9)
+#define IMX8QM_CTRL_BUTTON_RST_N BIT(21)
+#define IMX8QM_CTRL_PERST_N BIT(22)
+#define IMX8QM_CTRL_POWER_UP_RST_N BIT(23)
+
+#define IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2 BIT(13)
+#define IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST BIT(19)
+#define IMX8QM_STTS0_LANE0_TX_PLL_LOCK BIT(4)
+#define IMX8QM_STTS0_LANE1_TX_PLL_LOCK BIT(12)
+
+#define IMX8QM_PCIE_TYPE_MASK GENMASK(27, 24)
+
+#define IMX8QM_PHYX2_CTRL0_APB_MASK 0x3
+#define IMX8QM_PHY_APB_RSTN_0 BIT(0)
+#define IMX8QM_PHY_APB_RSTN_1 BIT(1)
+
+#define IMX8QM_MISC_IOB_RXENA BIT(0)
+#define IMX8QM_MISC_IOB_TXENA BIT(1)
+#define IMX8QM_CSR_MISC_IOB_A_0_TXOE BIT(2)
+#define IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK (0x3 << 3)
+#define IMX8QM_CSR_MISC_IOB_A_0_M1M0_2 BIT(4)
+#define IMX8QM_MISC_PHYX1_EPCS_SEL BIT(12)
+#define IMX8QM_MISC_PCIE_AB_SELECT BIT(13)
+#define IMX8QM_MISC_CLKREQ_1 BIT(22)
+#define IMX8QM_MISC_CLKREQ_0 BIT(23)
+#define IMX8QM_MISC_CLKREQ_OVERRIDE_EN_1 BIT(24)
+#define IMX8QM_MISC_CLKREQ_OVERRIDE_EN_0 BIT(25)
+
+#define IMX8MM_GPR_PCIE_REF_CLK_SEL (0x3 << 24)
+#define IMX8MM_GPR_PCIE_REF_CLK_PLL (0x3 << 24)
+#define IMX8MM_GPR_PCIE_REF_CLK_EXT (0x2 << 24)
+#define IMX8MM_GPR_PCIE_AUX_EN BIT(19)
+#define IMX8MM_GPR_PCIE_CMN_RST BIT(18)
+#define IMX8MM_GPR_PCIE_POWER_OFF BIT(17)
+#define IMX8MM_GPR_PCIE_SSC_EN BIT(16)
+
+static int imx6_pcie_cz_enabled;
+static void imx6_pcie_ltssm_disable(struct device *dev);
+
+static bool imx6_pcie_readable_reg(struct device *dev, unsigned int reg)
+{
+ enum imx6_pcie_variants variant;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ variant = imx6_pcie->drvdata->variant;
+ if (variant == IMX8QXP || variant == IMX8QXP_EP) {
+ switch (reg) {
+ case IMX8QM_CSR_PHYX1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET:
+ case IMX8QM_CSR_MISC_OFFSET:
+ case IMX8QM_CSR_PHYX1_OFFSET + IMX8QM_CSR_PHYX_STTS0_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_STTS0_OFFSET:
+ return true;
+
+ default:
+ return false;
+ }
+ } else {
+ switch (reg) {
+ case IMX8QM_PHYX2_LPCG_OFFSET:
+ case IMX8QM_CSR_PHYX2_OFFSET:
+ case IMX8QM_CSR_PHYX1_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET:
+ case IMX8QM_CSR_MISC_OFFSET:
+ case IMX8QM_CSR_PHYX2_OFFSET + IMX8QM_CSR_PHYX_STTS0_OFFSET:
+ case IMX8QM_CSR_PHYX1_OFFSET + IMX8QM_CSR_PHYX_STTS0_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_STTS0_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_STTS0_OFFSET:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+static bool imx6_pcie_writeable_reg(struct device *dev, unsigned int reg)
+{
+ enum imx6_pcie_variants variant;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ variant = imx6_pcie->drvdata->variant;
+ if (variant == IMX8QXP || variant == IMX8QXP_EP) {
+ switch (reg) {
+ case IMX8QM_CSR_PHYX1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET:
+ case IMX8QM_CSR_MISC_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ return true;
+
+ default:
+ return false;
+ }
+ } else {
+ switch (reg) {
+ case IMX8QM_PHYX2_LPCG_OFFSET:
+ case IMX8QM_CSR_PHYX2_OFFSET:
+ case IMX8QM_CSR_PHYX1_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET:
+ case IMX8QM_CSR_MISC_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+static const struct regmap_config imx6_pcie_regconfig = {
+ .max_register = IMX8QM_CSR_MISC_OFFSET,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .num_reg_defaults_raw = IMX8QM_CSR_MISC_OFFSET / sizeof(uint32_t) + 1,
+ .readable_reg = imx6_pcie_readable_reg,
+ .writeable_reg = imx6_pcie_writeable_reg,
+ .cache_type = REGCACHE_NONE,
+};
+
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -294,8 +521,15 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
unsigned int fsr, struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
- unsigned long instr = *(unsigned long *)pc;
- int reg = (instr >> 12) & 15;
+ unsigned long instr;
+ int reg;
+
+ /* if the abort from user-space, just return and report it */
+ if (user_mode(regs))
+ return 1;
+
+ instr = *(unsigned long *)pc;
+ reg = (instr >> 12) & 15;
/*
* If the instruction being executed was a read,
@@ -324,10 +558,43 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
}
#endif
+static void imx6_pcie_detach_pd(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ if (imx6_pcie->pd_hsio_link && !IS_ERR(imx6_pcie->pd_hsio_link))
+ device_link_del(imx6_pcie->pd_hsio_link);
+ if (imx6_pcie->pd_hsio_gpio && !IS_ERR(imx6_pcie->pd_hsio_gpio))
+ dev_pm_domain_detach(imx6_pcie->pd_hsio_gpio, true);
+ if (imx6_pcie->pd_phy_link && !IS_ERR(imx6_pcie->pd_phy_link))
+ device_link_del(imx6_pcie->pd_phy_link);
+ if (imx6_pcie->pd_pcie_phy && !IS_ERR(imx6_pcie->pd_pcie_phy))
+ dev_pm_domain_detach(imx6_pcie->pd_pcie_phy, true);
+ if (imx6_pcie->pd_per_link && !IS_ERR(imx6_pcie->pd_per_link))
+ device_link_del(imx6_pcie->pd_per_link);
+ if (imx6_pcie->pd_pcie_per && !IS_ERR(imx6_pcie->pd_pcie_per))
+ dev_pm_domain_detach(imx6_pcie->pd_pcie_per, true);
+ if (imx6_pcie->pd_link && !IS_ERR(imx6_pcie->pd_link))
+ device_link_del(imx6_pcie->pd_link);
+ if (imx6_pcie->pd_pcie && !IS_ERR(imx6_pcie->pd_pcie))
+ dev_pm_domain_detach(imx6_pcie->pd_pcie, true);
+
+ imx6_pcie->pd_hsio_gpio = NULL;
+ imx6_pcie->pd_hsio_link = NULL;
+ imx6_pcie->pd_pcie_phy = NULL;
+ imx6_pcie->pd_phy_link = NULL;
+ imx6_pcie->pd_pcie_per = NULL;
+ imx6_pcie->pd_per_link = NULL;
+ imx6_pcie->pd_pcie = NULL;
+ imx6_pcie->pd_link = NULL;
+}
+
static int imx6_pcie_attach_pd(struct device *dev)
{
+ int ret = 0;
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
struct device_link *link;
+ struct device *pd_dev;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
@@ -346,11 +613,15 @@ static int imx6_pcie_attach_pd(struct device *dev)
if (!link) {
dev_err(dev, "Failed to add device_link to pcie pd.\n");
return -EINVAL;
+ } else {
+ imx6_pcie->pd_link = link;
}
imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pd_pcie_phy))
- return PTR_ERR(imx6_pcie->pd_pcie_phy);
+ if (IS_ERR(imx6_pcie->pd_pcie_phy)) {
+ ret = PTR_ERR(imx6_pcie->pd_pcie_phy);
+ goto err_ret;
+ }
link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
@@ -358,56 +629,77 @@ static int imx6_pcie_attach_pd(struct device *dev)
DL_FLAG_RPM_ACTIVE);
if (!link) {
dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_ret;
+ } else {
+ imx6_pcie->pd_phy_link = link;
}
- return 0;
-}
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8QM:
+ case IMX8QM_EP:
+ /*
+ * PCIA CSR would be touched during the initialization of the
+ * PCIEB of 8QM.
+ * Enable the PCIEA PD for this case here.
+ */
+ if (imx6_pcie->controller_id) {
+ pd_dev = dev_pm_domain_attach_by_name(dev, "pcie_per");
+ if (IS_ERR(pd_dev)) {
+ ret = PTR_ERR(pd_dev);
+ goto err_ret;
+ } else {
+ imx6_pcie->pd_pcie_per = pd_dev;
+ }
+ link = device_link_add(dev, imx6_pcie->pd_pcie_per,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to link pcie_per pd\n");
+ ret = -EINVAL;
+ goto err_ret;
+ } else {
+ imx6_pcie->pd_per_link = link;
+ }
+ }
+ fallthrough;
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ pd_dev = dev_pm_domain_attach_by_name(dev, "hsio_gpio");
+ if (IS_ERR(pd_dev)) {
+ ret = PTR_ERR(pd_dev);
+ goto err_ret;
+ } else {
+ imx6_pcie->pd_hsio_gpio = pd_dev;
+ }
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
-{
- struct device *dev = imx6_pcie->pci->dev;
+ link = device_link_add(dev, imx6_pcie->pd_hsio_gpio,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to hsio_gpio pd.\n");
+ ret = -EINVAL;
+ goto err_ret;
+ } else {
+ imx6_pcie->pd_hsio_link = link;
+ }
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- reset_control_assert(imx6_pcie->pciephy_reset);
- reset_control_assert(imx6_pcie->apps_reset);
break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+ default:
break;
}
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- int ret = regulator_disable(imx6_pcie->vpcie);
+ return 0;
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
+err_ret:
+ imx6_pcie_detach_pd(dev);
+ return ret;
}
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
@@ -420,6 +712,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
+ case IMX6SX_EP:
ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
if (ret) {
dev_err(dev, "unable to enable pcie_axi clock\n");
@@ -430,7 +723,9 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
case IMX6QP:
+ case IMX6QP_EP:
case IMX6Q:
+ case IMX6Q_EP:
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
@@ -445,8 +740,14 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
case IMX7D:
+ case IMX7D_EP:
break;
case IMX8MQ:
+ case IMX8MM:
+ case IMX8MP:
+ case IMX8MQ_EP:
+ case IMX8MM_EP:
+ case IMX8MP_EP:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
dev_err(dev, "unable to enable pcie_aux clock\n");
@@ -465,9 +766,66 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
break;
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ case IMX8QM:
+ case IMX8QM_EP:
+ ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_axi clock\n");
+ return ret;
+ }
+ ret = clk_prepare_enable(imx6_pcie->pcie_per);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_per clock\n");
+ goto err_pcie_per;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->phy_per);
+ if (unlikely(ret)) {
+ dev_err(dev, "unable to enable phy per clock\n");
+ goto err_phy_per;
+ }
+ ret = clk_prepare_enable(imx6_pcie->misc_per);
+ if (unlikely(ret)) {
+ dev_err(dev, "unable to enable misc per clock\n");
+ goto err_misc_per;
+ }
+ /*
+ * PCIA CSR would be touched during the initialization of the
+ * PCIEB of 8QM.
+ * Enable the PCIEA peripheral clock for this case here.
+ */
+ if (imx6_pcie->drvdata->variant == IMX8QM
+ && imx6_pcie->controller_id == 1) {
+ ret = clk_prepare_enable(imx6_pcie->pcie_phy_pclk);
+ if (unlikely(ret)) {
+ dev_err(dev, "can't enable pciephyp clock\n");
+ goto err_pcie_phy_pclk;
+ }
+ ret = clk_prepare_enable(imx6_pcie->pciex2_per);
+ if (unlikely(ret)) {
+ dev_err(dev, "can't enable pciex2 per clock\n");
+ goto err_pciex2_per;
+ }
+ }
+ break;
+ default:
+ break;
}
return ret;
+err_pciex2_per:
+ clk_disable_unprepare(imx6_pcie->pcie_phy_pclk);
+err_pcie_phy_pclk:
+ clk_disable_unprepare(imx6_pcie->misc_per);
+err_misc_per:
+ clk_disable_unprepare(imx6_pcie->phy_per);
+err_phy_per:
+ clk_disable_unprepare(imx6_pcie->pcie_per);
+err_pcie_per:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ return ret;
}
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
@@ -483,62 +841,468 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
-static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static void imx8_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
+ u32 val, offset = 0, cond = 0;
+ int ret;
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
- int ret;
- if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
- ret = regulator_enable(imx6_pcie->vpcie);
- if (ret) {
- dev_err(dev, "failed to enable vpcie regulator: %d\n",
- ret);
- return;
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MP:
+ case IMX8MP_EP:
+ if (phy_init(imx6_pcie->phy) != 0)
+ dev_err(dev, "Waiting for PHY PLL ready timeout!\n");
+ /* wait for core_clk enabled */
+ for (cond = 0; cond < 2000; cond++) {
+ val = readl(imx6_pcie->hsmix_base + IMX8MP_GPR_REG1);
+ if (val & IMX8MP_GPR_REG1_PM_EN_CORE_CLK)
+ break;
+ udelay(10);
+ }
+ if (cond >= 2000)
+ ret = -ETIMEDOUT;
+ break;
+ case IMX8MM:
+ case IMX8MM_EP:
+ for (cond = 0; cond < 2000; cond++) {
+ val = readl(imx6_pcie->phy_base + PCIE_PHY_CMN_REG75);
+ if (val == PCIE_PHY_CMN_REG75_PLL_DONE)
+ break;
+ udelay(10);
}
+ if (cond >= 2000)
+ ret = -ETIMEDOUT;
+ break;
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ case IMX8QM:
+ case IMX8QM_EP:
+ if (imx6_pcie->hsio_cfg == PCIEAX1PCIEBX1SATA) {
+ if (imx6_pcie->controller_id == 0) /* pciea 1 lanes */
+ cond = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
+ else /* pcieb 1 lanes */
+ cond = IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
+ offset = IMX8QM_CSR_PHYX2_OFFSET + 0x4;
+ } else if (imx6_pcie->hsio_cfg == PCIEAX2PCIEBX1) {
+ offset = IMX8QM_CSR_PHYX2_OFFSET
+ + imx6_pcie->controller_id * SZ_64K
+ + IMX8QM_CSR_PHYX_STTS0_OFFSET;
+ cond = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
+ if (imx6_pcie->controller_id == 0) /* pciea 2 lanes */
+ cond |= IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
+ } else if (imx6_pcie->hsio_cfg == PCIEAX2SATA) {
+ offset = IMX8QM_CSR_PHYX2_OFFSET + 0x4;
+ cond = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
+ cond |= IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
+ }
+
+ ret = regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, offset,
+ val, val & cond,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT);
+ break;
+ default:
+ break;
}
+ if (ret)
+ dev_err(dev, "PCIe PLL lock timeout\n");
+ else
+ dev_info(dev, "PCIe PLL is locked.\n");
+}
+
+static void imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+{
+ int ret;
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
- if (ret) {
+ if (ret)
dev_err(dev, "unable to enable pcie_phy clock\n");
- goto err_pcie_phy;
- }
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
- if (ret) {
+ if (ret)
dev_err(dev, "unable to enable pcie_bus clock\n");
- goto err_pcie_bus;
- }
ret = clk_prepare_enable(imx6_pcie->pcie);
- if (ret) {
+ if (ret)
dev_err(dev, "unable to enable pcie clock\n");
- goto err_pcie;
- }
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
+ if (ret)
dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
- }
/* allow the clocks to stabilize */
usleep_range(200, 500);
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6Q:
+ case IMX6Q_EP:
+ case IMX6QP:
+ case IMX6QP_EP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD,
+ IMX6Q_GPR1_PCIE_TEST_PD);
+ break;
+ case IMX6SX:
+ case IMX6SX_EP:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX7D:
+ case IMX7D_EP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ case IMX8MP:
+ case IMX8MP_EP:
+ phy_exit(imx6_pcie->phy);
+ phy_power_off(imx6_pcie->phy);
+ fallthrough;
+ case IMX8MQ:
+ case IMX8MM:
+ case IMX8MQ_EP:
+ case IMX8MM_EP:
+ clk_disable_unprepare(imx6_pcie->pcie_aux);
+ break;
+ case IMX8QM:
+ case IMX8QM_EP:
+ if (imx6_pcie->controller_id == 1) {
+ clk_disable_unprepare(imx6_pcie->pciex2_per);
+ clk_disable_unprepare(imx6_pcie->pcie_phy_pclk);
+ }
+ fallthrough;
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ clk_disable_unprepare(imx6_pcie->pcie_per);
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ clk_disable_unprepare(imx6_pcie->phy_per);
+ clk_disable_unprepare(imx6_pcie->misc_per);
+ break;
+ default:
+ break;
+ }
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ int i;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX7D:
+ case IMX7D_EP:
+ case IMX8MQ:
+ case IMX8MM:
+ case IMX8MQ_EP:
+ case IMX8MM_EP:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ fallthrough;
+ case IMX8MP:
+ case IMX8MP_EP:
+ imx6_pcie_ltssm_disable(dev);
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ reset_control_assert(imx6_pcie->pciephy_perst);
+ break;
+ case IMX6SX:
+ case IMX6SX_EP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ case IMX6QP_EP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ case IMX6Q_EP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+ break;
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ imx6_pcie_clk_enable(imx6_pcie);
+ /*
+ * Set the over ride low and enabled
+ * make sure that REF_CLK is turned on.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_CLKREQ_1,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_CLKREQ_OVERRIDE_EN_1,
+ IMX8QM_MISC_CLKREQ_OVERRIDE_EN_1);
+ val = IMX8QM_CSR_PCIEB_OFFSET;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_BUTTON_RST_N,
+ IMX8QM_CTRL_BUTTON_RST_N);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_PERST_N,
+ IMX8QM_CTRL_PERST_N);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_POWER_UP_RST_N,
+ IMX8QM_CTRL_POWER_UP_RST_N);
+ break;
+ case IMX8QM:
+ case IMX8QM_EP:
+ imx6_pcie_clk_enable(imx6_pcie);
+ /*
+ * Set the over ride low and enabled
+ * make sure that REF_CLK is turned on.
+ */
+ if (imx6_pcie->controller_id) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_CLKREQ_1,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_CLKREQ_OVERRIDE_EN_1,
+ IMX8QM_MISC_CLKREQ_OVERRIDE_EN_1);
+ } else {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_CLKREQ_0,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_CLKREQ_OVERRIDE_EN_0,
+ IMX8QM_MISC_CLKREQ_OVERRIDE_EN_0);
+ }
+ for (i = 0; i <= imx6_pcie->controller_id; i++) {
+ val = IMX8QM_CSR_PCIEA_OFFSET + i * SZ_64K;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_BUTTON_RST_N,
+ IMX8QM_CTRL_BUTTON_RST_N);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_PERST_N,
+ IMX8QM_CTRL_PERST_N);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_POWER_UP_RST_N,
+ IMX8QM_CTRL_POWER_UP_RST_N);
+ }
+ break;
+ }
+
+ if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+ int ret = regulator_disable(imx6_pcie->vpcie);
+
+ if (ret)
+ dev_err(dev, "failed to disable vpcie regulator: %d\n",
+ ret);
+ }
+}
+
+/*
+ * imx6_pcie_set_l1_latency - cap the RC's advertised L1 exit latency.
+ *
+ * On i.MX8MQ/8MM/8MP the L1 Exit Latency field of the Link Capabilities
+ * register is rewritten to "less than 64us"; otherwise ASPM will not
+ * enable L1/L1 substates.  All other variants are left untouched.
+ *
+ * NOTE(review): the register is accessed at dbi_base + 1MiB - presumably
+ * the controller's second (writable/shadow) register window on these
+ * parts; confirm against the reference manual.
+ */
+static void imx6_pcie_set_l1_latency(struct imx6_pcie *imx6_pcie)
+{
+	u32 val;
+	struct dw_pcie *pci = imx6_pcie->pci;
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8MQ:
+	case IMX8MM:
+	case IMX8MP:
+		/*
+		 * Configure the L1 latency of rc to less than 64us
+		 * Otherwise, the L1/L1SUB wouldn't be enabled by ASPM.
+		 */
+		dw_pcie_dbi_ro_wr_en(pci);
+		val = readl(pci->dbi_base + SZ_1M +
+			    IMX8MQ_PCIE_LINK_CAP_REG_OFFSET);
+		val &= ~PCI_EXP_LNKCAP_L1EL;
+		val |= IMX8MQ_PCIE_LINK_CAP_L1EL_64US;
+		writel(val, pci->dbi_base + SZ_1M +
+		       IMX8MQ_PCIE_LINK_CAP_REG_OFFSET);
+		dw_pcie_dbi_ro_wr_dis(pci);
+		break;
+	default:
+		break;
+	}
+}
+
+static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+ int ret, i;
+ u32 val, tmp;
+
+ if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
+ ret = regulator_enable(imx6_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ case IMX8QM:
+ case IMX8QM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ /* ClKs had been enabled */
+ break;
+ default:
+ imx6_pcie_clk_enable(imx6_pcie);
+ break;
+ }
/* Some boards don't have PCIe reset GPIO. */
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
gpio_set_value_cansleep(imx6_pcie->reset_gpio,
imx6_pcie->gpio_active_high);
- msleep(100);
+ msleep(20);
gpio_set_value_cansleep(imx6_pcie->reset_gpio,
!imx6_pcie->gpio_active_high);
}
switch (imx6_pcie->drvdata->variant) {
+ case IMX8QM:
+ case IMX8QM_EP:
+ if (imx6_pcie->controller_id)
+ /* Set the APB clock masks */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_PHYX2_LPCG_OFFSET,
+ IMX8QM_PHYX2_LPCG_PCLK0_MASK |
+ IMX8QM_PHYX2_LPCG_PCLK1_MASK,
+ IMX8QM_PHYX2_LPCG_PCLK0_MASK |
+ IMX8QM_PHYX2_LPCG_PCLK1_MASK);
+ fallthrough;
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ val = IMX8QM_CSR_PCIEA_OFFSET
+ + imx6_pcie->controller_id * SZ_64K;
+ /* bit19 PM_REQ_CORE_RST of pciex#_stts0 should be cleared. */
+ for (i = 0; i < 2000; i++) {
+ regmap_read(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_STTS0_OFFSET,
+ &tmp);
+ if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) == 0)
+ break;
+ udelay(10);
+ }
+
+ if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) != 0)
+ dev_err(dev, "ERROR PM_REQ_CORE_RST is still set.\n");
+
+ /* wait for phy pll lock firstly. */
+ imx8_pcie_wait_for_phy_pll_lock(imx6_pcie);
+ break;
case IMX8MQ:
+ case IMX8MM:
+ case IMX8MQ_EP:
+ case IMX8MM_EP:
reset_control_deassert(imx6_pcie->pciephy_reset);
+
+ imx8_pcie_wait_for_phy_pll_lock(imx6_pcie);
+
+ if (imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_L1SS)
+ /*
+ * Configure the CLK_REQ# high, let the L1SS
+ * automatically controlled by HW later.
+ */
+ reset_control_deassert(imx6_pcie->clkreq_reset);
+ imx6_pcie_set_l1_latency(imx6_pcie);
+ break;
+ case IMX8MP:
+ case IMX8MP_EP:
+ msleep(20);
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+ reset_control_deassert(imx6_pcie->pciephy_perst);
+
+ /* release pcie_phy_apb_reset and pcie_phy_init_resetn */
+ val = readl(imx6_pcie->hsmix_base + IMX8MP_GPR_REG0);
+ val |= IMX8MP_GPR_REG0_PHY_APB_RST;
+ val |= IMX8MP_GPR_REG0_PHY_INIT_RST;
+ writel(val, imx6_pcie->hsmix_base + IMX8MP_GPR_REG0);
+
+ val = imx6_pcie_grp_offset(imx6_pcie);
+ if (imx6_pcie->ext_osc) {
+ /*TODO Configure the external OSC as REF clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_REF_SEL_MASK,
+ IMX8MP_GPR_PCIE_REF_SEL_MASK);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_AUX_EN,
+ IMX8MP_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_SSC_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_PWR_OFF, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_CMN_RSTN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_REF_SEL_MASK,
+ IMX8MP_GPR_PCIE_REF_EXT_OSC);
+ } else {
+ /* Configure the internal PLL as REF clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_REF_SEL_MASK,
+ IMX8MP_GPR_PCIE_REF_PLL_SYS);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_AUX_EN,
+ IMX8MP_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_SSC_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_PWR_OFF, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_CMN_RSTN, 0);
+ }
+
+ phy_calibrate(imx6_pcie->phy);
+ /*
+ * GPR_PCIE_PHY_CTRL_BUS[3:0]
+ * 0:i_ssc_en 1:i_power_off
+ * 2:i_cmn_rstn 3:aux_en_glue.ctrl_bus
+ */
+ val = imx6_pcie_grp_offset(imx6_pcie);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MP_GPR_PCIE_CMN_RSTN,
+ IMX8MP_GPR_PCIE_CMN_RSTN);
+
+ imx8_pcie_wait_for_phy_pll_lock(imx6_pcie);
+
+ if (imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_L1SS)
+ /*
+ * Configure the CLK_REQ# high, let the L1SS
+ * automatically controlled by HW later.
+ */
+ reset_control_deassert(imx6_pcie->clkreq_reset);
+ imx6_pcie_set_l1_latency(imx6_pcie);
break;
case IMX7D:
+ case IMX7D_EP:
reset_control_deassert(imx6_pcie->pciephy_reset);
/* Workaround for ERR010728, failure of PCI-e PLL VCO to
@@ -563,58 +1327,248 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
break;
case IMX6SX:
+ case IMX6SX_EP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
break;
case IMX6QP:
+ case IMX6QP_EP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_SW_RST, 0);
usleep_range(200, 500);
break;
case IMX6Q: /* Nothing to do */
+ case IMX6Q_EP:
break;
}
return;
-
-err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- ret = regulator_disable(imx6_pcie->vpcie);
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
}
+/*
+ * imx6_pcie_configure_type - program the controller's device type
+ * (Root Complex vs Endpoint) into a variant-specific IOMUXC/CSR register.
+ *
+ * The drvdata mode (DW_PCIE_RC_TYPE / DW_PCIE_EP_TYPE) is first mapped
+ * to the PCIe port-type encoding, then written through the iomuxc_gpr
+ * regmap at a per-variant address and mask.
+ */
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
-	unsigned int mask, val;
+	unsigned int addr, mask, val, mode;
+	unsigned int variant = imx6_pcie->drvdata->variant;
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;

-	if (imx6_pcie->drvdata->variant == IMX8MQ &&
-	    imx6_pcie->controller_id == 1) {
-		mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
-		val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
-				 PCI_EXP_TYPE_ROOT_PORT);
-	} else {
-		mask = IMX6Q_GPR12_DEVICE_TYPE;
-		val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
-				 PCI_EXP_TYPE_ROOT_PORT);
+	/* Map the DWC driver mode onto the PCI Express device/port type. */
+	mode = imx6_pcie->drvdata->mode;
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		mode = PCI_EXP_TYPE_ROOT_PORT;
+		break;
+	case DW_PCIE_EP_TYPE:
+		mode = PCI_EXP_TYPE_ENDPOINT;
+		break;
+	default:
+		/*
+		 * NOTE(review): on an invalid mode we only log; 'mode' is
+		 * still written into the register below - confirm that an
+		 * early return is not intended here.
+		 */
+		dev_err(dev, "INVALID device type %d\n", mode);
	}

+	/* Pick the register and field that hold the type for this variant. */
+	switch (variant) {
+	case IMX8QM:
+	case IMX8QXP:
+	case IMX8QXP_EP:
+	case IMX8QM_EP:
+		/* i.MX8 HSIO: per-controller CSR block (PCIEA/PCIEB). */
+		if (imx6_pcie->controller_id)
+			addr = IMX8QM_CSR_PCIEB_OFFSET;
+		else
+			addr = IMX8QM_CSR_PCIEA_OFFSET;
+		mask = IMX8QM_PCIE_TYPE_MASK;
+		val = FIELD_PREP(IMX8QM_PCIE_TYPE_MASK, mode);
+		break;
+	case IMX8MQ:
+	case IMX8MQ_EP:
+		/* 8MQ's second controller uses a dedicated GPR12 field. */
+		if (imx6_pcie->controller_id == 1) {
+			addr = IOMUXC_GPR12;
+			mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+			val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, mode);
+		} else {
+			addr = IOMUXC_GPR12;
+			mask = IMX6Q_GPR12_DEVICE_TYPE;
+			val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
+		}
+		break;
+	default:
+		/* All i.MX6/7 variants share the GPR12 device-type field. */
+		addr = IOMUXC_GPR12;
+		mask = IMX6Q_GPR12_DEVICE_TYPE;
+		val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
+		break;
+	}
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, addr, mask, val);
}
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
+ int i;
+ unsigned int offset, val;
+
switch (imx6_pcie->drvdata->variant) {
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ case IMX8QM:
+ case IMX8QM_EP:
+ if (imx6_pcie->hsio_cfg == PCIEAX2SATA) {
+ /*
+ * bit 0 rx ena 1.
+ * bit12 PHY_X1_EPCS_SEL 1.
+ * bit13 phy_ab_select 0.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX2_OFFSET,
+ IMX8QM_PHYX2_CTRL0_APB_MASK,
+ IMX8QM_PHY_APB_RSTN_0
+ | IMX8QM_PHY_APB_RSTN_1);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PHYX1_EPCS_SEL,
+ IMX8QM_MISC_PHYX1_EPCS_SEL);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PCIE_AB_SELECT,
+ 0);
+ } else if (imx6_pcie->hsio_cfg == PCIEAX1PCIEBX1SATA) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX2_OFFSET,
+ IMX8QM_PHYX2_CTRL0_APB_MASK,
+ IMX8QM_PHY_APB_RSTN_0
+ | IMX8QM_PHY_APB_RSTN_1);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PHYX1_EPCS_SEL,
+ IMX8QM_MISC_PHYX1_EPCS_SEL);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PCIE_AB_SELECT,
+ IMX8QM_MISC_PCIE_AB_SELECT);
+ } else if (imx6_pcie->hsio_cfg == PCIEAX2PCIEBX1) {
+ /*
+ * bit 0 rx ena 1.
+ * bit12 PHY_X1_EPCS_SEL 0.
+ * bit13 phy_ab_select 1.
+ */
+ if (imx6_pcie->controller_id)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX1_OFFSET,
+ IMX8QM_PHY_APB_RSTN_0,
+ IMX8QM_PHY_APB_RSTN_0);
+ else
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX2_OFFSET,
+ IMX8QM_PHYX2_CTRL0_APB_MASK,
+ IMX8QM_PHY_APB_RSTN_0
+ | IMX8QM_PHY_APB_RSTN_1);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PHYX1_EPCS_SEL,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PCIE_AB_SELECT,
+ IMX8QM_MISC_PCIE_AB_SELECT);
+ }
+
+ if (imx6_pcie->ext_osc) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_IOB_RXENA,
+ IMX8QM_MISC_IOB_RXENA);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_IOB_TXENA,
+ 0);
+ } else {
+ /* Try to used the internal pll as ref clk */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_IOB_RXENA,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_IOB_TXENA,
+ IMX8QM_MISC_IOB_TXENA);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_CSR_MISC_IOB_A_0_TXOE
+ | IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK,
+ IMX8QM_CSR_MISC_IOB_A_0_TXOE
+ | IMX8QM_CSR_MISC_IOB_A_0_M1M0_2);
+ }
+
+ break;
+ case IMX8MM:
+ case IMX8MM_EP:
+ offset = imx6_pcie_grp_offset(imx6_pcie);
+
+ dev_info(imx6_pcie->pci->dev, "%s REF_CLK is used!.\n",
+ imx6_pcie->ext_osc ? "EXT" : "PLL");
+ if (imx6_pcie->ext_osc) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_AUX_EN,
+ IMX8MM_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_POWER_OFF, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_SSC_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ IMX8MM_GPR_PCIE_REF_CLK_EXT);
+ udelay(100);
+ /* Do the PHY common block reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_CMN_RST,
+ IMX8MM_GPR_PCIE_CMN_RST);
+ udelay(200);
+ } else {
+ /* Configure the internal PLL as REF clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_AUX_EN,
+ IMX8MM_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_POWER_OFF, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_SSC_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ IMX8MM_GPR_PCIE_REF_CLK_PLL);
+ udelay(100);
+ /* Configure the PHY */
+ writel(PCIE_PHY_CMN_REG62_PLL_CLK_OUT,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG62);
+ writel(PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG64);
+ /* Do the PHY common block reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_CMN_RST,
+ IMX8MM_GPR_PCIE_CMN_RST);
+ udelay(200);
+ }
+
+ /*
+ * In order to pass the compliance tests.
+ * Configure the TRSV regiser of iMX8MM PCIe PHY.
+ */
+ writel(PCIE_PHY_TRSV_REG5_GEN1_DEEMP,
+ imx6_pcie->phy_base + PCIE_PHY_TRSV_REG5);
+ writel(PCIE_PHY_TRSV_REG6_GEN2_DEEMP,
+ imx6_pcie->phy_base + PCIE_PHY_TRSV_REG6);
+
+ break;
case IMX8MQ:
+ case IMX8MQ_EP:
/*
* TODO: Currently this code assumes external
* oscillator is being used
@@ -635,11 +1589,62 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
IMX8MQ_GPR_PCIE_VREG_BYPASS,
0);
break;
+ case IMX8MP:
+ case IMX8MP_EP:
+ phy_power_on(imx6_pcie->phy);
+ dev_info(imx6_pcie->pci->dev, "%s REF_CLK is used!.\n",
+ imx6_pcie->ext_osc ? "EXT" : "PLL");
+ imx6_pcie_clk_enable(imx6_pcie);
+
+ /* Set P=12,M=800,S=4 and must set ICP=2'b01. */
+ val = readl(imx6_pcie->hsmix_base + IMX8MP_GPR_REG2);
+ val &= ~IMX8MP_GPR_REG2_P_PLL_MASK;
+ val |= IMX8MP_GPR_REG2_P_PLL;
+ val &= ~IMX8MP_GPR_REG2_M_PLL_MASK;
+ val |= IMX8MP_GPR_REG2_M_PLL;
+ val &= ~IMX8MP_GPR_REG2_S_PLL_MASK;
+ val |= IMX8MP_GPR_REG2_S_PLL;
+ writel(val, imx6_pcie->hsmix_base + IMX8MP_GPR_REG2);
+ /* wait greater than 1/F_FREF =1/2MHZ=0.5us */
+ udelay(1);
+
+ val = readl(imx6_pcie->hsmix_base + IMX8MP_GPR_REG3);
+ val |= IMX8MP_GPR_REG3_PLL_RST;
+ writel(val, imx6_pcie->hsmix_base + IMX8MP_GPR_REG3);
+ udelay(10);
+
+ /* Set 1 to pll_cke of GPR_REG3 */
+ val = readl(imx6_pcie->hsmix_base + IMX8MP_GPR_REG3);
+ val |= IMX8MP_GPR_REG3_PLL_CKE;
+ writel(val, imx6_pcie->hsmix_base + IMX8MP_GPR_REG3);
+
+ /* Lock time should be greater than 300cycle=300*0.5us=150us */
+ val = readl(imx6_pcie->hsmix_base + IMX8MP_GPR_REG1);
+ for (i = 0; i < 100; i++) {
+ val = readl(imx6_pcie->hsmix_base + IMX8MP_GPR_REG1);
+ if (val & IMX8MP_GPR_REG1_PLL_LOCK)
+ break;
+ udelay(10);
+ }
+ if (i >= 100)
+ dev_err(imx6_pcie->pci->dev,
+ "PCIe PHY PLL clock is not locked.\n");
+ else
+ dev_info(imx6_pcie->pci->dev,
+ "PCIe PHY PLL clock is locked.\n");
+
+ /* pcie_clock_module_en */
+ val = readl(imx6_pcie->hsmix_base + IMX8MP_GPR_REG0);
+ val |= IMX8MP_GPR_REG0_CLK_MOD_EN;
+ writel(val, imx6_pcie->hsmix_base + IMX8MP_GPR_REG0);
+ break;
case IMX7D:
+ case IMX7D_EP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
break;
case IMX6SX:
+ case IMX6SX_EP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
IMX6SX_GPR12_PCIE_RX_EQ_2);
@@ -741,20 +1746,42 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+/*
+ * imx6_pcie_ltssm_enable - kick off PCIe link training.
+ *
+ * The mechanism is variant specific: a GPR12 control bit on i.MX6,
+ * an "apps" reset line on i.MX7/8M, and a CSR CTRL2 bit on the
+ * i.MX8 HSIO (8QM/8QXP) parts.
+ */
static void imx6_pcie_ltssm_enable(struct device *dev)
{
+	u32 val;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6Q:
+	case IMX6Q_EP:
	case IMX6SX:
+	case IMX6SX_EP:
	case IMX6QP:
+	case IMX6QP_EP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
+	case IMX7D_EP:
	case IMX8MQ:
+	case IMX8MM:
+	case IMX8MP:
+	case IMX8MQ_EP:
+	case IMX8MM_EP:
+	case IMX8MP_EP:
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
+	case IMX8QXP:
+	case IMX8QXP_EP:
+	case IMX8QM:
+	case IMX8QM_EP:
+		/* Bit4 of the CTRL2 */
+		val = IMX8QM_CSR_PCIEA_OFFSET
+			+ imx6_pcie->controller_id * SZ_64K;
+		regmap_update_bits(imx6_pcie->iomuxc_gpr,
+				   val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+				   IMX8QM_CTRL_LTSSM_ENABLE,
+				   IMX8QM_CTRL_LTSSM_ENABLE);
+		break;
	}
}
@@ -766,15 +1793,23 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
u32 tmp;
int ret;
+ if (dw_pcie_link_up(pci)) {
+ dev_dbg(dev, "link is already up\n");
+ return 0;
+ }
+
+ dw_pcie_dbi_ro_wr_en(pci);
/*
* Force Gen1 operation when starting the link. In case the link is
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
- tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
- tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+ if (!imx6_pcie_cz_enabled) {
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+ }
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
@@ -783,11 +1818,21 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
if (ret)
goto err_reset_phy;
- if (pci->link_gen == 2) {
+ if (pci->link_gen >= 2) {
+ /* Fill up target link speed before speed change. */
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= pci->link_gen;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, tmp);
+
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp &= ~PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
/* Allow Gen2 mode after the link is up. */
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
+ tmp |= pci->link_gen;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -826,60 +1871,306 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dev_info(dev, "Link: Gen2 disabled\n");
}
+ dw_pcie_dbi_ro_wr_dis(pci);
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
+ msleep(100);
return 0;
err_reset_phy:
+ dw_pcie_dbi_ro_wr_dis(pci);
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
imx6_pcie_reset_phy(imx6_pcie);
+ if (!imx6_pcie_cz_enabled) {
+ imx6_pcie_clk_disable(imx6_pcie);
+ if (imx6_pcie->vpcie != NULL)
+ regulator_disable(imx6_pcie->vpcie);
+ if (imx6_pcie->epdev_on != NULL)
+ regulator_disable(imx6_pcie->epdev_on);
+ if (gpio_is_valid(imx6_pcie->dis_gpio))
+ gpio_set_value_cansleep(imx6_pcie->dis_gpio, 0);
+ }
+
return ret;
}
-static int imx6_pcie_host_init(struct pcie_port *pp)
+static void pci_imx_set_msi_en(struct pcie_port *pp)
{
+ u16 val;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
- imx6_setup_phy_mpll(imx6_pcie);
+ if (pci_msi_enabled()) {
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ val &= ~PCI_MSI_FLAGS_64BIT;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+}
- return 0;
+/*
+ * imx6_pcie_stop_link - dw_pcie_ops.stop_link callback.
+ * Halts link training by disabling the LTSSM for this variant.
+ */
+static void imx6_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct device *dev = pci->dev;
+
+	/* turn off pcie ltssm */
+	imx6_pcie_ltssm_disable(dev);
+}
-static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .host_init = imx6_pcie_host_init,
-};
+/*
+ * imx6_pcie_cpu_addr_fixup - translate a CPU address into the address
+ * the PCIe controller's bus-master interface sees ("local address").
+ *
+ * Only active on variants carrying IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP;
+ * all others get the CPU address back unchanged.  In RC mode the rebase
+ * offset is the start of the first MEM bridge window; in EP mode it is
+ * the endpoint's outbound window base (ep->phys_base).  The DT-provided
+ * 'local-addr' value is the controller-side base.
+ */
+static u64 imx6_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
+{
+	unsigned int offset;
+	struct dw_pcie_ep *ep = &pcie->ep;
+	struct pcie_port *pp = &pcie->pp;
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pcie);
+	struct resource_entry *entry;
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP))
+		return cpu_addr;
+
+	if (imx6_pcie->drvdata->mode == DW_PCIE_RC_TYPE) {
+		/* NOTE(review): assumes at least one MEM window exists -
+		 * 'entry' is dereferenced without a NULL check. */
+		entry = resource_list_first_type(&pp->bridge->windows,
+						 IORESOURCE_MEM);
+		offset = entry->res->start;
+	} else {
+		offset = ep->phys_base;
+	}
+
+	return (cpu_addr + imx6_pcie->local_addr - offset);
+}
static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = imx6_pcie_start_link,
+ .stop_link = imx6_pcie_stop_link,
+ .cpu_addr_fixup = imx6_pcie_cpu_addr_fixup,
};
-#ifdef CONFIG_PM_SLEEP
+/*
+ * imx_pcie_ep_init - dw_pcie_ep_ops.ep_init callback.
+ * Clears all six BARs so the endpoint starts from a clean state
+ * before the EP function driver configures them.
+ */
+static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	enum pci_barno bar;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+}
+
+/*
+ * imx_pcie_ep_raise_irq - dw_pcie_ep_ops.raise_irq callback.
+ * Dispatches legacy/MSI/MSI-X interrupt generation to the generic
+ * DWC helpers; rejects unknown types with -EINVAL.
+ */
+static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				 enum pci_epc_irq_type type,
+				 u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+	case PCI_EPC_IRQ_MSI:
+		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	case PCI_EPC_IRQ_MSIX:
+		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): unreachable - every switch arm returns above. */
+	return 0;
+}
+
+/*
+ * iMX8QM/iMXQXP: Bar1/3/5 are reserved.
+ */
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
+};
+
+static const struct pci_epc_features imx8m_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_1 | 1 << BAR_3,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features imx6q_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_0 | 1 << BAR_1 | 1 << BAR_2,
+ .align = SZ_64K,
+};
+
+/*
+ * imx_pcie_ep_get_features - dw_pcie_ep_ops.get_features callback.
+ * Selects the EPC feature table (reserved BARs, MSI capability,
+ * alignment) matching the SoC variant.
+ */
+static const struct pci_epc_features*
+imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8QM_EP:
+	case IMX8QXP_EP:
+		return &imx8q_pcie_epc_features;
+	case IMX8MQ_EP:
+	case IMX8MM_EP:
+	case IMX8MP_EP:
+	case IMX7D_EP:
+	case IMX6SX_EP:
+		return &imx8m_pcie_epc_features;
+	default:
+		return &imx6q_pcie_epc_features;
+	}
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = imx_pcie_ep_init,
+ .raise_irq = imx_pcie_ep_raise_irq,
+ .get_features = imx_pcie_ep_get_features,
+};
+
+/*
+ * imx_add_pcie_ep - register this controller as a PCIe endpoint.
+ *
+ * Locates the dbi2 (shadow) register window - at +1MiB on the i.MX8M
+ * family, +4KiB elsewhere - maps the DT "addr_space" resource as the
+ * outbound window, initializes the DWC EP core, and immediately starts
+ * link training.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int imx_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+			   struct platform_device *pdev)
+{
+	int ret;
+	unsigned int pcie_dbi2_offset;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	/* dbi2 offset differs per family (see function comment). */
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8MQ_EP:
+	case IMX8MM_EP:
+	case IMX8MP_EP:
+		pcie_dbi2_offset = SZ_1M;
+		break;
+	default:
+		pcie_dbi2_offset = SZ_4K;
+		break;
+	}
+	pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+	ep->page_size = SZ_64K;
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+	/* Start LTSSM. */
+	imx6_pcie_ltssm_enable(dev);
+
+	return 0;
+}
+
+/*
+ * imx6_pcie_ltssm_disable - stop PCIe link training.
+ *
+ * Mirror of imx6_pcie_ltssm_enable(): clears the GPR12 control bit on
+ * i.MX6, asserts the "apps" reset on i.MX7/8M, and clears the LTSSM
+ * enable plus the L23-entry-ready bit in the CSR CTRL2 register on the
+ * i.MX8 HSIO (8QM/8QXP) parts.  Unsupported variants only log an error.
+ */
static void imx6_pcie_ltssm_disable(struct device *dev)
{
+	u32 val;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
+	case IMX6SX_EP:
	case IMX6QP:
+	case IMX6QP_EP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
+	case IMX7D_EP:
+	case IMX8MQ:
+	case IMX8MM:
+	case IMX8MP:
+	case IMX8MQ_EP:
+	case IMX8MM_EP:
+	case IMX8MP_EP:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
+	case IMX8QXP:
+	case IMX8QXP_EP:
+	case IMX8QM:
+	case IMX8QM_EP:
+		/* Bit4 of the CTRL2 */
+		val = IMX8QM_CSR_PCIEA_OFFSET
+			+ imx6_pcie->controller_id * SZ_64K;
+		regmap_update_bits(imx6_pcie->iomuxc_gpr,
+				   val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+				   IMX8QM_CTRL_LTSSM_ENABLE, 0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr,
+				   val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+				   IMX8QM_CTRL_READY_ENTR_L23, 0);
+		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}
+/*
+ * bus_freq_store - sysfs 'bus_freq' write handler (write-only attribute).
+ *
+ * A non-zero hex value requests the high bus-frequency setpoint, zero
+ * releases it, via the NXP busfreq API (request_bus_freq/release_bus_freq
+ * - presumably the i.MX vendor busfreq driver; not in mainline).
+ *
+ * NOTE(review): kernel style prefers kstrtou32() over sscanf() for
+ * single-value sysfs input parsing.
+ */
+static ssize_t bus_freq_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	u32 bus_freq;
+
+	ret = sscanf(buf, "%x\n", &bus_freq);
+	if (ret != 1)
+		return -EINVAL;
+	if (bus_freq) {
+		dev_info(dev, "pcie request bus freq high.\n");
+		request_bus_freq(BUS_FREQ_HIGH);
+	} else {
+		dev_info(dev, "pcie release bus freq high.\n");
+		release_bus_freq(BUS_FREQ_HIGH);
+	}
+
+	return count;
+}
+static DEVICE_ATTR_WO(bus_freq);
+
+static struct attribute *imx_pcie_rc_attrs[] = {
+ &dev_attr_bus_freq.attr,
+ NULL
+};
+
+static struct attribute_group imx_pcie_attrgroup = {
+ .attrs = imx_pcie_rc_attrs,
+};
+
+/*
+ * imx6_pcie_clkreq_enable - hand CLK_REQ# control back to hardware.
+ *
+ * Called after link-up when L1SS is in use: clears the CLK_REQ override
+ * enable in the variant's GPR block so the L1 substate hardware drives
+ * the signal.  No-op on variants without this override.
+ */
+static void imx6_pcie_clkreq_enable(struct imx6_pcie *imx6_pcie)
+{
+	/*
+	 * If the L1SS is supported, disable the override after link up.
+	 * Let the CLK_REQ# be controlled by HW L1SS automatically.
+	 */
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8MQ:
+	case IMX8MM:
+	case IMX8MP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr,
+				   imx6_pcie_grp_offset(imx6_pcie),
+				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+				   0);
+		break;
+	default:
+		break;
+	}; /* NOTE(review): stray ';' - harmless null statement, can be dropped. */
+}
+
+#ifdef CONFIG_PM_SLEEP
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
+ int i;
+ u32 dst, val;
struct device *dev = imx6_pcie->pci->dev;
/* Some variants have a turnoff reset in DT */
@@ -892,12 +2183,51 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
/* Others poke directly at IOMUXC registers */
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
+ case IMX6SX_EP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF,
IMX6SX_GPR12_PCIE_PM_TURN_OFF);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
break;
+ case IMX6QP:
+ case IMX6QP_EP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+ break;
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ case IMX8QM:
+ case IMX8QM_EP:
+ dst = IMX8QM_CSR_PCIEA_OFFSET + imx6_pcie->controller_id * SZ_64K;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ dst + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_PM_XMT_TURNOFF,
+ IMX8QM_CTRL_PM_XMT_TURNOFF);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ dst + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_PM_XMT_TURNOFF,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ dst + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_READY_ENTR_L23,
+ IMX8QM_CTRL_READY_ENTR_L23);
+ /* check the L2 is entered or not. */
+ for (i = 0; i < 10000; i++) {
+ regmap_read(imx6_pcie->iomuxc_gpr,
+ dst + IMX8QM_CSR_PCIE_STTS0_OFFSET,
+ &val);
+ if (val & IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2)
+ break;
+ udelay(10);
+ }
+ if ((val & IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2) == 0)
+ dev_err(dev, "PCIE%d can't enter into L2.\n",
+ imx6_pcie->controller_id);
+ break;
default:
dev_err(dev, "PME_Turn_Off not implemented\n");
return;
@@ -914,39 +2244,27 @@ pm_turnoff_sleep:
usleep_range(1000, 10000);
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
-{
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- case IMX8MQ:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
- default:
- break;
- }
-}
-
+/*
+ * imx6_pcie_suspend_noirq - noirq-phase suspend handler.
+ *
+ * On i.MX6Q, asserts the PHY test-powerdown bit (GPR1 bit18) as the
+ * ERR005723 workaround instead of a real L2 entry.  On all other
+ * variants with SUSPEND support: send PME_Turn_Off, stop the LTSSM,
+ * then gate the clocks - LTSSM must be disabled while clocks still run.
+ */
static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;
-
-	imx6_pcie_pm_turnoff(imx6_pcie);
-	imx6_pcie_clk_disable(imx6_pcie);
-	imx6_pcie_ltssm_disable(dev);
+	if (unlikely(imx6_pcie->drvdata->variant == IMX6Q)) {
+		/*
+		 * L2 can exit by 'reset' or Inband beacon (from remote EP)
+		 * toggling phy_powerdown has same effect as 'inband beacon'
+		 * So, toggle bit18 of GPR1, used as a workaround of errata
+		 * ERR005723 "PCIe PCIe does not support L2 Power Down"
+		 */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD,
+				   IMX6Q_GPR1_PCIE_TEST_PD);
+	} else {
+		imx6_pcie_pm_turnoff(imx6_pcie);
+		imx6_pcie_ltssm_disable(dev);
+		imx6_pcie_clk_disable(imx6_pcie);
+	}
+
	return 0;
}
@@ -959,15 +2277,28 @@ static int imx6_pcie_resume_noirq(struct device *dev)
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
-
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
- dw_pcie_setup_rc(pp);
-
- ret = imx6_pcie_start_link(imx6_pcie->pci);
- if (ret < 0)
- dev_info(dev, "pcie link is down after resume.\n");
+ if (unlikely(imx6_pcie->drvdata->variant == IMX6Q)) {
+ /*
+ * L2 can exit by 'reset' or Inband beacon (from remote EP)
+ * toggling phy_powerdown has same effect as 'inband beacon'
+ * So, toggle bit18 of GPR1, used as a workaround of errata
+ * ERR005723 "PCIe PCIe does not support L2 Power Down"
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0);
+ } else {
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+ imx6_pcie_deassert_core_reset(imx6_pcie);
+ dw_pcie_setup_rc(pp);
+ pci_imx_set_msi_en(pp);
+
+ ret = imx6_pcie_start_link(imx6_pcie->pci);
+ if (ret < 0)
+ dev_info(dev, "pcie link is down after resume.\n");
+ if (imx6_pcie->l1ss_clkreq)
+ imx6_pcie_clkreq_enable(imx6_pcie);
+ }
return 0;
}
@@ -978,16 +2309,31 @@ static const struct dev_pm_ops imx6_pcie_pm_ops = {
imx6_pcie_resume_noirq)
};
+static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
+};
+
+/*
+ * imx6_pcie_compliance_test_enable - early_param-style handler for the
+ * "pcie_cz_enabled=" kernel command-line option.
+ *
+ * Passing "pcie_cz_enabled=yes" sets the module-wide flag that puts the
+ * driver into PCIe compliance-test mode (e.g. the Gen1 link-cap clamp
+ * in imx6_pcie_start_link() is skipped).  Returns 1 to tell the __setup
+ * machinery the option was consumed.
+ */
+static int __init imx6_pcie_compliance_test_enable(char *str)
+{
+	if (!strcmp(str, "yes")) {
+		pr_info("Enable the i.MX PCIe compliance tests mode.\n");
+		imx6_pcie_cz_enabled = 1;
+	}
+	return 1;
+}
+
+__setup("pcie_cz_enabled=", imx6_pcie_compliance_test_enable);
+
static int imx6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct imx6_pcie *imx6_pcie;
struct device_node *np;
- struct resource *dbi_base;
+ struct resource *dbi_base, *hsio_res;
struct device_node *node = dev->of_node;
+ void __iomem *iomem;
+ struct regmap_config regconfig = imx6_pcie_regconfig;
int ret;
- u16 val;
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
@@ -1019,12 +2365,70 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->phy_base);
}
+ imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx6_pcie->phy)) {
+ if (PTR_ERR(imx6_pcie->phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /* Set NULL if there is no pcie-phy */
+ imx6_pcie->phy = NULL;
+ }
+
+ /* Find the HSIO MIX if one is defined, only imx8mp uses it */
+ np = of_parse_phandle(node, "fsl,imx8mp-hsio-mix", 0);
+ if (np) {
+ struct resource res;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Unable to find HSIO MIX res\n");
+ return ret;
+ }
+ imx6_pcie->hsmix_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx6_pcie->hsmix_base)) {
+ dev_err(dev, "Unable to map HSIO MIX res\n");
+ return PTR_ERR(imx6_pcie->hsmix_base);
+ }
+ }
+
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ if (of_property_read_u32(node, "hsio-cfg", &imx6_pcie->hsio_cfg))
+ imx6_pcie->hsio_cfg = 0;
+ if (of_property_read_u32(node, "ext_osc", &imx6_pcie->ext_osc) < 0)
+ imx6_pcie->ext_osc = 0;
+ if (of_property_read_u32(node, "local-addr", &imx6_pcie->local_addr))
+ imx6_pcie->local_addr = 0;
+ if (of_property_read_bool(node, "l1ss-disabled"))
+ imx6_pcie->l1ss_clkreq = 0;
+ else
+ imx6_pcie->l1ss_clkreq = 1;
+
/* Fetch GPIOs */
+ imx6_pcie->clkreq_gpio = of_get_named_gpio(node, "clkreq-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->clkreq_gpio)) {
+ devm_gpio_request_one(&pdev->dev, imx6_pcie->clkreq_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe CLKREQ");
+ } else if (imx6_pcie->clkreq_gpio == -EPROBE_DEFER) {
+ return imx6_pcie->clkreq_gpio;
+ }
+
+ imx6_pcie->dis_gpio = of_get_named_gpio(node, "disable-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->dis_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->dis_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe DIS");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get disable gpio\n");
+ return ret;
+ }
+ } else if (imx6_pcie->dis_gpio == -EPROBE_DEFER) {
+ return imx6_pcie->dis_gpio;
+ }
+ imx6_pcie->epdev_on = devm_regulator_get(&pdev->dev, "epdev_on");
+ if (IS_ERR(imx6_pcie->epdev_on))
+ return -EPROBE_DEFER;
imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
imx6_pcie->gpio_active_high = of_property_read_bool(node,
"reset-gpio-active-high");
@@ -1060,19 +2464,33 @@ static int imx6_pcie_probe(struct platform_device *pdev)
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
+ case IMX6SX_EP:
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
if (IS_ERR(imx6_pcie->pcie_inbound_axi))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
"pcie_inbound_axi clock missing or invalid\n");
break;
+ case IMX8MP:
+ case IMX8MP_EP:
+ imx6_pcie->pciephy_perst = devm_reset_control_get_exclusive(dev,
+ "pciephy_perst");
+ if (IS_ERR(imx6_pcie->pciephy_perst)) {
+ dev_err(dev, "Failed to get PCIEPHY perst control\n");
+ return PTR_ERR(imx6_pcie->pciephy_perst);
+ }
+ fallthrough;
case IMX8MQ:
+ case IMX8MM:
+ case IMX8MQ_EP:
+ case IMX8MM_EP:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
"pcie_aux clock source missing or invalid\n");
fallthrough;
case IMX7D:
+ case IMX7D_EP:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
imx6_pcie->controller_id = 1;
@@ -1090,6 +2508,71 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->apps_reset);
}
break;
+ case IMX8QM:
+ case IMX8QM_EP:
+ case IMX8QXP:
+ case IMX8QXP_EP:
+ if (dbi_base->start == IMX8_HSIO_PCIEB_BASE_ADDR)
+ imx6_pcie->controller_id = 1;
+
+ imx6_pcie->pcie_per = devm_clk_get(dev, "pcie_per");
+ if (IS_ERR(imx6_pcie->pcie_per)) {
+ dev_err(dev, "pcie_per clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_per);
+ }
+
+ imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
+ "pcie_inbound_axi");
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
+ dev_err(&pdev->dev,
+ "pcie clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_inbound_axi);
+ }
+
+ imx6_pcie->phy_per = devm_clk_get(dev, "phy_per");
+ if (IS_ERR(imx6_pcie->phy_per)) {
+ dev_err(dev, "failed to get phy per clock.\n");
+ return PTR_ERR(imx6_pcie->phy_per);
+ }
+
+ imx6_pcie->misc_per = devm_clk_get(dev, "misc_per");
+ if (IS_ERR(imx6_pcie->misc_per)) {
+ dev_err(dev, "failed to get misc per clock.\n");
+ return PTR_ERR(imx6_pcie->misc_per);
+ }
+ if (imx6_pcie->drvdata->variant == IMX8QM
+ && imx6_pcie->controller_id == 1) {
+ imx6_pcie->pcie_phy_pclk = devm_clk_get(dev,
+ "pcie_phy_pclk");
+ if (IS_ERR(imx6_pcie->pcie_phy_pclk)) {
+ dev_err(dev, "no pcie_phy_pclk clock\n");
+ return PTR_ERR(imx6_pcie->pcie_phy_pclk);
+ }
+
+ imx6_pcie->pciex2_per = devm_clk_get(dev, "pciex2_per");
+ if (IS_ERR(imx6_pcie->pciex2_per)) {
+ dev_err(dev, "can't get pciex2_per.\n");
+ return PTR_ERR(imx6_pcie->pciex2_per);
+ }
+ }
+
+ hsio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "hsio");
+ if (hsio_res) {
+ iomem = devm_ioremap(dev, hsio_res->start,
+ resource_size(hsio_res));
+			if (!iomem)
+				return -ENOMEM;
+ imx6_pcie->iomuxc_gpr =
+ devm_regmap_init_mmio(dev, iomem, &regconfig);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(dev, "failed to init register map\n");
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ }
+ } else {
+ dev_err(dev, "missing *hsio* reg space\n");
+ }
+ break;
default:
break;
}
@@ -1101,12 +2584,20 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->turnoff_reset);
}
+ imx6_pcie->clkreq_reset = devm_reset_control_get_optional_exclusive(dev, "clkreq");
+ if (IS_ERR(imx6_pcie->clkreq_reset)) {
+ dev_err(dev, "Failed to get CLKREQ reset control\n");
+ return PTR_ERR(imx6_pcie->clkreq_reset);
+ }
+
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
- if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ if (imx6_pcie->iomuxc_gpr == NULL) {
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(dev, "unable to find iomuxc registers\n");
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ }
}
/* Grab PCIe PHY Tx Settings */
@@ -1154,18 +2645,60 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = dw_pcie_host_init(&pci->pp);
- if (ret < 0)
- return ret;
+ ret = regulator_enable(imx6_pcie->epdev_on);
+ if (ret) {
+ dev_err(dev, "failed to enable the epdev_on regulator\n");
+ goto err_ret;
+ }
- if (pci_msi_enabled()) {
- u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
- val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
- val |= PCI_MSI_FLAGS_ENABLE;
- dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ if (gpio_is_valid(imx6_pcie->dis_gpio))
+ gpio_set_value_cansleep(imx6_pcie->dis_gpio, 1);
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+ imx6_pcie_deassert_core_reset(imx6_pcie);
+ imx6_setup_phy_mpll(imx6_pcie);
+
+ switch (imx6_pcie->drvdata->mode) {
+ case DW_PCIE_RC_TYPE:
+ /* add attributes for bus freq */
+ ret = sysfs_create_group(&pdev->dev.kobj, &imx_pcie_attrgroup);
+ if (ret)
+ goto err_ret;
+
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0) {
+ if (imx6_pcie_cz_enabled) {
+ /* The PCIE clocks wouldn't be turned off */
+ dev_info(dev, "To do the compliance tests.\n");
+ ret = 0;
+ } else {
+ imx6_pcie_detach_pd(dev);
+ return ret;
+ }
+ }
+
+ pci_imx_set_msi_en(&imx6_pcie->pci->pp);
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_IMX6_EP)) {
+ ret = -ENODEV;
+ goto err_ret;
+ }
+
+ ret = imx_add_pcie_ep(imx6_pcie, pdev);
+ if (ret < 0)
+ goto err_ret;
+ break;
+ default:
+ dev_err(dev, "INVALID device type.\n");
+ goto err_ret;
}
return 0;
+
+err_ret:
+ imx6_pcie_detach_pd(dev);
+ return ret;
}
static void imx6_pcie_shutdown(struct platform_device *pdev)
@@ -1179,28 +2712,102 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
+ .mode = DW_PCIE_RC_TYPE,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
.dbi_length = 0x200,
},
[IMX6SX] = {
.variant = IMX6SX,
+ .mode = DW_PCIE_RC_TYPE,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX6QP] = {
.variant = IMX6QP,
+ .mode = DW_PCIE_RC_TYPE,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
},
[IMX7D] = {
.variant = IMX7D,
+ .mode = DW_PCIE_RC_TYPE,
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .mode = DW_PCIE_RC_TYPE,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_SUPPORTS_L1SS,
+ },
+ [IMX8MM] = {
+ .variant = IMX8MM,
+ .mode = DW_PCIE_RC_TYPE,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_SUPPORTS_L1SS,
+ },
+ [IMX8QM] = {
+ .variant = IMX8QM,
+ .mode = DW_PCIE_RC_TYPE,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP,
+ },
+ [IMX8QXP] = {
+ .variant = IMX8QXP,
+ .mode = DW_PCIE_RC_TYPE,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP,
+ },
+ [IMX8MP] = {
+ .variant = IMX8MP,
+ .mode = DW_PCIE_RC_TYPE,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_SUPPORTS_L1SS,
+ },
+ [IMX8QXP_EP] = {
+ .variant = IMX8QXP_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .flags = IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP,
+ },
+ [IMX8QM_EP] = {
+ .variant = IMX8QM_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .flags = IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP,
+ },
+ [IMX8MQ_EP] = {
+ .variant = IMX8MQ_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ },
+ [IMX8MM_EP] = {
+ .variant = IMX8MM_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ },
+ [IMX8MP_EP] = {
+ .variant = IMX8MP_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ },
+ [IMX6SX_EP] = {
+ .variant = IMX6SX_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY,
+ },
+ [IMX7D_EP] = {
+ .variant = IMX7D_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ },
+ [IMX6Q_EP] = {
+ .variant = IMX6Q_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY,
+ },
+ [IMX6QP_EP] = {
+ .variant = IMX6QP_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY,
},
};
@@ -1209,7 +2816,20 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
- { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
+ { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+ { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+ { .compatible = "fsl,imx8qm-pcie", .data = &drvdata[IMX8QM], },
+ { .compatible = "fsl,imx8qxp-pcie", .data = &drvdata[IMX8QXP], },
+ { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8qxp-pcie-ep", .data = &drvdata[IMX8QXP_EP], },
+ { .compatible = "fsl,imx8qm-pcie-ep", .data = &drvdata[IMX8QM_EP], },
+ { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
+ { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
+ { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx6sx-pcie-ep", .data = &drvdata[IMX6SX_EP], },
+ { .compatible = "fsl,imx7d-pcie-ep", .data = &drvdata[IMX7D_EP], },
+ { .compatible = "fsl,imx6q-pcie-ep", .data = &drvdata[IMX6Q_EP], },
+ { .compatible = "fsl,imx6qp-pcie-ep", .data = &drvdata[IMX6QP_EP], },
{},
};
@@ -1256,6 +2876,58 @@ static void imx6_pcie_quirk(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+static void imx6_pcie_l1ss_quirk(struct pci_dev *dev)
+{
+ u32 reg, rc_l1sub, ep_l1sub, header;
+ int ttl, ret;
+ int pos = PCI_CFG_SPACE_SIZE;
+ struct pci_bus *bus = dev->bus;
+ struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ /* Return directly, if the L1SS is not supported by RC */
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_L1SS))
+ return;
+
+ /* Make sure the L1SS is not force disabled. */
+ if (imx6_pcie->l1ss_clkreq == 0)
+ return;
+
+ reg = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+ rc_l1sub = dw_pcie_readl_dbi(pci, reg + PCI_L1SS_CAP);
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+ ret = dw_pcie_read(pp->va_cfg0_base + pos, 4, &header);
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_L1SS && pos != 0)
+ break;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ ret = dw_pcie_read(pp->va_cfg0_base + pos, 4, &header);
+ }
+ ret = dw_pcie_read(pp->va_cfg0_base + pos + PCI_L1SS_CAP, 4, &ep_l1sub);
+
+	if ((rc_l1sub & PCI_L1SS_CAP_L1_PM_SS) && (ep_l1sub & PCI_L1SS_CAP_L1_PM_SS)) {
+ imx6_pcie->l1ss_clkreq = 1;
+ imx6_pcie_clkreq_enable(imx6_pcie);
+ } else {
+ imx6_pcie->l1ss_clkreq = 0;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, imx6_pcie_l1ss_quirk);
+
static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
@@ -1273,3 +2945,4 @@ static int __init imx6_pcie_init(void)
return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 39f4664bd84c..2554f891e7db 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -18,6 +18,22 @@
#include "pcie-designware.h"
+#define PCIE_LINK_CAP		0x7C /* PCIe Link Capabilities */
+#define MAX_LINK_SP_MASK 0x0F
+#define MAX_LINK_W_MASK 0x3F
+#define MAX_LINK_W_SHIFT 4
+
+/* PEX PFa PCIe PME and message interrupt registers */
+#define PEX_PF0_PME_MES_DR 0xC0020
+#define PEX_PF0_PME_MES_DR_LUD (1 << 7)
+#define PEX_PF0_PME_MES_DR_LDD (1 << 9)
+#define PEX_PF0_PME_MES_DR_HRD (1 << 10)
+
+#define PEX_PF0_PME_MES_IER 0xC0028
+#define PEX_PF0_PME_MES_IER_LUDIE (1 << 7)
+#define PEX_PF0_PME_MES_IER_LDDIE (1 << 9)
+#define PEX_PF0_PME_MES_IER_HRDIE (1 << 10)
+
#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
struct ls_pcie_ep_drvdata {
@@ -30,6 +46,10 @@ struct ls_pcie_ep {
struct dw_pcie *pci;
struct pci_epc_features *ls_epc;
const struct ls_pcie_ep_drvdata *drvdata;
+ u8 max_speed;
+ u8 max_width;
+ bool big_endian;
+ int irq;
};
static int ls_pcie_establish_link(struct dw_pcie *pci)
@@ -41,6 +61,84 @@ static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
.start_link = ls_pcie_establish_link,
};
+static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ return ioread32be(pci->dbi_base + offset);
+ else
+ return ioread32(pci->dbi_base + offset);
+}
+
+static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset,
+ u32 value)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ iowrite32be(value, pci->dbi_base + offset);
+ else
+ iowrite32(value, pci->dbi_base + offset);
+}
+
+static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
+{
+ struct ls_pcie_ep *pcie = (struct ls_pcie_ep *)dev_id;
+ struct dw_pcie *pci = pcie->pci;
+ u32 val;
+
+ val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & PEX_PF0_PME_MES_DR_LUD)
+ dev_info(pci->dev, "Detect the link up state !\n");
+ else if (val & PEX_PF0_PME_MES_DR_LDD)
+ dev_info(pci->dev, "Detect the link down state !\n");
+ else if (val & PEX_PF0_PME_MES_DR_HRD)
+ dev_info(pci->dev, "Detect the hot reset state !\n");
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, PCIE_LINK_CAP,
+ (pcie->max_width << MAX_LINK_W_SHIFT) |
+ pcie->max_speed);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+
+ return IRQ_HANDLED;
+}
+
+static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
+ struct platform_device *pdev)
+{
+ u32 val;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "pme");
+ if (pcie->irq < 0) {
+ dev_err(&pdev->dev, "Can't get 'pme' irq.\n");
+ return pcie->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pcie->irq,
+ ls_pcie_ep_event_handler, IRQF_SHARED,
+ pdev->name, pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register PCIe IRQ.\n");
+ return ret;
+ }
+
+ /* Enable interrupts */
+ val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER);
+ val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
+ PEX_PF0_PME_MES_IER_LUDIE;
+ ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
+
+ return 0;
+}
+
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
@@ -124,6 +222,7 @@ static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
static const struct of_device_id ls_pcie_ep_of_match[] = {
{ .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
{ .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,ls1028a-pcie-ep", .data = &ls1_ep_drvdata },
{ .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
{ .compatible = "fsl,lx2160ar2-pcie-ep", .data = &lx2_ep_drvdata },
{ },
@@ -136,6 +235,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
struct ls_pcie_ep *pcie;
struct pci_epc_features *ls_epc;
struct resource *dbi_base;
+ int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -166,9 +266,24 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->ep.ops = &ls_pcie_ep_ops;
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ pcie->max_speed = dw_pcie_readw_dbi(pci, PCIE_LINK_CAP) &
+ MAX_LINK_SP_MASK;
+ pcie->max_width = (dw_pcie_readw_dbi(pci, PCIE_LINK_CAP) >>
+ MAX_LINK_W_SHIFT) & MAX_LINK_W_MASK;
+
+ /* set 64-bit DMA mask and coherent DMA mask */
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ dev_warn(dev, "Failed to set 64-bit DMA mask.\n");
+
platform_set_drvdata(pdev, pcie);
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ return ls_pcie_ep_interrupt_init(pcie, pdev);
}
static struct platform_driver ls_pcie_ep_driver = {
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 5b9c625df7b8..456020ce400c 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -3,13 +3,16 @@
* PCIe host controller driver for Freescale Layerscape SoCs
*
* Copyright (C) 2014 Freescale Semiconductor.
+ * Copyright 2020 NXP
*
* Author: Minghuan Lian <Minghuan.Lian@freescale.com>
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
@@ -22,35 +25,65 @@
#include "pcie-designware.h"
-/* PEX1/2 Misc Ports Status Register */
-#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4)
-#define LTSSM_STATE_SHIFT 20
-#define LTSSM_STATE_MASK 0x3f
-#define LTSSM_PCIE_L0 0x11 /* L0 state */
-
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
+/* PF Message Command Register */
+#define LS_PCIE_PF_MCR 0x2c
+#define PF_MCR_PTOMR BIT(0)
+#define PF_MCR_EXL2S BIT(1)
+
+/* LS1021A PEXn PM Write Control Register */
+#define SCFG_PEXPMWRCR(idx) (0x5c + (idx) * 0x64)
+#define PMXMTTURNOFF BIT(31)
+#define SCFG_PEXSFTRSTCR 0x190
+#define PEXSR(idx) BIT(idx)
+
+/* LS1043A PEX PME control register */
+#define SCFG_PEXPMECR 0x144
+#define PEXPME(idx) BIT(31 - (idx) * 4)
+
+/* LS1043A PEX LUT debug register */
+#define LS_PCIE_LDBG 0x7fc
+#define LDBG_SR BIT(30)
+#define LDBG_WE BIT(31)
+
#define PCIE_IATU_NUM 6
+#define LS_PCIE_IS_L2(v) \
+ (((v) & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L2)
+
+struct ls_pcie;
+
+struct ls_pcie_host_pm_ops {
+ int (*pm_init)(struct ls_pcie *pcie);
+ void (*send_turn_off_message)(struct ls_pcie *pcie);
+ void (*exit_from_l2)(struct ls_pcie *pcie);
+};
+
struct ls_pcie_drvdata {
- u32 lut_offset;
- u32 ltssm_shift;
- u32 lut_dbg;
+ const u32 pf_off;
+ const u32 lut_off;
const struct dw_pcie_host_ops *ops;
- const struct dw_pcie_ops *dw_pcie_ops;
+ const struct ls_pcie_host_pm_ops *pm_ops;
};
struct ls_pcie {
struct dw_pcie *pci;
- void __iomem *lut;
- struct regmap *scfg;
const struct ls_pcie_drvdata *drvdata;
+ void __iomem *pf_base;
+ void __iomem *lut_base;
+ bool big_endian;
+ bool ep_presence;
+ bool pm_support;
+ struct regmap *scfg;
int index;
};
+#define ls_pcie_lut_readl_addr(addr) ls_pcie_lut_readl(pcie, addr)
+#define ls_pcie_pf_readl_addr(addr) ls_pcie_pf_readl(pcie, addr)
#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
@@ -83,67 +116,177 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
-static int ls1021_pcie_link_up(struct dw_pcie *pci)
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
{
- u32 state;
- struct ls_pcie *pcie = to_ls_pcie(pci);
+ struct dw_pcie *pci = pcie->pci;
- if (!pcie->scfg)
- return 0;
+ iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+}
- regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
- state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
+static u32 ls_pcie_lut_readl(struct ls_pcie *pcie, u32 off)
+{
+ if (pcie->big_endian)
+ return ioread32be(pcie->lut_base + off);
- if (state < LTSSM_PCIE_L0)
- return 0;
+ return ioread32(pcie->lut_base + off);
+}
+
+static void ls_pcie_lut_writel(struct ls_pcie *pcie, u32 off, u32 val)
+{
+ if (pcie->big_endian)
+ return iowrite32be(val, pcie->lut_base + off);
+
+ return iowrite32(val, pcie->lut_base + off);
- return 1;
}
-static int ls_pcie_link_up(struct dw_pcie *pci)
+static u32 ls_pcie_pf_readl(struct ls_pcie *pcie, u32 off)
{
- struct ls_pcie *pcie = to_ls_pcie(pci);
- u32 state;
+ if (pcie->big_endian)
+ return ioread32be(pcie->pf_base + off);
- state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
- pcie->drvdata->ltssm_shift) &
- LTSSM_STATE_MASK;
+ return ioread32(pcie->pf_base + off);
+}
- if (state < LTSSM_PCIE_L0)
- return 0;
+static void ls_pcie_pf_writel(struct ls_pcie *pcie, u32 off, u32 val)
+{
+ if (pcie->big_endian)
+ return iowrite32be(val, pcie->pf_base + off);
+
+ return iowrite32(val, pcie->pf_base + off);
- return 1;
}
-/* Forward error response of outbound non-posted requests */
-static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+static void ls_pcie_send_turnoff_msg(struct ls_pcie *pcie)
+{
+ u32 val;
+ int ret;
+
+ val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_PTOMR;
+ ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_PTOMR), 100, 10000);
+ if (ret)
+ dev_info(pcie->pci->dev, "poll turn off message timeout\n");
+}
+
+static void ls1021a_pcie_send_turnoff_msg(struct ls_pcie *pcie)
+{
+ u32 val;
+
+ if (!pcie->scfg) {
+ dev_dbg(pcie->pci->dev, "SYSCFG is NULL\n");
+ return;
+ }
+
+ /* Send Turn_off message */
+ regmap_read(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), &val);
+ val |= PMXMTTURNOFF;
+ regmap_write(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), val);
+
+ mdelay(10);
+
+ /* Clear Turn_off message */
+ regmap_read(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), &val);
+ val &= ~PMXMTTURNOFF;
+ regmap_write(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), val);
+}
+
+static void ls1043a_pcie_send_turnoff_msg(struct ls_pcie *pcie)
+{
+ u32 val;
+
+ if (!pcie->scfg) {
+ dev_dbg(pcie->pci->dev, "SYSCFG is NULL\n");
+ return;
+ }
+
+ /* Send Turn_off message */
+ regmap_read(pcie->scfg, SCFG_PEXPMECR, &val);
+ val |= PEXPME(pcie->index);
+ regmap_write(pcie->scfg, SCFG_PEXPMECR, val);
+
+ mdelay(10);
+
+ /* Clear Turn_off message */
+ regmap_read(pcie->scfg, SCFG_PEXPMECR, &val);
+ val &= ~PEXPME(pcie->index);
+ regmap_write(pcie->scfg, SCFG_PEXPMECR, val);
+}
+
+static void ls_pcie_exit_from_l2(struct ls_pcie *pcie)
+{
+ u32 val;
+ int ret;
+
+ val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_EXL2S;
+ ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_EXL2S), 100, 10000);
+ if (ret)
+ dev_info(pcie->pci->dev, "poll exit L2 state timeout\n");
+}
+
+static void ls_pcie_retrain_link(struct ls_pcie *pcie)
{
struct dw_pcie *pci = pcie->pci;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
- iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL);
+ val |= PCI_EXP_LNKCTL_RL;
+ dw_pcie_writew_dbi(pci, offset + PCI_EXP_LNKCTL, val);
}
-static int ls_pcie_host_init(struct pcie_port *pp)
+static void ls1021a_pcie_exit_from_l2(struct ls_pcie *pcie)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
- ls_pcie_fix_error_response(pcie);
+ regmap_read(pcie->scfg, SCFG_PEXSFTRSTCR, &val);
+ val |= PEXSR(pcie->index);
+ regmap_write(pcie->scfg, SCFG_PEXSFTRSTCR, val);
- dw_pcie_dbi_ro_wr_en(pci);
- ls_pcie_clear_multifunction(pcie);
- dw_pcie_dbi_ro_wr_dis(pci);
+ regmap_read(pcie->scfg, SCFG_PEXSFTRSTCR, &val);
+ val &= ~PEXSR(pcie->index);
+ regmap_write(pcie->scfg, SCFG_PEXSFTRSTCR, val);
- ls_pcie_drop_msg_tlp(pcie);
+ mdelay(1);
- return 0;
+ ls_pcie_retrain_link(pcie);
}
+static void ls1043a_pcie_exit_from_l2(struct ls_pcie *pcie)
+{
+ u32 val;
+
+ val = ls_pcie_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_WE;
+ ls_pcie_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_SR;
+ ls_pcie_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_SR;
+ ls_pcie_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_WE;
+ ls_pcie_lut_writel(pcie, LS_PCIE_LDBG, val);
-static int ls1021_pcie_host_init(struct pcie_port *pp)
+ mdelay(1);
+
+ ls_pcie_retrain_link(pcie);
+}
+
+static int ls1021a_pcie_pm_init(struct ls_pcie *pcie)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct ls_pcie *pcie = to_ls_pcie(pci);
- struct device *dev = pci->dev;
+ struct device *dev = pcie->pci->dev;
u32 index[2];
int ret;
@@ -156,79 +299,101 @@ static int ls1021_pcie_host_init(struct pcie_port *pp)
return ret;
}
- if (of_property_read_u32_array(dev->of_node,
- "fsl,pcie-scfg", index, 2)) {
+ ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg",
+ index, 2);
+ if (ret) {
pcie->scfg = NULL;
- return -EINVAL;
+ return ret;
}
+
pcie->index = index[1];
- return ls_pcie_host_init(pp);
+ return 0;
}
-static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
- .host_init = ls1021_pcie_host_init,
-};
+static int ls_pcie_pm_init(struct ls_pcie *pcie)
+{
+ return 0;
+}
-static const struct dw_pcie_host_ops ls_pcie_host_ops = {
- .host_init = ls_pcie_host_init,
-};
+static void ls_pcie_set_dstate(struct ls_pcie *pcie, u32 dstate)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_PM);
+ u32 val;
+
+ val = dw_pcie_readw_dbi(pci, offset + PCI_PM_CTRL);
+ val &= ~PCI_PM_CTRL_STATE_MASK;
+ val |= dstate;
+ dw_pcie_writew_dbi(pci, offset + PCI_PM_CTRL, val);
+}
+
+static int ls_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ ls_pcie_fix_error_response(pcie);
-static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
- .link_up = ls1021_pcie_link_up,
+ dw_pcie_dbi_ro_wr_en(pci);
+ ls_pcie_clear_multifunction(pcie);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ ls_pcie_drop_msg_tlp(pcie);
+
+ return 0;
+}
+
+static struct ls_pcie_host_pm_ops ls1021a_pcie_host_pm_ops = {
+ .pm_init = &ls1021a_pcie_pm_init,
+ .send_turn_off_message = &ls1021a_pcie_send_turnoff_msg,
+ .exit_from_l2 = &ls1021a_pcie_exit_from_l2,
};
-static const struct dw_pcie_ops dw_ls_pcie_ops = {
- .link_up = ls_pcie_link_up,
+static struct ls_pcie_host_pm_ops ls1043a_pcie_host_pm_ops = {
+ .pm_init = &ls1021a_pcie_pm_init,
+ .send_turn_off_message = &ls1043a_pcie_send_turnoff_msg,
+ .exit_from_l2 = &ls1043a_pcie_exit_from_l2,
};
-static const struct ls_pcie_drvdata ls1021_drvdata = {
- .ops = &ls1021_pcie_host_ops,
- .dw_pcie_ops = &dw_ls1021_pcie_ops,
+static struct ls_pcie_host_pm_ops ls_pcie_host_pm_ops = {
+ .pm_init = &ls_pcie_pm_init,
+ .send_turn_off_message = &ls_pcie_send_turnoff_msg,
+ .exit_from_l2 = &ls_pcie_exit_from_l2,
};
-static const struct ls_pcie_drvdata ls1043_drvdata = {
- .lut_offset = 0x10000,
- .ltssm_shift = 24,
- .lut_dbg = 0x7fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
+ .host_init = ls_pcie_host_init,
};
-static const struct ls_pcie_drvdata ls1046_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 24,
- .lut_dbg = 0x407fc,
+static const struct ls_pcie_drvdata ls1021a_drvdata = {
.ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
+ .pm_ops = &ls1021a_pcie_host_pm_ops,
};
-static const struct ls_pcie_drvdata ls2080_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 0,
- .lut_dbg = 0x7fc,
+static const struct ls_pcie_drvdata ls1043a_drvdata = {
.ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
+ .lut_off = 0x10000,
+ .pm_ops = &ls1043a_pcie_host_pm_ops,
};
-static const struct ls_pcie_drvdata ls2088_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 0,
- .lut_dbg = 0x407fc,
+static const struct ls_pcie_drvdata layerscape_drvdata = {
.ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
+ .lut_off = 0x80000,
+ .pf_off = 0xc0000,
+ .pm_ops = &ls_pcie_host_pm_ops,
};
static const struct of_device_id ls_pcie_of_match[] = {
- { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
- { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata },
- { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
- { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
- { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
- { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
- { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
+ { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2088a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1088a-pcie", .data = &layerscape_drvdata },
{ },
};
@@ -238,6 +403,7 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct ls_pcie *pcie;
struct resource *dbi_base;
+ int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -250,7 +416,6 @@ static int ls_pcie_probe(struct platform_device *pdev)
pcie->drvdata = of_device_get_match_data(dev);
pci->dev = dev;
- pci->ops = pcie->drvdata->dw_pcie_ops;
pci->pp.ops = pcie->drvdata->ops;
pcie->pci = pci;
@@ -260,22 +425,120 @@ static int ls_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ if (pcie->drvdata->lut_off)
+ pcie->lut_base = pci->dbi_base + pcie->drvdata->lut_off;
+
+ if (pcie->drvdata->pf_off)
+ pcie->pf_base = pci->dbi_base + pcie->drvdata->pf_off;
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
platform_set_drvdata(pdev, pcie);
- return dw_pcie_host_init(&pci->pp);
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret)
+ return ret;
+
+ if (dw_pcie_link_up(pci)) {
+ dev_dbg(pci->dev, "Endpoint is present\n");
+ pcie->ep_presence = true;
+ }
+
+ if (pcie->drvdata->pm_ops && pcie->drvdata->pm_ops->pm_init &&
+ !pcie->drvdata->pm_ops->pm_init(pcie))
+ pcie->pm_support = true;
+
+ return 0;
}
+static bool ls_pcie_pm_check(struct ls_pcie *pcie)
+{
+ if (!pcie->ep_presence) {
+ dev_dbg(pcie->pci->dev, "Endpoint isn't present\n");
+ return false;
+ }
+
+ if (!pcie->pm_support)
+ return false;
+
+ return true;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ls_pcie_suspend_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+ struct dw_pcie *pci = pcie->pci;
+ u32 val;
+ int ret;
+
+ if (!ls_pcie_pm_check(pcie))
+ return 0;
+
+ pcie->drvdata->pm_ops->send_turn_off_message(pcie);
+
+ /* 10ms timeout to check L2 ready */
+ ret = readl_poll_timeout(pci->dbi_base + PCIE_PORT_DEBUG0,
+ val, LS_PCIE_IS_L2(val), 100, 10000);
+ if (ret) {
+ dev_err(dev, "PCIe link enter L2 timeout! ltssm = 0x%x\n", val);
+ return ret;
+ }
+
+ ls_pcie_set_dstate(pcie, 0x3);
+
+ return 0;
+}
+
+static int ls_pcie_resume_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+ struct dw_pcie *pci = pcie->pci;
+ int ret;
+
+ if (!ls_pcie_pm_check(pcie))
+ return 0;
+
+ ls_pcie_set_dstate(pcie, 0x0);
+
+ pcie->drvdata->pm_ops->exit_from_l2(pcie);
+
+ /* delay 10ms to access EP */
+ mdelay(10);
+
+ ret = ls_pcie_host_init(&pci->pp);
+ if (ret) {
+ dev_err(dev, "ls_pcie_host_init failed! ret = 0x%x\n", ret);
+ return ret;
+ }
+
+ dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "wait link up timeout! ret = 0x%x\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ls_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq,
+ ls_pcie_resume_noirq)
+};
+
static struct platform_driver ls_pcie_driver = {
.probe = ls_pcie_probe,
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
.suppress_bind_attrs = true,
+ .pm = &ls_pcie_pm_ops,
},
};
builtin_platform_driver(ls_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d1d9b8344ec9..594b9f851270 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -318,6 +318,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
return PTR_ERR(pci->dbi_base);
}
+ dw_pcie_iatu_detect(pci);
+
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index a945f0c0e73d..850b4533f4ef 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -538,6 +538,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)
return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
+EXPORT_SYMBOL_GPL(dw_pcie_link_up);
void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 7d6e9b7576be..0fb14cc25a38 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -54,6 +54,7 @@
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+#define PORT_LOGIC_LTSSM_STATE_L2 0x15
#define PCIE_PORT_DEBUG1 0x72C
#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
diff --git a/include/dt-bindings/soc/imx8_hsio.h b/include/dt-bindings/soc/imx8_hsio.h
new file mode 100644
index 000000000000..3cf1056b63d7
--- /dev/null
+++ b/include/dt-bindings/soc/imx8_hsio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_BINDINGS_IMX8_HSIO_H
+#define __DT_BINDINGS_IMX8_HSIO_H
+
+/*
+ * imx8qm hsio has pciea, pcieb and sata modules, and hsio
+ * can be configured to the following different work modes.
+ * 1 - pciea 2 lanes and one sata ahci port.
+ * 2 - pciea 1 lane, pcieb 1 lane and one sata ahci port.
+ * 3 - pciea 2 lanes, pcieb 1 lane.
+ * Choose one mode, refer to the exact hardware board design.
+ */
+#define PCIEAX2SATA 1
+#define PCIEAX1PCIEBX1SATA 2
+#define PCIEAX2PCIEBX1 3
+
+#endif /* __DT_BINDINGS_IMX8_HSIO_H */