summaryrefslogtreecommitdiff
path: root/drivers/pci
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/pci')
-rw-r--r--drivers/pci/Kconfig12
-rw-r--r--drivers/pci/controller/Kconfig3
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c3
-rw-r--r--drivers/pci/controller/cadence/pci-sky1.c6
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c7
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h75
-rw-r--r--drivers/pci/controller/cadence/pcie-sg2042.c2
-rw-r--r--drivers/pci/controller/dwc/Kconfig28
-rw-r--r--drivers/pci/controller/dwc/Makefile3
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c4
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c81
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c12
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c6
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c16
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-andes-qilai.c197
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-bt1.c645
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-debugfs.c73
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c55
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c29
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c18
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h3
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c134
-rw-r--r--drivers/pci/controller/dwc/pcie-eswin.c408
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c16
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c20
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32-ep.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c300
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c19
-rw-r--r--drivers/pci/controller/pci-hyperv.c2
-rw-r--r--drivers/pci/controller/pcie-aspeed.c8
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c5
-rw-r--r--drivers/pci/controller/pcie-hisi-error.c12
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c233
-rw-r--r--drivers/pci/controller/pcie-mediatek.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c3
-rw-r--r--drivers/pci/controller/pcie-rzg3s-host.c365
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-mhi.c4
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c56
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c39
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c69
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c30
-rw-r--r--drivers/pci/endpoint/pci-ep-msi.c5
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c5
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c3
-rw-r--r--drivers/pci/hotplug/pnv_php.c19
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c4
-rw-r--r--drivers/pci/msi/api.c5
-rw-r--r--drivers/pci/msi/msi.c10
-rw-r--r--drivers/pci/npem.c2
-rw-r--r--drivers/pci/of.c21
-rw-r--r--drivers/pci/p2pdma.c10
-rw-r--r--drivers/pci/pci-driver.c11
-rw-r--r--drivers/pci/pci-sysfs.c34
-rw-r--r--drivers/pci/pci.c289
-rw-r--r--drivers/pci/pci.h7
-rw-r--r--drivers/pci/pcie/aer.c2
-rw-r--r--drivers/pci/pcie/aspm.c17
-rw-r--r--drivers/pci/pcie/dpc.c3
-rw-r--r--drivers/pci/pcie/ptm.c77
-rw-r--r--drivers/pci/probe.c40
-rw-r--r--drivers/pci/pwrctrl/Kconfig13
-rw-r--r--drivers/pci/pwrctrl/Makefile4
-rw-r--r--drivers/pci/pwrctrl/generic.c (renamed from drivers/pci/pwrctrl/slot.c)13
-rw-r--r--drivers/pci/quirks.c7
-rw-r--r--drivers/pci/setup-bus.c65
-rw-r--r--drivers/pci/setup-res.c40
-rw-r--r--drivers/pci/slot.c44
-rw-r--r--drivers/pci/tph.c25
-rw-r--r--drivers/pci/trace.c1
-rw-r--r--drivers/pci/vgaarb.c20
77 files changed, 2253 insertions, 1602 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index e3f848ffb52a..33c88432b728 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -31,7 +31,6 @@ if PCI
config PCI_DOMAINS
bool
- depends on PCI
config PCI_DOMAINS_GENERIC
bool
@@ -255,7 +254,7 @@ config PCI_DYNAMIC_OF_NODES
choice
prompt "PCI Express hierarchy optimization setting"
default PCIE_BUS_DEFAULT
- depends on PCI && EXPERT
+ depends on EXPERT
help
MPS (Max Payload Size) and MRRS (Max Read Request Size) are PCIe
device parameters that affect performance and the ability to
@@ -272,20 +271,17 @@ choice
config PCIE_BUS_TUNE_OFF
bool "Tune Off"
- depends on PCI
help
Use the BIOS defaults; don't touch MPS at all. This is the same
as booting with 'pci=pcie_bus_tune_off'.
config PCIE_BUS_DEFAULT
bool "Default"
- depends on PCI
help
Default choice; ensure that the MPS matches upstream bridge.
config PCIE_BUS_SAFE
bool "Safe"
- depends on PCI
help
Use largest MPS that boot-time devices support. If you have a
closed system with no possibility of adding new devices, this
@@ -294,7 +290,6 @@ config PCIE_BUS_SAFE
config PCIE_BUS_PERFORMANCE
bool "Performance"
- depends on PCI
help
Use MPS and MRRS for best performance. Ensure that a given
device's MPS is no larger than its parent MPS, which allows us to
@@ -303,7 +298,6 @@ config PCIE_BUS_PERFORMANCE
config PCIE_BUS_PEER2PEER
bool "Peer2peer"
- depends on PCI
help
Set MPS = 128 for all devices. MPS configuration effected by the
other options could cause the MPS on one root port to be
@@ -317,7 +311,7 @@ endchoice
config VGA_ARB
bool "VGA Arbitration" if EXPERT
default y
- depends on (PCI && !S390)
+ depends on !S390
select SCREEN_INFO if X86
help
Some "legacy" VGA devices implemented on PCI typically have the same
@@ -340,4 +334,4 @@ source "drivers/pci/endpoint/Kconfig"
source "drivers/pci/switch/Kconfig"
source "drivers/pci/pwrctrl/Kconfig"
-endif
+endif # PCI
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 5aaed8ac6e44..2247709ef6d6 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -187,7 +187,7 @@ config VMD
config PCI_LOONGSON
bool "LOONGSON PCIe controller"
- depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on MACH_LOONGSON32 || MACH_LOONGSON64 || COMPILE_TEST
depends on OF || ACPI
depends on PCI_QUIRKS
default MACH_LOONGSON64
@@ -222,6 +222,7 @@ config PCIE_MEDIATEK_GEN3
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
select IRQ_MSI_LIB
+ select PCI_PWRCTRL_GENERIC
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed,
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 6f2501479c70..bfdfe98d5aba 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -202,7 +202,8 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
int ret;
link_speed = of_pci_get_max_link_speed(np);
- if (link_speed < 2)
+ if ((link_speed < 2) ||
+ (pcie_get_link_speed(link_speed) == PCI_SPEED_UNKNOWN))
link_speed = 2;
val = link_speed - 1;
diff --git a/drivers/pci/controller/cadence/pci-sky1.c b/drivers/pci/controller/cadence/pci-sky1.c
index d8c216dc120d..cd55c64e58a9 100644
--- a/drivers/pci/controller/cadence/pci-sky1.c
+++ b/drivers/pci/controller/cadence/pci-sky1.c
@@ -173,11 +173,13 @@ static int sky1_pcie_probe(struct platform_device *pdev)
cdns_pcie->ops = &sky1_pcie_ops;
cdns_pcie->reg_base = pcie->reg_base;
cdns_pcie->msg_res = pcie->msg_res;
- cdns_pcie->is_rc = 1;
+ cdns_pcie->is_rc = true;
reg_off = devm_kzalloc(dev, sizeof(*reg_off), GFP_KERNEL);
- if (!reg_off)
+ if (!reg_off) {
+ pci_ecam_free(pcie->cfg);
return -ENOMEM;
+ }
reg_off->ip_reg_bank_offset = SKY1_IP_REG_BANK;
reg_off->ip_cfg_ctrl_reg_offset = SKY1_IP_CFG_CTRL_REG_BANK;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index db3154c1eccb..0bc9e6e90e0e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -147,6 +147,13 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
+ value = cdns_pcie_rp_readl(pcie, CDNS_PCIE_RP_CAP_OFFSET + PCI_EXP_LNKCAP);
+ if (rc->quirk_broken_aspm_l0s)
+ value &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ if (rc->quirk_broken_aspm_l1)
+ value &= ~PCI_EXP_LNKCAP_ASPM_L1;
+ cdns_pcie_rp_writel(pcie, CDNS_PCIE_RP_CAP_OFFSET + PCI_EXP_LNKCAP, value);
+
return 0;
}
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 443033c607d7..574e9cf4d003 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -115,6 +115,8 @@ struct cdns_pcie {
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
* @ecam_supported: Whether the ECAM is supported
* @no_inbound_map: Whether inbound mapping is supported
+ * @quirk_broken_aspm_l0s: Disable ASPM L0s support as quirk
+ * @quirk_broken_aspm_l1: Disable ASPM L1 support as quirk
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
@@ -127,6 +129,8 @@ struct cdns_pcie_rc {
unsigned int quirk_detect_quiet_flag:1;
unsigned int ecam_supported:1;
unsigned int no_inbound_map:1;
+ unsigned int quirk_broken_aspm_l0s:1;
+ unsigned int quirk_broken_aspm_l1:1;
};
/**
@@ -249,37 +253,6 @@ static inline u32 cdns_pcie_hpa_readl(struct cdns_pcie *pcie,
return readl(pcie->reg_base + reg);
}
-static inline u16 cdns_pcie_readw(struct cdns_pcie *pcie, u32 reg)
-{
- return readw(pcie->reg_base + reg);
-}
-
-static inline u8 cdns_pcie_readb(struct cdns_pcie *pcie, u32 reg)
-{
- return readb(pcie->reg_base + reg);
-}
-
-static inline int cdns_pcie_read_cfg_byte(struct cdns_pcie *pcie, int where,
- u8 *val)
-{
- *val = cdns_pcie_readb(pcie, where);
- return PCIBIOS_SUCCESSFUL;
-}
-
-static inline int cdns_pcie_read_cfg_word(struct cdns_pcie *pcie, int where,
- u16 *val)
-{
- *val = cdns_pcie_readw(pcie, where);
- return PCIBIOS_SUCCESSFUL;
-}
-
-static inline int cdns_pcie_read_cfg_dword(struct cdns_pcie *pcie, int where,
- u32 *val)
-{
- *val = cdns_pcie_readl(pcie, where);
- return PCIBIOS_SUCCESSFUL;
-}
-
static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
@@ -320,6 +293,31 @@ static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
writel(val, aligned_addr);
}
+static inline int cdns_pcie_read_cfg_byte(struct cdns_pcie *pcie, int where,
+ u8 *val)
+{
+ void __iomem *addr = pcie->reg_base + where;
+
+ *val = cdns_pcie_read_sz(addr, 0x1);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_word(struct cdns_pcie *pcie, int where,
+ u16 *val)
+{
+ void __iomem *addr = pcie->reg_base + where;
+
+ *val = cdns_pcie_read_sz(addr, 0x2);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_dword(struct cdns_pcie *pcie, int where,
+ u32 *val)
+{
+ *val = cdns_pcie_readl(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
u32 reg, u8 value)
@@ -344,6 +342,21 @@ static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
return cdns_pcie_read_sz(addr, 0x2);
}
+static inline void cdns_pcie_rp_writel(struct cdns_pcie *pcie,
+ u32 reg, u32 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x4, value);
+}
+
+static inline u32 cdns_pcie_rp_readl(struct cdns_pcie *pcie, u32 reg)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ return cdns_pcie_read_sz(addr, 0x4);
+}
+
static inline void cdns_pcie_hpa_rp_writeb(struct cdns_pcie *pcie,
u32 reg, u8 value)
{
diff --git a/drivers/pci/controller/cadence/pcie-sg2042.c b/drivers/pci/controller/cadence/pcie-sg2042.c
index 0c50c74d03ee..4a2af4d0713e 100644
--- a/drivers/pci/controller/cadence/pcie-sg2042.c
+++ b/drivers/pci/controller/cadence/pcie-sg2042.c
@@ -48,6 +48,8 @@ static int sg2042_pcie_probe(struct platform_device *pdev)
bridge->child_ops = &sg2042_pcie_child_ops;
rc = pci_host_bridge_priv(bridge);
+ rc->quirk_broken_aspm_l0s = 1;
+ rc->quirk_broken_aspm_l1 = 1;
pcie = &rc->pcie;
pcie->dev = dev;
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index d0aa031397fa..f2fde13107f2 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -61,6 +61,17 @@ config PCI_MESON
and therefore the driver re-uses the DesignWare core functions to
implement the driver.
+config PCIE_ANDES_QILAI
+ tristate "Andes QiLai PCIe controller"
+ depends on ARCH_ANDES || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here to enable PCIe controller support on Andes QiLai SoCs,
+ which operate in Root Complex mode. The Andes QiLai SoC PCIe
+ controller is based on DesignWare IP and therefore the driver
+ re-uses the DesignWare core functions to implement the driver.
+
config PCIE_ARTPEC6
bool
@@ -84,14 +95,15 @@ config PCIE_ARTPEC6_EP
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
-config PCIE_BT1
- tristate "Baikal-T1 PCIe controller"
- depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+config PCIE_ESWIN
+ tristate "ESWIN PCIe controller"
+ depends on ARCH_ESWIN || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
help
- Enables support for the PCIe controller in the Baikal-T1 SoC to work
- in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
+ Say Y here if you want PCIe controller support for the ESWIN SoCs.
+ The PCIe controller in ESWIN SoCs is based on DesignWare hardware, and
+ works only in host mode.
config PCI_IMX6
bool
@@ -121,7 +133,7 @@ config PCI_IMX6_EP
DesignWare core functions to implement the driver.
config PCI_LAYERSCAPE
- bool "Freescale Layerscape PCIe controller (host mode)"
+ tristate "Freescale Layerscape PCIe controller (host mode)"
depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
@@ -309,7 +321,7 @@ config PCIE_QCOM
select CRC8
select PCIE_QCOM_COMMON
select PCI_HOST_COMMON
- select PCI_PWRCTRL_SLOT
+ select PCI_PWRCTRL_GENERIC
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -431,7 +443,7 @@ config PCIE_SPACEMIT_K1
depends on ARCH_SPACEMIT || COMPILE_TEST
depends on HAS_IOMEM
select PCIE_DW_HOST
- select PCI_PWRCTRL_SLOT
+ select PCI_PWRCTRL_GENERIC
default ARCH_SPACEMIT
help
Enables support for the DesignWare based PCIe controller in
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 67ba59c02038..7177451db8aa 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -5,7 +5,8 @@ obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
-obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
+obj-$(CONFIG_PCIE_ANDES_QILAI) += pcie-andes-qilai.o
+obj-$(CONFIG_PCIE_ESWIN) += pcie-eswin.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index d5d26229063f..cd904659c321 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -378,10 +378,6 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- enum pci_barno bar;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index a5b8d0b71677..e35044cc5218 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -117,6 +117,8 @@ enum imx_pcie_variants {
#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
#define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11)
#define IMX_PCIE_FLAG_SKIP_L23_READY BIT(12)
+/* Preserve MSI capability for platforms that require it */
+#define IMX_PCIE_FLAG_KEEP_MSI_CAP BIT(13)
#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
@@ -268,8 +270,8 @@ static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
IMX95_PCIE_PHY_CR_PARA_SEL);
regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_PHY_GEN_CTRL,
- ext ? IMX95_PCIE_REF_USE_PAD : 0,
- IMX95_PCIE_REF_USE_PAD);
+ IMX95_PCIE_REF_USE_PAD,
+ ext ? IMX95_PCIE_REF_USE_PAD : 0);
regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_REF_CLKEN,
ext ? 0 : IMX95_PCIE_REF_CLKEN);
@@ -901,27 +903,14 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
if (imx_pcie->drvdata->core_reset)
imx_pcie->drvdata->core_reset(imx_pcie, true);
-
- /* Some boards don't have PCIe reset GPIO. */
- gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
}
-static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+static void imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_deassert(imx_pcie->pciephy_reset);
if (imx_pcie->drvdata->core_reset)
imx_pcie->drvdata->core_reset(imx_pcie, false);
-
- /* Some boards don't have PCIe reset GPIO. */
- if (imx_pcie->reset_gpiod) {
- msleep(100);
- gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
- /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
- msleep(100);
- }
-
- return 0;
}
static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
@@ -1233,6 +1222,19 @@ static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
}
+static void imx_pcie_assert_perst(struct imx_pcie *imx_pcie, bool assert)
+{
+ if (assert) {
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
+ } else {
+ if (imx_pcie->reset_gpiod) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+ }
+ }
+}
+
static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -1255,6 +1257,7 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
}
imx_pcie_assert_core_reset(imx_pcie);
+ imx_pcie_assert_perst(imx_pcie, true);
if (imx_pcie->drvdata->init_phy)
imx_pcie->drvdata->init_phy(imx_pcie);
@@ -1292,11 +1295,8 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
/* Make sure that PCIe LTSSM is cleared */
imx_pcie_ltssm_disable(dev);
- ret = imx_pcie_deassert_core_reset(imx_pcie);
- if (ret < 0) {
- dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
- goto err_phy_off;
- }
+ imx_pcie_deassert_core_reset(imx_pcie);
+ imx_pcie_assert_perst(imx_pcie, false);
if (imx_pcie->drvdata->wait_pll_lock) {
ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
@@ -1401,15 +1401,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.stop_link = imx_pcie_stop_link,
};
-static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
-{
- enum pci_barno bar;
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-
- for (bar = BAR_0; bar <= BAR_5; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
-}
-
static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num)
{
@@ -1433,19 +1424,19 @@ static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features imx8m_pcie_epc_features = {
DWC_EPC_COMMON_FEATURES,
.msi_capable = true,
- .bar[BAR_1] = { .type = BAR_RESERVED, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_1] = { .type = BAR_DISABLED, },
+ .bar[BAR_3] = { .type = BAR_DISABLED, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
- .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_DISABLED, },
.align = SZ_64K,
};
static const struct pci_epc_features imx8q_pcie_epc_features = {
DWC_EPC_COMMON_FEATURES,
.msi_capable = true,
- .bar[BAR_1] = { .type = BAR_RESERVED, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
- .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .bar[BAR_1] = { .type = BAR_DISABLED, },
+ .bar[BAR_3] = { .type = BAR_DISABLED, },
+ .bar[BAR_5] = { .type = BAR_DISABLED, },
.align = SZ_64K,
};
@@ -1478,7 +1469,6 @@ imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .init = imx_pcie_ep_init,
.raise_irq = imx_pcie_ep_raise_irq,
.get_features = imx_pcie_ep_get_features,
};
@@ -1593,6 +1583,7 @@ static int imx_pcie_suspend_noirq(struct device *dev)
* clock which saves some power.
*/
imx_pcie_assert_core_reset(imx_pcie);
+ imx_pcie_assert_perst(imx_pcie, true);
imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
} else {
return dw_pcie_suspend_noirq(imx_pcie->pci);
@@ -1613,9 +1604,8 @@ static int imx_pcie_resume_noirq(struct device *dev)
ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
if (ret)
return ret;
- ret = imx_pcie_deassert_core_reset(imx_pcie);
- if (ret)
- return ret;
+ imx_pcie_deassert_core_reset(imx_pcie);
+ imx_pcie_assert_perst(imx_pcie, false);
/*
* Using PCIE_TEST_PD seems to disable MSI and powers down the
@@ -1647,7 +1637,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct imx_pcie *imx_pcie;
- struct device_node *np;
struct device_node *node = dev->of_node;
int i, ret, domain;
u16 val;
@@ -1674,7 +1663,8 @@ static int imx_pcie_probe(struct platform_device *pdev)
pci->pp.ops = &imx_pcie_host_dw_pme_ops;
/* Find the PHY if one is defined, only imx7d uses it */
- np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
+ struct device_node *np __free(device_node) =
+ of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
if (np) {
struct resource res;
@@ -1830,6 +1820,8 @@ static int imx_pcie_probe(struct platform_device *pdev)
} else {
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SKIP_L23_READY))
pci->pp.skip_l23_ready = true;
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_KEEP_MSI_CAP))
+ pci->pp.keep_rp_msi_en = true;
pci->pp.use_atu_msg = true;
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
@@ -1853,6 +1845,7 @@ static void imx_pcie_shutdown(struct platform_device *pdev)
/* bring down link, so bootloader gets clean state in case of reboot */
imx_pcie_assert_core_reset(imx_pcie);
+ imx_pcie_assert_perst(imx_pcie, true);
}
static const struct imx_pcie_drvdata drvdata[] = {
@@ -1876,6 +1869,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
.variant = IMX6SX,
.flags = IMX_PCIE_FLAG_IMX_PHY |
IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_SKIP_L23_READY |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
.ltssm_off = IOMUXC_GPR12,
@@ -1907,6 +1901,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX7D] = {
.variant = IMX7D,
.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_KEEP_MSI_CAP |
IMX_PCIE_FLAG_HAS_APP_RESET |
IMX_PCIE_FLAG_SKIP_L23_READY |
IMX_PCIE_FLAG_HAS_PHY_RESET,
@@ -1919,6 +1914,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX8MQ] = {
.variant = IMX8MQ,
.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_KEEP_MSI_CAP |
IMX_PCIE_FLAG_HAS_PHY_RESET |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
@@ -1933,6 +1929,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX8MM] = {
.variant = IMX8MM,
.flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_KEEP_MSI_CAP |
IMX_PCIE_FLAG_HAS_PHYDRV |
IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 20fa4dadb82a..278d2dba1db0 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -933,6 +933,18 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {
DWC_EPC_COMMON_FEATURES,
.msi_capable = true,
.msix_capable = true,
+ /*
+ * TODO: This driver is the only DWC glue driver that had BAR_RESERVED
+ * BARs, but did not call dw_pcie_ep_reset_bar() for the reserved BARs.
+ *
+ * To not change the existing behavior, these BARs were not migrated to
+ * BAR_DISABLED. If this driver wants the BAR_RESERVED BARs to be
+ * disabled, it should migrate them to BAR_DISABLED.
+ *
+ * If they actually should be enabled, then the driver must also define
+ * what is behind these reserved BARs, see the definition of struct
+ * pci_epc_bar_rsvd_region.
+ */
.bar[BAR_0] = { .type = BAR_RESERVED, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_2] = { .type = BAR_RESIZABLE, },
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index a4a800699f89..8936975ff104 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -152,15 +152,11 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
struct dw_pcie_ep_func *ep_func;
- enum pci_barno bar;
ep_func = dw_pcie_ep_get_func_from_ep(ep, 0);
if (!ep_func)
return;
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
-
pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false;
pcie->ls_epc->msix_capable = ep_func->msix_cap ? true : false;
}
@@ -251,9 +247,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->ops = pcie->drvdata->dw_pcie_ops;
ls_epc->bar[BAR_2].only_64bit = true;
- ls_epc->bar[BAR_3].type = BAR_RESERVED;
ls_epc->bar[BAR_4].only_64bit = true;
- ls_epc->bar[BAR_5].type = BAR_RESERVED;
ls_epc->linkup_notifier = true;
pcie->pci = pci;
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index a44b5c256d6e..14d6ac4fc53f 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
@@ -403,8 +404,16 @@ static const struct dev_pm_ops ls_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq, ls_pcie_resume_noirq)
};
+static void ls_pcie_remove(struct platform_device *pdev)
+{
+ struct ls_pcie *pcie = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&pcie->pci->pp);
+}
+
static struct platform_driver ls_pcie_driver = {
.probe = ls_pcie_probe,
+ .remove = ls_pcie_remove,
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
@@ -412,4 +421,9 @@ static struct platform_driver ls_pcie_driver = {
.pm = &ls_pcie_pm_ops,
},
};
-builtin_platform_driver(ls_pcie_driver);
+module_platform_driver(ls_pcie_driver);
+
+MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
+MODULE_DESCRIPTION("Layerscape PCIe host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
index 3c6e837465bb..7e50e11fbffd 100644
--- a/drivers/pci/controller/dwc/pcie-amd-mdb.c
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -389,7 +389,7 @@ static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
IRQF_NO_THREAD, NULL, pcie);
if (err) {
dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
- irq, err);
+ pcie->intx_irq, err);
return err;
}
diff --git a/drivers/pci/controller/dwc/pcie-andes-qilai.c b/drivers/pci/controller/dwc/pcie-andes-qilai.c
new file mode 100644
index 000000000000..bd1588be44e0
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-andes-qilai.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the PCIe Controller in QiLai from Andes
+ *
+ * Copyright (C) 2026 Andes Technology Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCIE_INTR_CONTROL1 0x15c
+#define PCIE_MSI_CTRL_INT_EN BIT(28)
+
+#define PCIE_LOGIC_COHERENCY_CONTROL3 0x8e8
+
+/*
+ * Refer to Table A4-5 (Memory type encoding) in the
+ * AMBA AXI and ACE Protocol Specification.
+ *
+ * The selected value corresponds to the Memory type field:
+ * "Write-back, Read and Write-allocate".
+ *
+ * The last three rows in the table A4-5 in
+ * AMBA AXI and ACE Protocol Specification:
+ * ARCACHE AWCACHE Memory type
+ * ------------------------------------------------------------------
+ * 1111 (0111) 0111 Write-back Read-allocate
+ * 1011 1111 (1011) Write-back Write-allocate
+ * 1111 1111 Write-back Read and Write-allocate (selected)
+ */
+#define IOCP_ARCACHE 0b1111
+#define IOCP_AWCACHE 0b1111
+
+#define PCIE_CFG_MSTR_ARCACHE_MODE GENMASK(6, 3)
+#define PCIE_CFG_MSTR_AWCACHE_MODE GENMASK(14, 11)
+#define PCIE_CFG_MSTR_ARCACHE_VALUE GENMASK(22, 19)
+#define PCIE_CFG_MSTR_AWCACHE_VALUE GENMASK(30, 27)
+
+#define PCIE_GEN_CONTROL2 0x54
+#define PCIE_CFG_LTSSM_EN BIT(0)
+
+#define PCIE_REGS_PCIE_SII_PM_STATE 0xc0
+#define SMLH_LINK_UP BIT(6)
+#define RDLH_LINK_UP BIT(7)
+
+struct qilai_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+};
+
+#define to_qilai_pcie(_pci) container_of(_pci, struct qilai_pcie, pci)
+
+static bool qilai_pcie_link_up(struct dw_pcie *pci)
+{
+ struct qilai_pcie *pcie = to_qilai_pcie(pci);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE);
+
+ return FIELD_GET(SMLH_LINK_UP, val) && FIELD_GET(RDLH_LINK_UP, val);
+}
+
+static int qilai_pcie_start_link(struct dw_pcie *pci)
+{
+ struct qilai_pcie *pcie = to_qilai_pcie(pci);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_GEN_CONTROL2);
+ val |= PCIE_CFG_LTSSM_EN;
+ writel(val, pcie->apb_base + PCIE_GEN_CONTROL2);
+
+ return 0;
+}
+
+static const struct dw_pcie_ops qilai_pcie_ops = {
+ .link_up = qilai_pcie_link_up,
+ .start_link = qilai_pcie_start_link,
+};
+
+/*
+ * Set up the QiLai PCIe IOCP (IO Coherence Port) Read/Write Behaviors to the
+ * Write-Back, Read and Write Allocate mode.
+ *
+ * The IOCP HW target is SoC last-level cache (L2 Cache), which serves as the
+ * system cache. The IOCP HW helps maintain cache monitoring, ensuring that
+ * the device can snoop data from/to the cache.
+ */
+static void qilai_pcie_iocp_cache_setup(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 val;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_LOGIC_COHERENCY_CONTROL3);
+ FIELD_MODIFY(PCIE_CFG_MSTR_ARCACHE_MODE, &val, IOCP_ARCACHE);
+ FIELD_MODIFY(PCIE_CFG_MSTR_AWCACHE_MODE, &val, IOCP_AWCACHE);
+ FIELD_MODIFY(PCIE_CFG_MSTR_ARCACHE_VALUE, &val, IOCP_ARCACHE);
+ FIELD_MODIFY(PCIE_CFG_MSTR_AWCACHE_VALUE, &val, IOCP_AWCACHE);
+ dw_pcie_writel_dbi(pci, PCIE_LOGIC_COHERENCY_CONTROL3, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static void qilai_pcie_enable_msi(struct qilai_pcie *pcie)
+{
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_INTR_CONTROL1);
+ val |= PCIE_MSI_CTRL_INT_EN;
+ writel(val, pcie->apb_base + PCIE_INTR_CONTROL1);
+}
+
+static int qilai_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qilai_pcie *pcie = to_qilai_pcie(pci);
+
+ qilai_pcie_enable_msi(pcie);
+
+ return 0;
+}
+
+static void qilai_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ qilai_pcie_iocp_cache_setup(pp);
+}
+
+static const struct dw_pcie_host_ops qilai_pcie_host_ops = {
+ .init = qilai_pcie_host_init,
+ .post_init = qilai_pcie_host_post_init,
+};
+
+static int qilai_pcie_probe(struct platform_device *pdev)
+{
+ struct qilai_pcie *pcie;
+ struct dw_pcie *pci;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pcie);
+
+ pci = &pcie->pci;
+ pcie->pci.dev = dev;
+ pcie->pci.ops = &qilai_pcie_ops;
+ pcie->pci.pp.ops = &qilai_pcie_host_ops;
+ pci->use_parent_dt_ranges = true;
+
+ dw_pcie_cap_set(&pcie->pci, REQ_RES);
+
+ pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+
+ ret = dw_pcie_host_init(&pcie->pci.pp);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize PCIe host\n");
+
+ return 0;
+}
+
+static const struct of_device_id qilai_pcie_of_match[] = {
+ { .compatible = "andestech,qilai-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qilai_pcie_of_match);
+
+static struct platform_driver qilai_pcie_driver = {
+ .probe = qilai_pcie_probe,
+ .driver = {
+ .name = "qilai-pcie",
+ .of_match_table = qilai_pcie_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+builtin_platform_driver(qilai_pcie_driver);
+
+MODULE_AUTHOR("Randolph Lin <randolph@andestech.com>");
+MODULE_DESCRIPTION("Andes QiLai PCIe driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index e994b75986c3..55cb957ae1f3 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -340,15 +340,11 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
- enum pci_barno bar;
artpec6_pcie_assert_core_reset(artpec6_pcie);
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
deleted file mode 100644
index 1340edc18d12..000000000000
--- a/drivers/pci/controller/dwc/pcie-bt1.c
+++ /dev/null
@@ -1,645 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
- *
- * Authors:
- * Vadim Vlasov <Vadim.Vlasov@baikalelectronics.ru>
- * Serge Semin <Sergey.Semin@baikalelectronics.ru>
- *
- * Baikal-T1 PCIe controller driver
- */
-
-#include <linux/bitfield.h>
-#include <linux/bits.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <linux/types.h>
-
-#include "pcie-designware.h"
-
-/* Baikal-T1 System CCU control registers */
-#define BT1_CCU_PCIE_CLKC 0x140
-#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16)
-#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17)
-#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18)
-
-#define BT1_CCU_PCIE_RSTC 0x144
-#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13)
-#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14)
-#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16)
-#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24)
-#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26)
-#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27)
-
-#define BT1_CCU_PCIE_PMSC 0x148
-#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
-#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00
-#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01
-#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02
-#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03
-#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04
-#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05
-#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06
-#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07
-#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08
-#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09
-#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a
-#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b
-#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c
-#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d
-#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e
-#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f
-#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10
-#define BT1_CCU_PCIE_LTSSM_L0 0x11
-#define BT1_CCU_PCIE_LTSSM_L0S 0x12
-#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13
-#define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14
-#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15
-#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16
-#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17
-#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18
-#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19
-#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a
-#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b
-#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c
-#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d
-#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e
-#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f
-#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20
-#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21
-#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22
-#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23
-#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6)
-#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7)
-#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8)
-#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9)
-#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10)
-#define BT1_CCU_PCIE_L1_PENDING BIT(12)
-#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14)
-#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15)
-#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16)
-#define BT1_CCU_PCIE_PM_PME_EN BIT(20)
-#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21)
-#define BT1_CCU_PCIE_AUX_PM_EN BIT(22)
-#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23)
-#define BT1_CCU_PCIE_WAKE_DET BIT(24)
-#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30)
-#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31)
-
-#define BT1_CCU_PCIE_GENC 0x14c
-#define BT1_CCU_PCIE_LTSSM_EN BIT(1)
-#define BT1_CCU_PCIE_DBI2_MODE BIT(2)
-#define BT1_CCU_PCIE_MGMT_EN BIT(3)
-#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16)
-#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17)
-#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24)
-#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25)
-#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26)
-#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27)
-
-#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \
-({ \
- int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \
- __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \
-})
-
-/* Baikal-T1 PCIe specific control registers */
-#define BT1_PCIE_AXI2MGM_LANENUM 0xd04
-#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0)
-
-#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08
-#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0)
-#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29)
-#define BT1_PCIE_AXI2MGM_DONE BIT(30)
-#define BT1_PCIE_AXI2MGM_BUSY BIT(31)
-
-#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c
-#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0)
-
-#define BT1_PCIE_AXI2MGM_READDATA 0xd10
-#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0)
-
-/* Generic Baikal-T1 PCIe interface resources */
-#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks)
-#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks)
-#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts)
-#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts)
-
-/* PCIe bus setup delays and timeouts */
-#define BT1_PCIE_RST_DELAY_MS 100
-#define BT1_PCIE_RUN_DELAY_US 100
-#define BT1_PCIE_REQ_DELAY_US 1
-#define BT1_PCIE_REQ_TIMEOUT_US 1000
-#define BT1_PCIE_LNK_DELAY_US 1000
-#define BT1_PCIE_LNK_TIMEOUT_US 1000000
-
-static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = {
- DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK,
-};
-
-static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = {
- DW_PCIE_REF_CLK,
-};
-
-static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = {
- DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST,
-};
-
-static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = {
- DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST,
- DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST,
-};
-
-struct bt1_pcie {
- struct dw_pcie dw;
- struct platform_device *pdev;
- struct regmap *sys_regs;
-};
-#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw)
-
-/*
- * Baikal-T1 MMIO space must be read/written by the dword-aligned
- * instructions. Note the methods are optimized to have the dword operations
- * performed with minimum overhead as the most frequently used ones.
- */
-static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val)
-{
- unsigned int ofs = (uintptr_t)addr & 0x3;
-
- if (!IS_ALIGNED((uintptr_t)addr, size))
- return -EINVAL;
-
- *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE;
- if (size == 4) {
- return 0;
- } else if (size == 2) {
- *val &= 0xffff;
- return 0;
- } else if (size == 1) {
- *val &= 0xff;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val)
-{
- unsigned int ofs = (uintptr_t)addr & 0x3;
- u32 tmp, mask;
-
- if (!IS_ALIGNED((uintptr_t)addr, size))
- return -EINVAL;
-
- if (size == 4) {
- writel(val, addr);
- return 0;
- } else if (size == 2 || size == 1) {
- mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
- tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE);
- tmp |= (val & mask) << ofs * BITS_PER_BYTE;
- writel(tmp, addr - ofs);
- return 0;
- }
-
- return -EINVAL;
-}
-
-static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size)
-{
- int ret;
- u32 val;
-
- ret = bt1_pcie_read_mmio(base + reg, size, &val);
- if (ret) {
- dev_err(pci->dev, "Read DBI address failed\n");
- return ~0U;
- }
-
- return val;
-}
-
-static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val)
-{
- int ret;
-
- ret = bt1_pcie_write_mmio(base + reg, size, val);
- if (ret)
- dev_err(pci->dev, "Write DBI address failed\n");
-}
-
-static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val)
-{
- struct bt1_pcie *btpci = to_bt1_pcie(pci);
- int ret;
-
- regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
- BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE);
-
- ret = bt1_pcie_write_mmio(base + reg, size, val);
- if (ret)
- dev_err(pci->dev, "Write DBI2 address failed\n");
-
- regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
- BT1_CCU_PCIE_DBI2_MODE, 0);
-}
-
-static int bt1_pcie_start_link(struct dw_pcie *pci)
-{
- struct bt1_pcie *btpci = to_bt1_pcie(pci);
- u32 val;
- int ret;
-
- /*
- * Enable LTSSM and make sure it was able to establish both PHY and
- * data links. This procedure shall work fine to reach 2.5 GT/s speed.
- */
- regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
- BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN);
-
- ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
- (val & BT1_CCU_PCIE_SMLH_LINKUP),
- BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
- if (ret) {
- dev_err(pci->dev, "LTSSM failed to set PHY link up\n");
- return ret;
- }
-
- ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
- (val & BT1_CCU_PCIE_RDLH_LINKUP),
- BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
- if (ret) {
- dev_err(pci->dev, "LTSSM failed to set data link up\n");
- return ret;
- }
-
- /*
- * Activate direct speed change after the link is established in an
- * attempt to reach a higher bus performance (up to Gen.3 - 8.0 GT/s).
- * This is required at least to get 8.0 GT/s speed.
- */
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-
- ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
- BT1_CCU_PCIE_LTSSM_LINKUP(val),
- BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
- if (ret)
- dev_err(pci->dev, "LTSSM failed to get into L0 state\n");
-
- return ret;
-}
-
-static void bt1_pcie_stop_link(struct dw_pcie *pci)
-{
- struct bt1_pcie *btpci = to_bt1_pcie(pci);
-
- regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
- BT1_CCU_PCIE_LTSSM_EN, 0);
-}
-
-static const struct dw_pcie_ops bt1_pcie_ops = {
- .read_dbi = bt1_pcie_read_dbi,
- .write_dbi = bt1_pcie_write_dbi,
- .write_dbi2 = bt1_pcie_write_dbi2,
- .start_link = bt1_pcie_start_link,
- .stop_link = bt1_pcie_stop_link,
-};
-
-static struct pci_ops bt1_pci_ops = {
- .map_bus = dw_pcie_own_conf_map_bus,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
-};
-
-static int bt1_pcie_get_resources(struct bt1_pcie *btpci)
-{
- struct device *dev = btpci->dw.dev;
- int i;
-
- /* DBI access is supposed to be performed by the dword-aligned IOs */
- btpci->dw.pp.bridge->ops = &bt1_pci_ops;
-
- /* These CSRs are in MMIO so we won't check the regmap-methods status */
- btpci->sys_regs =
- syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon");
- if (IS_ERR(btpci->sys_regs))
- return dev_err_probe(dev, PTR_ERR(btpci->sys_regs),
- "Failed to get syscon\n");
-
- /* Make sure all the required resources have been specified */
- for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) {
- if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) {
- dev_err(dev, "App clocks set is incomplete\n");
- return -ENOENT;
- }
- }
-
- for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) {
- if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) {
- dev_err(dev, "Core clocks set is incomplete\n");
- return -ENOENT;
- }
- }
-
- for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) {
- if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) {
- dev_err(dev, "App resets set is incomplete\n");
- return -ENOENT;
- }
- }
-
- for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) {
- if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) {
- dev_err(dev, "Core resets set is incomplete\n");
- return -ENOENT;
- }
- }
-
- return 0;
-}
-
-static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init)
-{
- struct device *dev = btpci->dw.dev;
- struct dw_pcie *pci = &btpci->dw;
- int ret;
-
- /* Disable LTSSM for sure */
- regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
- BT1_CCU_PCIE_LTSSM_EN, 0);
-
- /*
- * Application reset controls are trigger-based so assert the core
- * resets only.
- */
- ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts);
- if (ret)
- dev_err(dev, "Failed to assert core resets\n");
-
- /*
- * Clocks are disabled by default at least in accordance with the clk
- * enable counter value on init stage.
- */
- if (!init) {
- clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
-
- clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
- }
-
- /* The peripheral devices are unavailable anyway so reset them too */
- gpiod_set_value_cansleep(pci->pe_rst, 1);
-
- /* Make sure all the resets are settled */
- msleep(BT1_PCIE_RST_DELAY_MS);
-}
-
-/*
- * Implements the cold reset procedure in accordance with the reference manual
- * and available PM signals.
- */
-static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci)
-{
- struct device *dev = btpci->dw.dev;
- struct dw_pcie *pci = &btpci->dw;
- u32 val;
- int ret;
-
- /* First get out of the Power/Hot reset state */
- ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
- if (ret) {
- dev_err(dev, "Failed to deassert PHY reset\n");
- return ret;
- }
-
- ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
- if (ret) {
- dev_err(dev, "Failed to deassert hot reset\n");
- goto err_assert_pwr_rst;
- }
-
- /* Wait for the PM-core to stop requesting the PHY reset */
- ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
- !(val & BT1_CCU_PCIE_REQ_PHY_RST),
- BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
- if (ret) {
- dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n");
- goto err_assert_hot_rst;
- }
-
- ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
- if (ret) {
- dev_err(dev, "Failed to deassert PHY reset\n");
- goto err_assert_hot_rst;
- }
-
- /* Clocks can be now enabled, but the ref one is crucial at this stage */
- ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
- if (ret) {
- dev_err(dev, "Failed to enable app clocks\n");
- goto err_assert_phy_rst;
- }
-
- ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
- if (ret) {
- dev_err(dev, "Failed to enable ref clocks\n");
- goto err_disable_app_clk;
- }
-
- /* Wait for the PM to stop requesting the controller core reset */
- ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
- !(val & BT1_CCU_PCIE_REQ_CORE_RST),
- BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
- if (ret) {
- dev_err(dev, "Timed out waiting for PM to stop core resetting\n");
- goto err_disable_core_clk;
- }
-
- /* PCS-PIPE interface and controller core can be now activated */
- ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
- if (ret) {
- dev_err(dev, "Failed to deassert PIPE reset\n");
- goto err_disable_core_clk;
- }
-
- ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
- if (ret) {
- dev_err(dev, "Failed to deassert core reset\n");
- goto err_assert_pipe_rst;
- }
-
- /* It's recommended to reset the core and application logic together */
- ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
- if (ret) {
- dev_err(dev, "Failed to reset app domain\n");
- goto err_assert_core_rst;
- }
-
- /* Sticky/Non-sticky CSR flags can be now unreset too */
- ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
- if (ret) {
- dev_err(dev, "Failed to deassert sticky reset\n");
- goto err_assert_core_rst;
- }
-
- ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc);
- if (ret) {
- dev_err(dev, "Failed to deassert non-sticky reset\n");
- goto err_assert_sticky_rst;
- }
-
- /* Activate the PCIe bus peripheral devices */
- gpiod_set_value_cansleep(pci->pe_rst, 0);
-
- /* Make sure the state is settled (LTSSM is still disabled though) */
- usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100);
-
- return 0;
-
-err_assert_sticky_rst:
- reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
-
-err_assert_core_rst:
- reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
-
-err_assert_pipe_rst:
- reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
-
-err_disable_core_clk:
- clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
-
-err_disable_app_clk:
- clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
-
-err_assert_phy_rst:
- reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
-
-err_assert_hot_rst:
- reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
-
-err_assert_pwr_rst:
- reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
-
- return ret;
-}
-
-static int bt1_pcie_host_init(struct dw_pcie_rp *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct bt1_pcie *btpci = to_bt1_pcie(pci);
- int ret;
-
- ret = bt1_pcie_get_resources(btpci);
- if (ret)
- return ret;
-
- bt1_pcie_full_stop_bus(btpci, true);
-
- return bt1_pcie_cold_start_bus(btpci);
-}
-
-static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct bt1_pcie *btpci = to_bt1_pcie(pci);
-
- bt1_pcie_full_stop_bus(btpci, false);
-}
-
-static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
- .init = bt1_pcie_host_init,
- .deinit = bt1_pcie_host_deinit,
-};
-
-static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
-{
- struct bt1_pcie *btpci;
-
- btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL);
- if (!btpci)
- return ERR_PTR(-ENOMEM);
-
- btpci->pdev = pdev;
-
- platform_set_drvdata(pdev, btpci);
-
- return btpci;
-}
-
-static int bt1_pcie_add_port(struct bt1_pcie *btpci)
-{
- struct device *dev = &btpci->pdev->dev;
- int ret;
-
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret)
- return ret;
-
- btpci->dw.version = DW_PCIE_VER_460A;
- btpci->dw.dev = dev;
- btpci->dw.ops = &bt1_pcie_ops;
-
- btpci->dw.pp.num_vectors = MAX_MSI_IRQS;
- btpci->dw.pp.ops = &bt1_pcie_host_ops;
-
- dw_pcie_cap_set(&btpci->dw, REQ_RES);
-
- ret = dw_pcie_host_init(&btpci->dw.pp);
-
- return dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n");
-}
-
-static void bt1_pcie_del_port(struct bt1_pcie *btpci)
-{
- dw_pcie_host_deinit(&btpci->dw.pp);
-}
-
-static int bt1_pcie_probe(struct platform_device *pdev)
-{
- struct bt1_pcie *btpci;
-
- btpci = bt1_pcie_create_data(pdev);
- if (IS_ERR(btpci))
- return PTR_ERR(btpci);
-
- return bt1_pcie_add_port(btpci);
-}
-
-static void bt1_pcie_remove(struct platform_device *pdev)
-{
- struct bt1_pcie *btpci = platform_get_drvdata(pdev);
-
- bt1_pcie_del_port(btpci);
-}
-
-static const struct of_device_id bt1_pcie_of_match[] = {
- { .compatible = "baikal,bt1-pcie" },
- {},
-};
-MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
-
-static struct platform_driver bt1_pcie_driver = {
- .probe = bt1_pcie_probe,
- .remove = bt1_pcie_remove,
- .driver = {
- .name = "bt1-pcie",
- .of_match_table = bt1_pcie_of_match,
- },
-};
-module_platform_driver(bt1_pcie_driver);
-
-MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
-MODULE_DESCRIPTION("Baikal-T1 PCIe driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
index 0d1340c9b364..d0884253be97 100644
--- a/drivers/pci/controller/dwc/pcie-designware-debugfs.c
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -131,13 +131,16 @@ static const u32 err_inj_type_mask[] = {
* supported in DWC RAS DES
* @name: Name of the error counter
* @group_no: Group number that the event belongs to. The value can range
- * from 0 to 4
+ * from 0 to 7
* @event_no: Event number of the particular event. The value ranges are:
* Group 0: 0 - 10
* Group 1: 5 - 13
* Group 2: 0 - 7
* Group 3: 0 - 5
* Group 4: 0 - 1
+ * Group 5: 0 - 13
+ * Group 6: 0 - 6
+ * Group 7: 0 - 25
*/
struct dwc_pcie_event_counter {
const char *name;
@@ -181,6 +184,53 @@ static const struct dwc_pcie_event_counter event_list[] = {
{"completion_timeout", 0x3, 0x5},
{"ebuf_skp_add", 0x4, 0x0},
{"ebuf_skp_del", 0x4, 0x1},
+ {"l0_to_recovery_entry", 0x5, 0x0},
+ {"l1_to_recovery_entry", 0x5, 0x1},
+ {"tx_l0s_entry", 0x5, 0x2},
+ {"rx_l0s_entry", 0x5, 0x3},
+ {"aspm_l1_reject", 0x5, 0x4},
+ {"l1_entry", 0x5, 0x5},
+ {"l1_cpm", 0x5, 0x6},
+ {"l1.1_entry", 0x5, 0x7},
+ {"l1.2_entry", 0x5, 0x8},
+ {"l1_short_duration", 0x5, 0x9},
+ {"l1.2_abort", 0x5, 0xa},
+ {"l2_entry", 0x5, 0xb},
+ {"speed_change", 0x5, 0xc},
+ {"link_width_change", 0x5, 0xd},
+ {"tx_ack_dllp", 0x6, 0x0},
+ {"tx_update_fc_dllp", 0x6, 0x1},
+ {"rx_ack_dllp", 0x6, 0x2},
+ {"rx_update_fc_dllp", 0x6, 0x3},
+ {"rx_nullified_tlp", 0x6, 0x4},
+ {"tx_nullified_tlp", 0x6, 0x5},
+ {"rx_duplicate_tlp", 0x6, 0x6},
+ {"tx_memory_write", 0x7, 0x0},
+ {"tx_memory_read", 0x7, 0x1},
+ {"tx_configuration_write", 0x7, 0x2},
+ {"tx_configuration_read", 0x7, 0x3},
+ {"tx_io_write", 0x7, 0x4},
+ {"tx_io_read", 0x7, 0x5},
+ {"tx_completion_without_data", 0x7, 0x6},
+ {"tx_completion_w_data", 0x7, 0x7},
+ {"tx_message_tlp_pcie_vc_only", 0x7, 0x8},
+ {"tx_atomic", 0x7, 0x9},
+ {"tx_tlp_with_prefix", 0x7, 0xa},
+ {"rx_memory_write", 0x7, 0xb},
+ {"rx_memory_read", 0x7, 0xc},
+ {"rx_configuration_write", 0x7, 0xd},
+ {"rx_configuration_read", 0x7, 0xe},
+ {"rx_io_write", 0x7, 0xf},
+ {"rx_io_read", 0x7, 0x10},
+ {"rx_completion_without_data", 0x7, 0x11},
+ {"rx_completion_w_data", 0x7, 0x12},
+ {"rx_message_tlp_pcie_vc_only", 0x7, 0x13},
+ {"rx_atomic", 0x7, 0x14},
+ {"rx_tlp_with_prefix", 0x7, 0x15},
+ {"tx_ccix_tlp", 0x7, 0x16},
+ {"rx_ccix_tlp", 0x7, 0x17},
+ {"tx_deferrable_memory_write_tlp", 0x7, 0x18},
+ {"rx_deferrable_memory_write_tlp", 0x7, 0x19},
};
static ssize_t lane_detect_read(struct file *file, char __user *buf,
@@ -208,10 +258,11 @@ static ssize_t lane_detect_write(struct file *file, const char __user *buf,
struct dw_pcie *pci = file->private_data;
struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
u32 lane, val;
+ int ret;
- val = kstrtou32_from_user(buf, count, 0, &lane);
- if (val)
- return val;
+ ret = kstrtou32_from_user(buf, count, 0, &lane);
+ if (ret)
+ return ret;
val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
val &= ~(LANE_SELECT);
@@ -347,10 +398,11 @@ static ssize_t counter_enable_write(struct file *file, const char __user *buf,
struct dw_pcie *pci = pdata->pci;
struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
u32 val, enable;
+ int ret;
- val = kstrtou32_from_user(buf, count, 0, &enable);
- if (val)
- return val;
+ ret = kstrtou32_from_user(buf, count, 0, &enable);
+ if (ret)
+ return ret;
mutex_lock(&rinfo->reg_event_lock);
set_event_number(pdata, pci, rinfo);
@@ -408,10 +460,11 @@ static ssize_t counter_lane_write(struct file *file, const char __user *buf,
struct dw_pcie *pci = pdata->pci;
struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
u32 val, lane;
+ int ret;
- val = kstrtou32_from_user(buf, count, 0, &lane);
- if (val)
- return val;
+ ret = kstrtou32_from_user(buf, count, 0, &lane);
+ if (ret)
+ return ret;
mutex_lock(&rinfo->reg_event_lock);
set_event_number(pdata, pci, rinfo);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index c57ae4d6c5c0..d4dc3b24da60 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -754,7 +754,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= nr_irqs - 1; /* encoded as N-1 */
- dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
reg = ep_func->msix_cap + PCI_MSIX_TABLE;
val = offset | bir;
@@ -1110,7 +1110,8 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
struct dw_pcie_ep *ep = &pci->ep;
u8 funcs = ep->epc->max_functions;
- u8 func_no;
+ u32 func0_lnkcap, lnkcap;
+ u8 func_no, offset;
dw_pcie_dbi_ro_wr_en(pci);
@@ -1118,9 +1119,57 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
dw_pcie_ep_init_rebar_registers(ep, func_no);
dw_pcie_setup(pci);
+
+ /*
+ * PCIe r7.0, section 7.5.3.6 states that for multi-function
+ * endpoints, max link width and speed fields must report same
+ * values for all functions. However, dw_pcie_setup() programs
+ * these fields only for function 0. Hence, mirror these fields
+ * to all other functions as well.
+ */
+ if (funcs > 1) {
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ func0_lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ func0_lnkcap = FIELD_GET(PCI_EXP_LNKCAP_MLW |
+ PCI_EXP_LNKCAP_SLS, func0_lnkcap);
+
+ for (func_no = 1; func_no < funcs; func_no++) {
+ offset = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_EXP);
+ lnkcap = dw_pcie_ep_readl_dbi(ep, func_no,
+ offset + PCI_EXP_LNKCAP);
+ FIELD_MODIFY(PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS,
+ &lnkcap, func0_lnkcap);
+ dw_pcie_ep_writel_dbi(ep, func_no,
+ offset + PCI_EXP_LNKCAP, lnkcap);
+ }
+ }
+
dw_pcie_dbi_ro_wr_dis(pci);
}
+static void dw_pcie_ep_disable_bars(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_epc_bar_type bar_type;
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+
+ /*
+ * Reserved BARs should not get disabled by default. All other
+ * BAR types are disabled by default.
+ *
+ * This is in line with the current EPC core design, where all
+ * BARs are disabled by default, and then the EPF driver enables
+ * the BARs it wishes to use.
+ */
+ if (bar_type != BAR_RESERVED)
+ dw_pcie_ep_reset_bar(pci, bar);
+ }
+}
+
/**
* dw_pcie_ep_init_registers - Initialize DWC EP specific registers
* @ep: DWC EP device
@@ -1203,6 +1252,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
if (ep->ops->init)
ep->ops->init(ep);
+ dw_pcie_ep_disable_bars(ep);
+
/*
* PCIe r6.0, section 7.9.15 states that for endpoints that support
* PTM, this capability structure is required in exactly one
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 6ae6189e9b8a..c9517a348836 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -1081,7 +1081,7 @@ static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed sp
static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
+ enum pci_bus_speed speed = pcie_get_link_speed(pci->max_link_speed);
/*
* Lane equalization settings need to be applied for all data rates the
@@ -1171,7 +1171,7 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
* the MSI and MSI-X capabilities of the Root Port to allow the drivers
* to fall back to INTx instead.
*/
- if (pp->use_imsi_rx) {
+ if (pp->use_imsi_rx && !pp->keep_rp_msi_en) {
dw_pcie_remove_capability(pci, PCI_CAP_ID_MSI);
dw_pcie_remove_capability(pci, PCI_CAP_ID_MSIX);
}
@@ -1256,9 +1256,13 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
PCIE_PME_TO_L2_TIMEOUT_US/10,
PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
if (ret) {
- /* Only log message when LTSSM isn't in DETECT or POLL */
- dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
- return ret;
+ /*
+ * Failure is non-fatal since spec r7.0, sec 5.3.3.2.1,
+ * recommends proceeding with L2/L3 sequence even if one or more
+ * devices do not respond with PME_TO_Ack after 10ms timeout.
+ */
+ dev_warn(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
+ ret = 0;
}
/*
@@ -1300,15 +1304,24 @@ int dw_pcie_resume_noirq(struct dw_pcie *pci)
ret = dw_pcie_start_link(pci);
if (ret)
- return ret;
+ goto err_deinit;
ret = dw_pcie_wait_for_link(pci);
- if (ret)
- return ret;
+ if (ret == -ETIMEDOUT)
+ goto err_stop_link;
if (pci->pp.ops->post_init)
pci->pp.ops->post_init(&pci->pp);
+ return 0;
+
+err_stop_link:
+ dw_pcie_stop_link(pci);
+
+err_deinit:
+ if (pci->pp.ops->deinit)
+ pci->pp.ops->deinit(&pci->pp);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 8530746ec5cb..d103ab759c4e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -32,15 +32,6 @@ struct dw_plat_pcie_of_data {
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
};
-static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
-{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- enum pci_barno bar;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
-}
-
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num)
{
@@ -73,7 +64,6 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
.get_features = dw_plat_pcie_get_features,
};
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 5741c09dde7f..c11cf61b8319 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -487,13 +487,13 @@ static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg
static inline u32 dw_pcie_enable_ecrc(u32 val)
{
/*
- * DesignWare core version 4.90A has a design issue where the 'TD'
- * bit in the Control register-1 of the ATU outbound region acts
- * like an override for the ECRC setting, i.e., the presence of TLP
- * Digest (ECRC) in the outgoing TLPs is solely determined by this
- * bit. This is contrary to the PCIe spec which says that the
- * enablement of the ECRC is solely determined by the AER
- * registers.
+ * DWC versions 0x3530302a and 0x3536322a have a design issue where
+ * the 'TD' bit in the Control register-1 of the ATU outbound
+ * region acts like an override for the ECRC setting, i.e., the
+ * presence of TLP Digest (ECRC) in the outgoing TLPs is solely
+ * determined by this bit. This is contrary to the PCIe spec which
+ * says that the enablement of the ECRC is solely determined by the
+ * AER registers.
*
* Because of this, even when the ECRC is enabled through AER
* registers, the transactions going through ATU won't have TLP
@@ -563,7 +563,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
dw_pcie_ver_is_ge(pci, 460A))
val |= PCIE_ATU_INCREASE_REGION_SIZE;
- if (dw_pcie_ver_is(pci, 490A))
+ if (dw_pcie_ver_is(pci, 490A) || dw_pcie_ver_is(pci, 500A))
val = dw_pcie_enable_ecrc(val);
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
@@ -861,7 +861,7 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
- switch (pcie_link_speed[pci->max_link_speed]) {
+ switch (pcie_get_link_speed(pci->max_link_speed)) {
case PCIE_SPEED_2_5GT:
link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index ae6389dd9caa..3e69ef60165b 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -34,8 +34,10 @@
#define DW_PCIE_VER_470A 0x3437302a
#define DW_PCIE_VER_480A 0x3438302a
#define DW_PCIE_VER_490A 0x3439302a
+#define DW_PCIE_VER_500A 0x3530302a
#define DW_PCIE_VER_520A 0x3532302a
#define DW_PCIE_VER_540A 0x3534302a
+#define DW_PCIE_VER_562A 0x3536322a
#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
((_pci)->version _op DW_PCIE_VER_ ## _ver)
@@ -421,6 +423,7 @@ struct dw_pcie_host_ops {
struct dw_pcie_rp {
bool use_imsi_rx:1;
+ bool keep_rp_msi_en:1;
bool cfg0_io_shared:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 5b17da63151d..731d93663cca 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -22,6 +22,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/workqueue.h>
+#include <trace/events/pci_controller.h>
#include "../../pci.h"
#include "pcie-designware.h"
@@ -73,6 +75,20 @@
#define PCIE_CLIENT_CDM_RASDES_TBA_L1_1 BIT(4)
#define PCIE_CLIENT_CDM_RASDES_TBA_L1_2 BIT(5)
+/* Debug FIFO information */
+#define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310
+#define PCIE_CLIENT_DBG_EN 0xffff0007
+#define PCIE_CLIENT_DBG_DIS 0xffff0000
+#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320
+#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324
+#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0 0x328
+#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c
+#define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000
+#define PCIE_CLIENT_DBG_FIFO_STATUS 0x350
+#define PCIE_DBG_FIFO_RATE_MASK GENMASK(22, 20)
+#define PCIE_DBG_FIFO_L1SUB_MASK GENMASK(10, 8)
+#define PCIE_DBG_LTSSM_HISTORY_CNT 64
+
/* Hot Reset Control Register */
#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
#define PCIE_LTSSM_APP_DLY2_EN BIT(1)
@@ -98,6 +114,7 @@ struct rockchip_pcie {
struct irq_domain *irq_domain;
const struct rockchip_pcie_of_data *data;
bool supports_clkreq;
+ struct delayed_work trace_work;
};
struct rockchip_pcie_of_data {
@@ -208,6 +225,96 @@ static enum dw_pcie_ltssm rockchip_pcie_get_ltssm(struct dw_pcie *pci)
return rockchip_pcie_get_ltssm_reg(rockchip) & PCIE_LTSSM_STATUS_MASK;
}
+#ifdef CONFIG_TRACING
/*
 * Periodic worker (rescheduled every 5 s) that drains the hardware LTSSM
 * history debug FIFO and emits one pcie_ltssm_state_transition trace
 * event per recorded state change.
 */
static void rockchip_pcie_ltssm_trace_work(struct work_struct *work)
{
	struct rockchip_pcie *rockchip = container_of(work,
						      struct rockchip_pcie,
						      trace_work.work);
	struct dw_pcie *pci = &rockchip->pci;
	enum dw_pcie_ltssm state;
	u32 i, l1ss, prev_val = DW_PCIE_LTSSM_UNKNOWN, rate, val;

	/* Skip the FIFO drain entirely when nobody listens to the event */
	if (!trace_pcie_ltssm_state_transition_enabled())
		goto skip_trace;

	for (i = 0; i < PCIE_DBG_LTSSM_HISTORY_CNT; i++) {
		/* One status read pops one FIFO entry; decode its fields */
		val = rockchip_pcie_readl_apb(rockchip,
					      PCIE_CLIENT_DBG_FIFO_STATUS);
		rate = FIELD_GET(PCIE_DBG_FIFO_RATE_MASK, val);
		l1ss = FIELD_GET(PCIE_DBG_FIFO_L1SUB_MASK, val);
		val = FIELD_GET(PCIE_LTSSM_STATUS_MASK, val);

		/*
		 * Hardware Mechanism: The ring FIFO employs two tracking
		 * counters:
		 * - 'last-read-point': maintains the user's last read position
		 * - 'last-valid-point': tracks the HW's last state update
		 *
		 * Software Handling: When two consecutive LTSSM states are
		 * identical, it indicates invalid subsequent data in the FIFO.
		 * In this case, we skip the remaining entries. The dual counter
		 * design ensures that on the next state transition, reading can
		 * resume from the last user position.
		 */
		if ((i > 0 && val == prev_val) || val > DW_PCIE_LTSSM_RCVRY_EQ3)
			break;

		state = prev_val = val;
		/* Refine a plain L1 entry into L1.1/L1.2 via the substate field */
		if (val == DW_PCIE_LTSSM_L1_IDLE) {
			if (l1ss == 2)
				state = DW_PCIE_LTSSM_L1_2;
			else if (l1ss == 1)
				state = DW_PCIE_LTSSM_L1_1;
		}

		/* rate is 0-based; rates above the configured max are reported unknown */
		trace_pcie_ltssm_state_transition(dev_name(pci->dev),
						  dw_pcie_ltssm_status_string(state),
						  ((rate + 1) > pci->max_link_speed) ?
						  PCI_SPEED_UNKNOWN : PCIE_SPEED_2_5GT + rate);
	}

skip_trace:
	/* Re-arm ourselves; cancelled by rockchip_pcie_ltssm_trace(false) */
	schedule_delayed_work(&rockchip->trace_work, msecs_to_jiffies(5000));
}
+
/*
 * Enable or disable LTSSM debug-FIFO tracing.
 *
 * On enable: program the pattern/transition hit registers to capture all
 * transition data, switch the debug FIFO on, and start the polling worker.
 * On disable: switch the FIFO off and cancel the worker.
 *
 * NOTE(review): the call order below assumes rockchip_pcie_writel_apb()
 * takes (rockchip, value, register-offset) — confirm against the helper's
 * definition, which is outside this hunk.
 */
static void rockchip_pcie_ltssm_trace(struct rockchip_pcie *rockchip,
				      bool enable)
{
	if (enable) {
		rockchip_pcie_writel_apb(rockchip,
					 PCIE_CLIENT_DBG_TRANSITION_DATA,
					 PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0);
		rockchip_pcie_writel_apb(rockchip,
					 PCIE_CLIENT_DBG_TRANSITION_DATA,
					 PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1);
		rockchip_pcie_writel_apb(rockchip,
					 PCIE_CLIENT_DBG_TRANSITION_DATA,
					 PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0);
		rockchip_pcie_writel_apb(rockchip,
					 PCIE_CLIENT_DBG_TRANSITION_DATA,
					 PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1);
		rockchip_pcie_writel_apb(rockchip,
					 PCIE_CLIENT_DBG_EN,
					 PCIE_CLIENT_DBG_FIFO_MODE_CON);

		INIT_DELAYED_WORK(&rockchip->trace_work,
				  rockchip_pcie_ltssm_trace_work);
		schedule_delayed_work(&rockchip->trace_work, 0);
	} else {
		rockchip_pcie_writel_apb(rockchip,
					 PCIE_CLIENT_DBG_DIS,
					 PCIE_CLIENT_DBG_FIFO_MODE_CON);
		/* Worker re-arms itself, so a synchronous cancel is required */
		cancel_delayed_work_sync(&rockchip->trace_work);
	}
}
+#else
/* Stub used when CONFIG_TRACING is disabled: LTSSM tracing is a no-op. */
static void rockchip_pcie_ltssm_trace(struct rockchip_pcie *rockchip,
				      bool enable)
{
}
+#endif
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
@@ -291,6 +398,9 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
* 100us as we don't know how long should the device need to reset.
*/
msleep(PCIE_T_PVPERL_MS);
+
+ rockchip_pcie_ltssm_trace(rockchip, true);
+
gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
return 0;
@@ -301,6 +411,7 @@ static void rockchip_pcie_stop_link(struct dw_pcie *pci)
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
rockchip_pcie_disable_ltssm(rockchip);
+ rockchip_pcie_ltssm_trace(rockchip, false);
}
static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
@@ -361,13 +472,9 @@ static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- enum pci_barno bar;
rockchip_pcie_enable_l0s(pci);
rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
};
static int rockchip_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -403,12 +510,19 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
.bar[BAR_5] = { .type = BAR_RESIZABLE, },
};
+static const struct pci_epc_bar_rsvd_region rk3588_bar4_rsvd[] = {
+ {
+ /* DMA_CAP (BAR4: DMA Port Logic Structure) */
+ .type = PCI_EPC_BAR_RSVD_DMA_CTRL_MMIO,
+ .offset = 0x0,
+ .size = 0x2000,
+ },
+};
+
/*
* BAR4 on rk3588 exposes the ATU Port Logic Structure to the host regardless of
* iATU settings for BAR4. This means that BAR4 cannot be used by an EPF driver,
- * so mark it as RESERVED. (rockchip_pcie_ep_init() will disable all BARs by
- * default.) If the host could write to BAR4, the iATU settings (for all other
- * BARs) would be overwritten, resulting in (all other BARs) no longer working.
+ * so mark it as RESERVED.
*/
static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
DWC_EPC_COMMON_FEATURES,
@@ -420,7 +534,11 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
.bar[BAR_1] = { .type = BAR_RESIZABLE, },
.bar[BAR_2] = { .type = BAR_RESIZABLE, },
.bar[BAR_3] = { .type = BAR_RESIZABLE, },
- .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = {
+ .type = BAR_RESERVED,
+ .nr_rsvd_regions = ARRAY_SIZE(rk3588_bar4_rsvd),
+ .rsvd_regions = rk3588_bar4_rsvd,
+ },
.bar[BAR_5] = { .type = BAR_RESIZABLE, },
};
diff --git a/drivers/pci/controller/dwc/pcie-eswin.c b/drivers/pci/controller/dwc/pcie-eswin.c
new file mode 100644
index 000000000000..2845832b3824
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-eswin.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ESWIN PCIe Root Complex driver
+ *
+ * Copyright 2026, Beijing ESWIN Computing Technology Co., Ltd.
+ *
+ * Authors: Yu Ning <ningyu@eswincomputing.com>
+ * Senchuan Zhang <zhangsenchuan@eswincomputing.com>
+ * Yanghui Ou <ouyanghui@eswincomputing.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* ELBI registers */
+#define PCIEELBI_CTRL0_OFFSET 0x0
+#define PCIEELBI_STATUS0_OFFSET 0x100
+
+/* LTSSM register fields */
+#define PCIEELBI_APP_LTSSM_ENABLE BIT(5)
+
+/* APP_HOLD_PHY_RST register fields */
+#define PCIEELBI_APP_HOLD_PHY_RST BIT(6)
+
+/* PM_SEL_AUX_CLK register fields */
+#define PCIEELBI_PM_SEL_AUX_CLK BIT(16)
+
+/* DEV_TYPE register fields */
+#define PCIEELBI_CTRL0_DEV_TYPE GENMASK(3, 0)
+
+/* Vendor and device ID value */
+#define PCI_VENDOR_ID_ESWIN 0x1fe1
+#define PCI_DEVICE_ID_ESWIN_EIC7700 0x2030
+
+#define ESWIN_NUM_RSTS ARRAY_SIZE(eswin_pcie_rsts)
+
+static const char * const eswin_pcie_rsts[] = {
+ "pwr",
+ "dbi",
+};
+
+struct eswin_pcie_data {
+ bool skip_l23;
+};
+
+struct eswin_pcie_port {
+ struct list_head list;
+ struct reset_control *perst;
+ int num_lanes;
+};
+
+struct eswin_pcie {
+ struct dw_pcie pci;
+ struct clk_bulk_data *clks;
+ struct reset_control_bulk_data resets[ESWIN_NUM_RSTS];
+ struct list_head ports;
+ const struct eswin_pcie_data *data;
+ int num_clks;
+};
+
+#define to_eswin_pcie(x) dev_get_drvdata((x)->dev)
+
+static int eswin_pcie_start_link(struct dw_pcie *pci)
+{
+ u32 val;
+
+ /* Enable LTSSM */
+ val = readl_relaxed(pci->elbi_base + PCIEELBI_CTRL0_OFFSET);
+ val |= PCIEELBI_APP_LTSSM_ENABLE;
+ writel_relaxed(val, pci->elbi_base + PCIEELBI_CTRL0_OFFSET);
+
+ return 0;
+}
+
+static bool eswin_pcie_link_up(struct dw_pcie *pci)
+{
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u16 val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ return val & PCI_EXP_LNKSTA_DLLLA;
+}
+
+static int eswin_pcie_perst_reset(struct eswin_pcie_port *port,
+ struct eswin_pcie *pcie)
+{
+ int ret;
+
+ ret = reset_control_assert(port->perst);
+ if (ret) {
+ dev_err(pcie->pci.dev, "Failed to assert PERST#\n");
+ return ret;
+ }
+
+ /* Ensure that PERST# has been asserted for at least 100 ms */
+ msleep(PCIE_T_PVPERL_MS);
+
+ ret = reset_control_deassert(port->perst);
+ if (ret) {
+ dev_err(pcie->pci.dev, "Failed to deassert PERST#\n");
+ return ret;
+ }
+
+ return 0;
+}
+
/*
 * Put the controller back into reset: assert PERST# towards every Root
 * Port first, then assert the "pwr"/"dbi" bulk resets.
 */
static void eswin_pcie_assert(struct eswin_pcie *pcie)
{
	struct eswin_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		reset_control_assert(port->perst);
	reset_control_bulk_assert(ESWIN_NUM_RSTS, pcie->resets);
}
+
+static int eswin_pcie_parse_port(struct eswin_pcie *pcie,
+ struct device_node *node)
+{
+ struct device *dev = pcie->pci.dev;
+ struct eswin_pcie_port *port;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->perst = of_reset_control_get_exclusive(node, "perst");
+ if (IS_ERR(port->perst)) {
+ dev_err(dev, "Failed to get PERST# reset\n");
+ return PTR_ERR(port->perst);
+ }
+
+ /*
+ * TODO: Since the Root Port node is separated out by pcie devicetree,
+ * the DWC core initialization code can't parse the num-lanes attribute
+ * in the Root Port. Before entering the DWC core initialization code,
+ * the platform driver code parses the Root Port node. The ESWIN only
+ * supports one Root Port node, and the num-lanes attribute is suitable
+ * for the case of one Root Port.
+ */
+ if (!of_property_read_u32(node, "num-lanes", &port->num_lanes))
+ pcie->pci.num_lanes = port->num_lanes;
+
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int eswin_pcie_parse_ports(struct eswin_pcie *pcie)
+{
+ struct eswin_pcie_port *port, *tmp;
+ struct device *dev = pcie->pci.dev;
+ int ret;
+
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ ret = eswin_pcie_parse_port(pcie, of_port);
+ if (ret)
+ goto err_port;
+ }
+
+ return 0;
+
+err_port:
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ reset_control_put(port->perst);
+ list_del(&port->list);
+ }
+
+ return ret;
+}
+
/*
 * DWC core .init hook: power up the controller, bring it out of reset,
 * configure Root Port mode, release each port's PERST#, wait for the PHY
 * clock switch, and program the vendor/device IDs.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired here is undone and the per-port PERST# references are also
 * released (emptying pcie->ports).
 */
static int eswin_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct eswin_pcie *pcie = to_eswin_pcie(pci);
	struct eswin_pcie_port *port, *tmp;
	u32 val;
	int ret;

	ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (ret)
		return ret;

	/*
	 * The PWR and DBI reset signals are respectively used to reset the
	 * PCIe controller and the DBI register.
	 *
	 * The PERST# signal is a reset signal that simultaneously controls the
	 * PCIe controller, PHY, and Endpoint. Before configuring the PHY, the
	 * PERST# signal must first be deasserted.
	 *
	 * The external reference clock is supplied simultaneously to the PHY
	 * and EP. When the PHY is configurable, the entire chip already has
	 * stable power and reference clock. The PHY will be ready within 20ms
	 * after writing app_hold_phy_rst register bit of ELBI register space.
	 */
	ret = reset_control_bulk_deassert(ESWIN_NUM_RSTS, pcie->resets);
	if (ret) {
		dev_err(pcie->pci.dev, "Failed to deassert resets\n");
		goto err_deassert;
	}

	/* Configure Root Port type */
	val = readl_relaxed(pci->elbi_base + PCIEELBI_CTRL0_OFFSET);
	val &= ~PCIEELBI_CTRL0_DEV_TYPE;
	val |= FIELD_PREP(PCIEELBI_CTRL0_DEV_TYPE, PCI_EXP_TYPE_ROOT_PORT);
	writel_relaxed(val, pci->elbi_base + PCIEELBI_CTRL0_OFFSET);

	/* Pulse PERST# (>= 100 ms low) on each Root Port */
	list_for_each_entry(port, &pcie->ports, list) {
		ret = eswin_pcie_perst_reset(port, pcie);
		if (ret)
			goto err_perst;
	}

	/* Configure app_hold_phy_rst: let the PHY start up (ready in <= 20 ms) */
	val = readl_relaxed(pci->elbi_base + PCIEELBI_CTRL0_OFFSET);
	val &= ~PCIEELBI_APP_HOLD_PHY_RST;
	writel_relaxed(val, pci->elbi_base + PCIEELBI_CTRL0_OFFSET);

	/* The maximum waiting time for the clock switch lock is 20ms */
	ret = readl_poll_timeout(pci->elbi_base + PCIEELBI_STATUS0_OFFSET, val,
				 !(val & PCIEELBI_PM_SEL_AUX_CLK), 1000,
				 20000);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for PM_SEL_AUX_CLK ready\n");
		goto err_phy_init;
	}

	/*
	 * Configure ESWIN VID:DID for Root Port as the default values are
	 * invalid.
	 */
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, PCI_VENDOR_ID_ESWIN);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, PCI_DEVICE_ID_ESWIN_EIC7700);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;

err_phy_init:
	list_for_each_entry(port, &pcie->ports, list)
		reset_control_assert(port->perst);
err_perst:
	reset_control_bulk_assert(ESWIN_NUM_RSTS, pcie->resets);
err_deassert:
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
	/* Drop the PERST# references taken during parse; the list ends empty */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		reset_control_put(port->perst);
		list_del(&port->list);
	}

	return ret;
}
+
/*
 * DWC core .deinit hook: assert PERST# and the bulk resets, then gate
 * the controller clocks.  The per-port reset references are kept.
 */
static void eswin_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct eswin_pcie *pcie = to_eswin_pcie(pci);

	eswin_pcie_assert(pcie);
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
}
+
/*
 * DWC core .pme_turn_off hook, invoked on suspend before the core would
 * perform the PME_Turn_Off handshake.
 */
static void eswin_pcie_pme_turn_off(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct eswin_pcie *pcie = to_eswin_pcie(pci);

	/*
	 * The ESWIN EIC7700 SoC lacks hardware support for the L2/L3 low-power
	 * link states. It cannot enter the L2/L3 Ready state through the
	 * PME_Turn_Off/PME_To_Ack handshake protocol. To avoid this problem,
	 * the skip_l23_ready has been set.
	 */
	pp->skip_l23_ready = pcie->data->skip_l23;
}
+
+static const struct dw_pcie_host_ops eswin_pcie_host_ops = {
+ .init = eswin_pcie_host_init,
+ .deinit = eswin_pcie_host_deinit,
+ .pme_turn_off = eswin_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = eswin_pcie_start_link,
+ .link_up = eswin_pcie_link_up,
+};
+
+static int eswin_pcie_probe(struct platform_device *pdev)
+{
+ const struct eswin_pcie_data *data;
+ struct eswin_pcie_port *port, *tmp;
+ struct device *dev = &pdev->dev;
+ struct eswin_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return dev_err_probe(dev, -ENODATA, "No platform data\n");
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pcie->ports);
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &eswin_pcie_host_ops;
+ pcie->data = data;
+
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0)
+ return dev_err_probe(dev, pcie->num_clks,
+ "Failed to get pcie clocks\n");
+
+ for (i = 0; i < ESWIN_NUM_RSTS; i++)
+ pcie->resets[i].id = eswin_pcie_rsts[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, ESWIN_NUM_RSTS,
+ pcie->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get resets\n");
+
+ ret = eswin_pcie_parse_ports(pcie);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to parse Root Port\n");
+
+ platform_set_drvdata(pdev, pcie);
+
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm_runtime_put;
+
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret) {
+ dev_err(dev, "Failed to init host\n");
+ goto err_init;
+ }
+
+ return 0;
+
+err_pm_runtime_put:
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ reset_control_put(port->perst);
+ list_del(&port->list);
+ }
+err_init:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
/* System suspend (noirq phase): delegate to the DWC core helper. */
static int eswin_pcie_suspend_noirq(struct device *dev)
{
	struct eswin_pcie *pcie = dev_get_drvdata(dev);

	return dw_pcie_suspend_noirq(&pcie->pci);
}
+
/* System resume (noirq phase): delegate to the DWC core helper. */
static int eswin_pcie_resume_noirq(struct device *dev)
{
	struct eswin_pcie *pcie = dev_get_drvdata(dev);

	return dw_pcie_resume_noirq(&pcie->pci);
}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(eswin_pcie_pm, eswin_pcie_suspend_noirq,
+ eswin_pcie_resume_noirq);
+
+static const struct eswin_pcie_data eswin_eic7700_data = {
+ .skip_l23 = true,
+};
+
+static const struct of_device_id eswin_pcie_of_match[] = {
+ { .compatible = "eswin,eic7700-pcie", .data = &eswin_eic7700_data },
+ {}
+};
+
+static struct platform_driver eswin_pcie_driver = {
+ .probe = eswin_pcie_probe,
+ .driver = {
+ .name = "eswin-pcie",
+ .of_match_table = eswin_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &eswin_pcie_pm,
+ },
+};
+builtin_platform_driver(eswin_pcie_driver);
+
+MODULE_DESCRIPTION("ESWIN PCIe Root Complex driver");
+MODULE_AUTHOR("Yu Ning <ningyu@eswincomputing.com>");
+MODULE_AUTHOR("Senchuan Zhang <zhangsenchuan@eswincomputing.com>");
+MODULE_AUTHOR("Yanghui Ou <ouyanghui@eswincomputing.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 2666a9c3d67e..7cf2c312ecec 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -313,11 +313,8 @@ static const struct pci_epc_features keembay_pcie_epc_features = {
.msi_capable = true,
.msix_capable = true,
.bar[BAR_0] = { .only_64bit = true, },
- .bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_2] = { .only_64bit = true, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .only_64bit = true, },
- .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_16K,
};
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
index 01c5387e53bf..5aa73c628737 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-common.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -22,7 +22,7 @@ void qcom_pcie_common_set_equalization(struct dw_pcie *pci)
* applied.
*/
- for (speed = PCIE_SPEED_8_0GT; speed <= pcie_link_speed[pci->max_link_speed]; speed++) {
+ for (speed = PCIE_SPEED_8_0GT; speed <= pcie_get_link_speed(pci->max_link_speed); speed++) {
if (speed > PCIE_SPEED_32_0GT) {
dev_warn(dev, "Skipped equalization settings for unsupported data rate\n");
break;
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 18460f01b2c6..257c2bcb5f76 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -152,7 +152,7 @@
#define WAKE_DELAY_US 2000 /* 2 ms */
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
- Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
+ Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_get_link_speed(speed)))
#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
@@ -531,7 +531,7 @@ skip_resources_enable:
qcom_pcie_common_set_equalization(pci);
- if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
+ if (pcie_get_link_speed(pci->max_link_speed) == PCIE_SPEED_16_0GT)
qcom_pcie_common_set_16gt_lane_margining(pci);
/*
@@ -850,9 +850,7 @@ static const struct pci_epc_features qcom_pcie_epc_features = {
.msi_capable = true,
.align = SZ_4K,
.bar[BAR_0] = { .only_64bit = true, },
- .bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_2] = { .only_64bit = true, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features *
@@ -861,17 +859,7 @@ qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
return &qcom_pcie_epc_features;
}
-static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
-{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- enum pci_barno bar;
-
- for (bar = BAR_0; bar <= BAR_5; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
-}
-
static const struct dw_pcie_ep_ops pci_ep_ops = {
- .init = qcom_pcie_ep_init,
.raise_irq = qcom_pcie_ep_raise_irq,
.get_features = qcom_pcie_epc_get_features,
};
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 67a16af69ddc..af6bf5cce65b 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -170,7 +170,7 @@
#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
- Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
+ Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_get_link_speed(speed)))
struct qcom_pcie_resources_1_0_0 {
struct clk_bulk_data *clks;
@@ -320,7 +320,7 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
qcom_pcie_common_set_equalization(pci);
- if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
+ if (pcie_get_link_speed(pci->max_link_speed) == PCIE_SPEED_16_0GT)
qcom_pcie_common_set_16gt_lane_margining(pci);
/* Enable Link Training state machine */
@@ -350,15 +350,20 @@ static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
}
-static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
+static void qcom_pcie_set_slot_nccs(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
dw_pcie_dbi_ro_wr_en(pci);
+ /*
+ * Qcom PCIe Root Ports do not support generating command completion
+ * notifications for the Hot-Plug commands. So set the NCCS field to
+ * avoid waiting for the completions.
+ */
val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
- val &= ~PCI_EXP_SLTCAP_HPC;
+ val |= PCI_EXP_SLTCAP_NCCS;
writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -558,7 +563,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
writel(CFG_BRIDGE_SB_INIT,
pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
- qcom_pcie_clear_hpc(pcie->pci);
+ qcom_pcie_set_slot_nccs(pcie->pci);
return 0;
}
@@ -638,7 +643,7 @@ static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
}
- qcom_pcie_clear_hpc(pcie->pci);
+ qcom_pcie_set_slot_nccs(pcie->pci);
return 0;
}
@@ -731,7 +736,7 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
val |= EN;
writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
- qcom_pcie_clear_hpc(pcie->pci);
+ qcom_pcie_set_slot_nccs(pcie->pci);
return 0;
}
@@ -1037,7 +1042,7 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
pcie->parf + PARF_NO_SNOOP_OVERRIDE);
- qcom_pcie_clear_hpc(pcie->pci);
+ qcom_pcie_set_slot_nccs(pcie->pci);
return 0;
}
@@ -1579,7 +1584,7 @@ static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
ret);
}
} else if (pcie->use_pm_opp) {
- freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
+ freq_mbps = pcie_dev_speed_mbps(pcie_get_link_speed(speed));
if (freq_mbps < 0)
return;
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index a6912e85e4dd..8b03c42f8c84 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -386,15 +386,6 @@ static void rcar_gen4_pcie_ep_pre_init(struct dw_pcie_ep *ep)
writel(PCIEDMAINTSTSEN_INIT, rcar->base + PCIEDMAINTSTSEN);
}
-static void rcar_gen4_pcie_ep_init(struct dw_pcie_ep *ep)
-{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- enum pci_barno bar;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
-}
-
static void rcar_gen4_pcie_ep_deinit(struct rcar_gen4_pcie *rcar)
{
writel(0, rcar->base + PCIEDMAINTSTSEN);
@@ -422,11 +413,13 @@ static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
DWC_EPC_COMMON_FEATURES,
.msi_capable = true,
- .bar[BAR_1] = { .type = BAR_RESERVED, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_DISABLED, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_DISABLED, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
- .bar[BAR_5] = { .type = BAR_RESERVED, },
- .align = SZ_1M,
+ .bar[BAR_5] = { .type = BAR_DISABLED, },
+ .align = SZ_4K,
};
static const struct pci_epc_features*
@@ -449,7 +442,6 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.pre_init = rcar_gen4_pcie_ep_pre_init,
- .init = rcar_gen4_pcie_ep_init,
.raise_irq = rcar_gen4_pcie_ep_raise_irq,
.get_features = rcar_gen4_pcie_ep_get_features,
.get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
diff --git a/drivers/pci/controller/dwc/pcie-stm32-ep.c b/drivers/pci/controller/dwc/pcie-stm32-ep.c
index c1944b40ce02..a7988dff1045 100644
--- a/drivers/pci/controller/dwc/pcie-stm32-ep.c
+++ b/drivers/pci/controller/dwc/pcie-stm32-ep.c
@@ -28,15 +28,6 @@ struct stm32_pcie {
unsigned int perst_irq;
};
-static void stm32_pcie_ep_init(struct dw_pcie_ep *ep)
-{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- enum pci_barno bar;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
-}
-
static int stm32_pcie_start_link(struct dw_pcie *pci)
{
struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
@@ -82,7 +73,6 @@ stm32_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops stm32_pcie_ep_ops = {
- .init = stm32_pcie_ep_init,
.raise_irq = stm32_pcie_raise_irq,
.get_features = stm32_pcie_get_features,
};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 06571d806ab3..9dcfa194050e 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -35,8 +35,8 @@
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"
-#define TEGRA194_DWC_IP_VER 0x490A
-#define TEGRA234_DWC_IP_VER 0x562A
+#define TEGRA194_DWC_IP_VER DW_PCIE_VER_500A
+#define TEGRA234_DWC_IP_VER DW_PCIE_VER_562A
#define APPL_PINMUX 0x0
#define APPL_PINMUX_PEX_RST BIT(0)
@@ -44,6 +44,7 @@
#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
+#define APPL_PINMUX_CLKREQ_DEFAULT_VALUE BIT(13)
#define APPL_CTRL 0x4
#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
@@ -90,6 +91,7 @@
#define APPL_INTR_EN_L1_8_0 0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_8_EDMA_INT_EN BIT(6)
#define APPL_INTR_EN_L1_8_INTX_EN BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15)
@@ -137,7 +139,11 @@
#define APPL_DEBUG_PM_LINKST_IN_L0 0x11
#define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT 3
-#define LTSSM_STATE_PRE_DETECT 5
+#define LTSSM_STATE_DETECT_QUIET 0x00
+#define LTSSM_STATE_DETECT_ACT 0x08
+#define LTSSM_STATE_PRE_DETECT_QUIET 0x28
+#define LTSSM_STATE_DETECT_WAIT 0x30
+#define LTSSM_STATE_L2_IDLE 0xa8
#define APPL_RADM_STATUS 0xE4
#define APPL_PM_XMT_TURNOFF_STATE BIT(0)
@@ -198,9 +204,8 @@
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8
-#define PME_ACK_TIMEOUT 10000
-
-#define LTSSM_TIMEOUT 50000 /* 50ms */
+#define LTSSM_DELAY_US 10000 /* 10 ms */
+#define LTSSM_TIMEOUT_US 120000 /* 120 ms */
#define GEN3_GEN4_EQ_PRESET_INIT 5
@@ -231,6 +236,7 @@ struct tegra_pcie_dw_of_data {
bool has_sbr_reset_fix;
bool has_l1ss_exit_fix;
bool has_ltr_req_fix;
+ bool disable_l1_2;
u32 cdm_chk_int_en_bit;
u32 gen4_preset_vec;
u8 n_fts[2];
@@ -243,6 +249,7 @@ struct tegra_pcie_dw {
struct resource *atu_dma_res;
void __iomem *appl_base;
struct clk *core_clk;
+ struct clk *core_clk_m;
struct reset_control *core_apb_rst;
struct reset_control *core_rst;
struct dw_pcie pci;
@@ -310,7 +317,7 @@ static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
- val = width * PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]);
+ val = width * PCIE_SPEED2MBS_ENC(pcie_get_link_speed(speed));
if (icc_set_bw(pcie->icc_path, Mbps_to_icc(val), 0))
dev_err(pcie->dev, "can't set bw[%u]\n", val);
@@ -482,15 +489,6 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
if (val & PCI_COMMAND_MASTER) {
ktime_t timeout;
- /* 110us for both snoop and no-snoop */
- val = FIELD_PREP(PCI_LTR_VALUE_MASK, 110) |
- FIELD_PREP(PCI_LTR_SCALE_MASK, 2) |
- LTR_MSG_REQ |
- FIELD_PREP(PCI_LTR_NOSNOOP_VALUE, 110) |
- FIELD_PREP(PCI_LTR_NOSNOOP_SCALE, 2) |
- LTR_NOSNOOP_MSG_REQ;
- appl_writel(pcie, val, APPL_LTR_MSG_1);
-
/* Send LTR upstream */
val = appl_readl(pcie, APPL_LTR_MSG_2);
val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
@@ -548,6 +546,17 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
spurious = 0;
}
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+
+ /*
+ * Interrupt is handled by DMA driver; don't treat it as
+ * spurious
+ */
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK)
+ spurious = 0;
+ }
+
if (spurious) {
dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
status_l0);
@@ -685,6 +694,23 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
if (pcie->supports_clkreq)
pci->l1ss_support = true;
+ /*
+ * Disable L1.2 capability advertisement for Tegra234 Endpoint mode.
+ * Tegra234 has a hardware bug where during L1.2 exit, the UPHY PLL is
+ * powered up immediately without waiting for REFCLK to stabilize. This
+ * causes the PLL to fail to lock to the correct frequency, resulting in
+ * PCIe link loss. Since there is no hardware fix available, we prevent
+ * the Endpoint from advertising L1.2 support by clearing the L1.2 bits
+ * in the L1 PM Substates Capabilities register. This ensures the host
+ * will not attempt to enter L1.2 state with this Endpoint.
+ */
+ if (pcie->of_data->disable_l1_2 &&
+ pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
+ val &= ~(PCI_L1SS_CAP_PCIPM_L1_2 | PCI_L1SS_CAP_ASPM_L1_2);
+ dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, val);
+ }
+
/* Program L0s and L1 entrance latencies */
val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
@@ -767,6 +793,7 @@ static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
val |= APPL_INTR_EN_L1_8_INTX_EN;
val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
+ val |= APPL_INTR_EN_L1_8_EDMA_INT_EN;
if (IS_ENABLED(CONFIG_PCIEAER))
val |= APPL_INTR_EN_L1_8_AER_INT_EN;
appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
@@ -924,6 +951,8 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
}
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+ if (clk_prepare_enable(pcie->core_clk_m))
+ dev_err(pci->dev, "Failed to enable core monitor clock\n");
return 0;
}
@@ -996,6 +1025,12 @@ retry_link:
val &= ~PCI_DLF_EXCHANGE_ENABLE;
dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
+ /*
+ * core_clk_m is enabled as part of host_init callback in
+ * dw_pcie_host_init(). Disable the clock since below
+ * tegra_pcie_dw_host_init() will enable it again.
+ */
+ clk_disable_unprepare(pcie->core_clk_m);
tegra_pcie_dw_host_init(pp);
dw_pcie_setup_rc(pp);
@@ -1022,7 +1057,8 @@ static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
- disable_irq(pcie->pex_rst_irq);
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ disable_irq(pcie->pex_rst_irq);
}
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
@@ -1058,6 +1094,9 @@ static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
ret = phy_power_on(pcie->phys[i]);
if (ret < 0)
goto phy_exit;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ phy_calibrate(pcie->phys[i]);
}
return 0;
@@ -1163,9 +1202,9 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
return err;
}
- pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
- "nvidia,refclk-select",
- GPIOD_OUT_HIGH);
+ pcie->pex_refclk_sel_gpiod = devm_gpiod_get_optional(pcie->dev,
+ "nvidia,refclk-select",
+ GPIOD_OUT_HIGH);
if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
const char *level = KERN_ERR;
@@ -1255,44 +1294,6 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
return 0;
}
-static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
-{
- struct dw_pcie_rp *pp = &pcie->pci.pp;
- struct pci_bus *child, *root_port_bus = NULL;
- struct pci_dev *pdev;
-
- /*
- * link doesn't go into L2 state with some of the endpoints with Tegra
- * if they are not in D0 state. So, need to make sure that immediate
- * downstream devices are in D0 state before sending PME_TurnOff to put
- * link into L2 state.
- * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
- * 5.2 Link State Power Management (Page #428).
- */
-
- list_for_each_entry(child, &pp->bridge->bus->children, node) {
- if (child->parent == pp->bridge->bus) {
- root_port_bus = child;
- break;
- }
- }
-
- if (!root_port_bus) {
- dev_err(pcie->dev, "Failed to find downstream bus of Root Port\n");
- return;
- }
-
- /* Bring downstream devices to D0 if they are not already in */
- list_for_each_entry(pdev, &root_port_bus->devices, bus_list) {
- if (PCI_SLOT(pdev->devfn) == 0) {
- if (pci_set_power_state(pdev, PCI_D0))
- dev_err(pcie->dev,
- "Failed to transition %s to D0 state\n",
- dev_name(&pdev->dev));
- }
- }
-}
-
static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
@@ -1454,6 +1455,7 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
val = appl_readl(pcie, APPL_PINMUX);
val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
+ val &= ~APPL_PINMUX_CLKREQ_DEFAULT_VALUE;
appl_writel(pcie, val, APPL_PINMUX);
}
@@ -1553,9 +1555,10 @@ static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
val |= APPL_PM_XMT_TURNOFF_STATE;
appl_writel(pcie, val, APPL_RADM_STATUS);
- return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
- val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
- 1, PME_ACK_TIMEOUT);
+ return readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
+ PCIE_PME_TO_L2_TIMEOUT_US/10,
+ PCIE_PME_TO_L2_TIMEOUT_US);
}
static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
@@ -1590,23 +1593,22 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
data &= ~APPL_PINMUX_PEX_RST;
appl_writel(pcie, data, APPL_PINMUX);
+ err = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, data,
+ ((data & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_QUIET) ||
+ ((data & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_ACT) ||
+ ((data & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_PRE_DETECT_QUIET) ||
+ ((data & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_WAIT),
+ LTSSM_DELAY_US, LTSSM_TIMEOUT_US);
+ if (err)
+ dev_info(pcie->dev, "LTSSM state: 0x%x detect timeout: %d\n", data, err);
+
/*
- * Some cards do not go to detect state even after de-asserting
- * PERST#. So, de-assert LTSSM to bring link to detect state.
+ * Deassert LTSSM state to stop the state toggling between
+ * Polling and Detect.
*/
data = readl(pcie->appl_base + APPL_CTRL);
data &= ~APPL_CTRL_LTSSM_EN;
writel(data, pcie->appl_base + APPL_CTRL);
-
- err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
- data,
- ((data &
- APPL_DEBUG_LTSSM_STATE_MASK) >>
- APPL_DEBUG_LTSSM_STATE_SHIFT) ==
- LTSSM_STATE_PRE_DETECT,
- 1, LTSSM_TIMEOUT);
- if (err)
- dev_info(pcie->dev, "Link didn't go to detect state\n");
}
/*
* DBI registers may not be accessible after this as PLL-E would be
@@ -1622,7 +1624,7 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
- tegra_pcie_downstream_dev_to_D0(pcie);
+ clk_disable_unprepare(pcie->core_clk_m);
dw_pcie_host_deinit(&pcie->pci.pp);
tegra_pcie_dw_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
@@ -1680,19 +1682,24 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
if (pcie->ep_state == EP_STATE_DISABLED)
return;
- /* Disable LTSSM */
+ ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_QUIET) ||
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_ACT) ||
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_PRE_DETECT_QUIET) ||
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_WAIT) ||
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_L2_IDLE),
+ LTSSM_DELAY_US, LTSSM_TIMEOUT_US);
+ if (ret)
+ dev_info(pcie->dev, "LTSSM state: 0x%x detect timeout: %d\n", val, ret);
+
+ /*
+ * Deassert LTSSM state to stop the state toggling between
+ * Polling and Detect.
+ */
val = appl_readl(pcie, APPL_CTRL);
val &= ~APPL_CTRL_LTSSM_EN;
appl_writel(pcie, val, APPL_CTRL);
- ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
- ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
- APPL_DEBUG_LTSSM_STATE_SHIFT) ==
- LTSSM_STATE_PRE_DETECT,
- 1, LTSSM_TIMEOUT);
- if (ret)
- dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
-
reset_control_assert(pcie->core_rst);
tegra_pcie_disable_phy(pcie);
@@ -1771,10 +1778,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
goto fail_phy;
}
- /* Perform cleanup that requires refclk */
- pci_epc_deinit_notify(pcie->pci.ep.epc);
- dw_pcie_ep_cleanup(&pcie->pci.ep);
-
/* Clear any stale interrupt statuses */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
@@ -1803,6 +1806,8 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = appl_readl(pcie, APPL_CTRL);
val |= APPL_CTRL_SYS_PRE_DET_STATE;
val |= APPL_CTRL_HW_HOT_RST_EN;
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN << APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
appl_writel(pcie, val, APPL_CTRL);
val = appl_readl(pcie, APPL_CFG_MISC);
@@ -1826,6 +1831,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
+ val |= APPL_INTR_EN_L0_0_INT_INT_EN;
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
@@ -1833,8 +1839,29 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
+ val |= APPL_INTR_EN_L1_8_EDMA_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
+
+ /* 110us for both snoop and no-snoop */
+ val = FIELD_PREP(PCI_LTR_VALUE_MASK, 110) |
+ FIELD_PREP(PCI_LTR_SCALE_MASK, 2) |
+ LTR_MSG_REQ |
+ FIELD_PREP(PCI_LTR_NOSNOOP_VALUE, 110) |
+ FIELD_PREP(PCI_LTR_NOSNOOP_SCALE, 2) |
+ LTR_NOSNOOP_MSG_REQ;
+ appl_writel(pcie, val, APPL_LTR_MSG_1);
+
reset_control_deassert(pcie->core_rst);
+ /* Perform cleanup that requires refclk and core reset deasserted */
+ pci_epc_deinit_notify(pcie->pci.ep.epc);
+ dw_pcie_ep_cleanup(&pcie->pci.ep);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
if (pcie->update_fc_fixup) {
val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
@@ -1923,15 +1950,6 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
-{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- enum pci_barno bar;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
-};
-
static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
@@ -1987,17 +2005,48 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_bar_rsvd_region tegra194_bar2_rsvd[] = {
+ {
+ /* MSI-X table structure */
+ .type = PCI_EPC_BAR_RSVD_MSIX_TBL_RAM,
+ .offset = 0x0,
+ .size = SZ_64K,
+ },
+ {
+ /* MSI-X PBA structure */
+ .type = PCI_EPC_BAR_RSVD_MSIX_PBA_RAM,
+ .offset = 0x10000,
+ .size = SZ_64K,
+ },
+};
+
+static const struct pci_epc_bar_rsvd_region tegra194_bar4_rsvd[] = {
+ {
+ /* DMA_CAP (BAR4: DMA Port Logic Structure) */
+ .type = PCI_EPC_BAR_RSVD_DMA_CTRL_MMIO,
+ .offset = 0x0,
+ .size = SZ_4K,
+ },
+};
+
+/* Tegra EP: BAR0 = 64-bit programmable BAR, BAR2 = 64-bit MSI-X table, BAR4 = 64-bit DMA regs. */
static const struct pci_epc_features tegra_pcie_epc_features = {
DWC_EPC_COMMON_FEATURES,
.linkup_notifier = true,
.msi_capable = true,
- .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
- .only_64bit = true, },
- .bar[BAR_1] = { .type = BAR_RESERVED, },
- .bar[BAR_2] = { .type = BAR_RESERVED, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
- .bar[BAR_4] = { .type = BAR_RESERVED, },
- .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_2] = {
+ .type = BAR_RESERVED,
+ .only_64bit = true,
+ .nr_rsvd_regions = ARRAY_SIZE(tegra194_bar2_rsvd),
+ .rsvd_regions = tegra194_bar2_rsvd,
+ },
+ .bar[BAR_4] = {
+ .type = BAR_RESERVED,
+ .only_64bit = true,
+ .nr_rsvd_regions = ARRAY_SIZE(tegra194_bar4_rsvd),
+ .rsvd_regions = tegra194_bar4_rsvd,
+ },
.align = SZ_64K,
};
@@ -2008,7 +2057,6 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .init = tegra_pcie_ep_init,
.raise_irq = tegra_pcie_ep_raise_irq,
.get_features = tegra_pcie_ep_get_features,
};
@@ -2149,6 +2197,11 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
return PTR_ERR(pcie->core_clk);
}
+ pcie->core_clk_m = devm_clk_get_optional(dev, "core_m");
+ if (IS_ERR(pcie->core_clk_m))
+ return dev_err_probe(dev, PTR_ERR(pcie->core_clk_m),
+ "Failed to get monitor clock\n");
+
pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"appl");
if (!pcie->appl_res) {
@@ -2248,7 +2301,7 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, pp->irq,
tegra_pcie_ep_hard_irq,
tegra_pcie_ep_irq_thread,
- IRQF_SHARED | IRQF_ONESHOT,
+ IRQF_SHARED,
"tegra-pcie-ep-intr", pcie);
if (ret) {
dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
@@ -2277,6 +2330,7 @@ fail:
static void tegra_pcie_dw_remove(struct platform_device *pdev)
{
struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
if (!pcie->link_state)
@@ -2288,6 +2342,7 @@ static void tegra_pcie_dw_remove(struct platform_device *pdev)
} else {
disable_irq(pcie->pex_rst_irq);
pex_ep_event_pex_rst_assert(pcie);
+ dw_pcie_ep_deinit(ep);
}
pm_runtime_disable(pcie->dev);
@@ -2296,16 +2351,28 @@ static void tegra_pcie_dw_remove(struct platform_device *pdev)
gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
}
-static int tegra_pcie_dw_suspend_late(struct device *dev)
+static int tegra_pcie_dw_suspend(struct device *dev)
{
struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
- u32 val;
if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
- dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
- return -EPERM;
+ if (pcie->ep_state == EP_STATE_ENABLED) {
+ dev_err(dev, "Tegra PCIe is in EP mode, suspend not allowed\n");
+ return -EPERM;
+ }
+
+ disable_irq(pcie->pex_rst_irq);
+ return 0;
}
+ return 0;
+}
+
+static int tegra_pcie_dw_suspend_late(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ u32 val;
+
if (!pcie->link_state)
return 0;
@@ -2325,10 +2392,13 @@ static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
if (!pcie->link_state)
return 0;
- tegra_pcie_downstream_dev_to_D0(pcie);
+ clk_disable_unprepare(pcie->core_clk_m);
tegra_pcie_dw_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
@@ -2340,6 +2410,9 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
int ret;
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
if (!pcie->link_state)
return 0;
@@ -2372,8 +2445,8 @@ static int tegra_pcie_dw_resume_early(struct device *dev)
u32 val;
if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
- dev_err(dev, "Suspend is not supported in EP mode");
- return -ENOTSUPP;
+ enable_irq(pcie->pex_rst_irq);
+ return 0;
}
if (!pcie->link_state)
@@ -2402,7 +2475,6 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
return;
debugfs_remove_recursive(pcie->debugfs);
- tegra_pcie_downstream_dev_to_D0(pcie);
disable_irq(pcie->pci.pp.irq);
if (IS_ENABLED(CONFIG_PCI_MSI))
@@ -2452,6 +2524,7 @@ static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
.mode = DW_PCIE_EP_TYPE,
.has_l1ss_exit_fix = true,
.has_ltr_req_fix = true,
+ .disable_l1_2 = true,
.cdm_chk_int_en_bit = BIT(18),
/* Gen4 - 6, 8 and 9 presets enabled */
.gen4_preset_vec = 0x340,
@@ -2479,6 +2552,7 @@ static const struct of_device_id tegra_pcie_dw_of_match[] = {
};
static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+ .suspend = tegra_pcie_dw_suspend,
.suspend_late = tegra_pcie_dw_suspend_late,
.suspend_noirq = tegra_pcie_dw_suspend_noirq,
.resume_noirq = tegra_pcie_dw_resume_noirq,
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index d52753060970..89fb78200222 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -203,15 +203,6 @@ static void uniphier_pcie_stop_link(struct dw_pcie *pci)
uniphier_pcie_ltssm_enable(priv, false);
}
-static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
-{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- enum pci_barno bar;
-
- for (bar = BAR_0; bar <= BAR_5; bar++)
- dw_pcie_ep_reset_bar(pci, bar);
-}
-
static int uniphier_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -283,7 +274,6 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
- .init = uniphier_pcie_ep_init,
.raise_irq = uniphier_pcie_ep_raise_irq,
.get_features = uniphier_pcie_get_features,
};
@@ -426,11 +416,9 @@ static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
.msix_capable = false,
.align = 1 << 16,
.bar[BAR_0] = { .only_64bit = true, },
- .bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_2] = { .only_64bit = true, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
- .bar[BAR_4] = { .type = BAR_RESERVED, },
- .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_DISABLED, },
+ .bar[BAR_5] = { .type = BAR_DISABLED, },
},
};
@@ -445,11 +433,8 @@ static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
.msix_capable = false,
.align = 1 << 12,
.bar[BAR_0] = { .only_64bit = true, },
- .bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_2] = { .only_64bit = true, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .only_64bit = true, },
- .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 49c0a2d51162..cfc8fa403dad 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -4172,7 +4172,7 @@ static int __init init_hv_pci_drv(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
- if (hv_root_partition() && !hv_nested)
+ if (!hv_vmbus_exists())
return -ENODEV;
ret = hv_pci_irqchip_init();
diff --git a/drivers/pci/controller/pcie-aspeed.c b/drivers/pci/controller/pcie-aspeed.c
index 3e1a39d1e648..6acfae7d026e 100644
--- a/drivers/pci/controller/pcie-aspeed.c
+++ b/drivers/pci/controller/pcie-aspeed.c
@@ -1052,14 +1052,14 @@ static int aspeed_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
ret = devm_add_action_or_reset(dev, aspeed_pcie_irq_domain_free, pcie);
if (ret)
return ret;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_irq(dev, irq, aspeed_pcie_intr_handler, IRQF_SHARED,
dev_name(dev), pcie);
if (ret)
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 062f55690012..714bcab97b60 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -1442,7 +1442,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
dev_info(dev, "link up, %s x%u %s\n",
- pci_speed_string(pcie_link_speed[cls]), nlw,
+ pci_speed_string(pcie_get_link_speed(cls)), nlw,
ssc_good ? "(SSC)" : "(!SSC)");
return 0;
@@ -2072,7 +2072,8 @@ static int brcm_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->clk);
ret = of_pci_get_max_link_speed(np);
- pcie->gen = (ret < 0) ? 0 : ret;
+ if (pcie_get_link_speed(ret) == PCI_SPEED_UNKNOWN)
+ pcie->gen = 0;
pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
index aaf1ed2b6e59..36be86d827a8 100644
--- a/drivers/pci/controller/pcie-hisi-error.c
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -287,25 +287,16 @@ static int hisi_pcie_error_handler_probe(struct platform_device *pdev)
priv->nb.notifier_call = hisi_pcie_notify_error;
priv->dev = &pdev->dev;
- ret = ghes_register_vendor_record_notifier(&priv->nb);
+ ret = devm_ghes_register_vendor_record_notifier(&pdev->dev, &priv->nb);
if (ret) {
dev_err(&pdev->dev,
"Failed to register hisi pcie controller error handler with apei\n");
return ret;
}
- platform_set_drvdata(pdev, priv);
-
return 0;
}
-static void hisi_pcie_error_handler_remove(struct platform_device *pdev)
-{
- struct hisi_pcie_error_private *priv = platform_get_drvdata(pdev);
-
- ghes_unregister_vendor_record_notifier(&priv->nb);
-}
-
static const struct acpi_device_id hisi_pcie_acpi_match[] = {
{ "HISI0361", 0 },
{ }
@@ -317,7 +308,6 @@ static struct platform_driver hisi_pcie_error_handler_driver = {
.acpi_match_table = hisi_pcie_acpi_match,
},
.probe = hisi_pcie_error_handler_probe,
- .remove = hisi_pcie_error_handler_remove,
};
module_platform_driver(hisi_pcie_error_handler_driver);
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 75ddb8bee168..b0accd828589 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -22,6 +22,7 @@
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-pwrctrl.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -403,6 +404,64 @@ static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}
+static int mtk_pcie_devices_power_up(struct mtk_gen3_pcie *pcie)
+{
+ int err;
+ u32 val;
+
+ /*
+ * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal
+ * causing occasional PCIe link down. In order to overcome the issue,
+ * PCIE_RSTB signals are not asserted/released at this stage and the
+ * PCIe block is reset using en7523_reset_assert() and
+ * en7581_pci_enable().
+ */
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
+
+ err = pci_pwrctrl_power_on_devices(pcie->dev);
+ if (err) {
+ dev_err(pcie->dev, "Failed to power on devices: %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ /*
+ * Described in PCIe CEM specification revision 6.0.
+ *
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
+
+ return 0;
+}
+
+static void mtk_pcie_devices_power_down(struct mtk_gen3_pcie *pcie)
+{
+ u32 val;
+
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
+
+ pci_pwrctrl_power_off_devices(pcie->dev);
+}
+
static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
struct resource_entry *entry;
@@ -464,52 +523,6 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
- /*
- * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal
- * causing occasional PCIe link down. In order to overcome the issue,
- * PCIE_RSTB signals are not asserted/released at this stage and the
- * PCIe block is reset using en7523_reset_assert() and
- * en7581_pci_enable().
- */
- if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
- /* Assert all reset signals */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
- PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
- /*
- * Described in PCIe CEM specification revision 6.0.
- *
- * The deassertion of PERST# should be delayed 100ms (TPVPERL)
- * for the power and clock to become stable.
- */
- msleep(PCIE_T_PVPERL_MS);
-
- /* De-assert reset signals */
- val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
- PCIE_PE_RSTB);
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
- }
-
- /* Check if the link is up or not */
- err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
- !!(val & PCIE_PORT_LINKUP), 20,
- PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
- if (err) {
- const char *ltssm_state;
- int ltssm_index;
-
- val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
- ltssm_index = PCIE_LTSSM_STATE(val);
- ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
- "Unknown state" : ltssm_str[ltssm_index];
- dev_err(pcie->dev,
- "PCIe link down, current LTSSM state: %s (%#x)\n",
- ltssm_state, val);
- return err;
- }
-
mtk_pcie_enable_msi(pcie);
/* Set PCIe translation windows */
@@ -535,7 +548,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
return err;
}
+ err = mtk_pcie_devices_power_up(pcie);
+ if (err)
+ return err;
+
+ /* Check if the link is up or not */
+ err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
+ !!(val & PCIE_PORT_LINKUP), 20,
+ PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
+ if (err) {
+ const char *ltssm_state;
+ int ltssm_index;
+
+ val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
+ ltssm_index = PCIE_LTSSM_STATE(val);
+ ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
+ "Unknown state" : ltssm_str[ltssm_index];
+ dev_err(pcie->dev,
+ "PCIe link down, current LTSSM state: %s (%#x)\n",
+ ltssm_state, val);
+ goto err_power_down_device;
+ }
+
return 0;
+
+err_power_down_device:
+ mtk_pcie_devices_power_down(pcie);
+ return err;
}
#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
@@ -851,14 +890,14 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
struct platform_device *pdev = to_platform_device(dev);
int err;
- err = mtk_pcie_init_irq_domains(pcie);
- if (err)
- return err;
-
pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq < 0)
return pcie->irq;
+ err = mtk_pcie_init_irq_domains(pcie);
+ if (err)
+ return err;
+
irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);
return 0;
@@ -876,10 +915,8 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
if (!regs)
return -EINVAL;
pcie->base = devm_ioremap_resource(dev, regs);
- if (IS_ERR(pcie->base)) {
- dev_err(dev, "failed to map register base\n");
- return PTR_ERR(pcie->base);
- }
+ if (IS_ERR(pcie->base))
+ return dev_err_probe(dev, PTR_ERR(pcie->base), "failed to map register base\n");
pcie->reg_base = regs->start;
@@ -888,34 +925,20 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
pcie->phy_resets);
- if (ret) {
- dev_err(dev, "failed to get PHY bulk reset\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get PHY bulk reset\n");
pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
- if (IS_ERR(pcie->mac_reset)) {
- ret = PTR_ERR(pcie->mac_reset);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get MAC reset\n");
-
- return ret;
- }
+ if (IS_ERR(pcie->mac_reset))
+ return dev_err_probe(dev, PTR_ERR(pcie->mac_reset), "failed to get MAC reset\n");
pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
- if (IS_ERR(pcie->phy)) {
- ret = PTR_ERR(pcie->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get PHY\n");
-
- return ret;
- }
+ if (IS_ERR(pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(pcie->phy), "failed to get PHY\n");
pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
- if (pcie->num_clks < 0) {
- dev_err(dev, "failed to get clocks\n");
- return pcie->num_clks;
- }
+ if (pcie->num_clks < 0)
+ return dev_err_probe(dev, pcie->num_clks, "failed to get clocks\n");
ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
if (ret == 0) {
@@ -1150,7 +1173,7 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
return err;
err = of_pci_get_max_link_speed(pcie->dev->of_node);
- if (err) {
+ if (pcie_get_link_speed(err) != PCI_SPEED_UNKNOWN) {
/* Get the maximum speed supported by the controller */
max_speed = mtk_pcie_get_controller_max_link_speed(pcie);
@@ -1168,10 +1191,6 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
if (err)
goto err_setup;
- err = mtk_pcie_setup_irq(pcie);
- if (err)
- goto err_setup;
-
return 0;
err_setup:
@@ -1197,21 +1216,38 @@ static int mtk_pcie_probe(struct platform_device *pdev)
pcie->soc = device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
+ err = mtk_pcie_setup_irq(pcie);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to setup IRQ domains\n");
+
+	err = pci_pwrctrl_create_devices(pcie->dev);
+	if (err) {
+		dev_err_probe(dev, err, "failed to create pwrctrl devices\n");
+		goto err_tear_down_irq;
+	}
+
err = mtk_pcie_setup(pcie);
if (err)
- return err;
+ goto err_destroy_pwrctrl;
host->ops = &mtk_pcie_ops;
host->sysdata = pcie;
err = pci_host_probe(host);
- if (err) {
- mtk_pcie_irq_teardown(pcie);
- mtk_pcie_power_down(pcie);
- return err;
- }
+ if (err)
+ goto err_power_down_pcie;
return 0;
+
+err_power_down_pcie:
+ mtk_pcie_devices_power_down(pcie);
+ mtk_pcie_power_down(pcie);
+err_destroy_pwrctrl:
+ if (err != -EPROBE_DEFER)
+ pci_pwrctrl_destroy_devices(pcie->dev);
+err_tear_down_irq:
+ mtk_pcie_irq_teardown(pcie);
+ return err;
}
static void mtk_pcie_remove(struct platform_device *pdev)
@@ -1224,8 +1260,10 @@ static void mtk_pcie_remove(struct platform_device *pdev)
pci_remove_root_bus(host->bus);
pci_unlock_rescan_remove();
- mtk_pcie_irq_teardown(pcie);
+ pci_pwrctrl_power_off_devices(pcie->dev);
mtk_pcie_power_down(pcie);
+ pci_pwrctrl_destroy_devices(pcie->dev);
+ mtk_pcie_irq_teardown(pcie);
}
static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
@@ -1283,7 +1321,6 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
{
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- u32 val;
/* Trigger link to L2 state */
err = mtk_pcie_turn_off_link(pcie);
@@ -1292,13 +1329,7 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
return err;
}
- if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
- /* Assert the PERST# pin */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
- }
-
+ mtk_pcie_devices_power_down(pcie);
dev_dbg(pcie->dev, "entered L2 states successfully");
mtk_pcie_irq_save(pcie);
@@ -1317,14 +1348,16 @@ static int mtk_pcie_resume_noirq(struct device *dev)
return err;
err = mtk_pcie_startup_port(pcie);
- if (err) {
- mtk_pcie_power_down(pcie);
- return err;
- }
+ if (err)
+ goto err_power_down;
mtk_pcie_irq_restore(pcie);
return 0;
+
+err_power_down:
+ mtk_pcie_power_down(pcie);
+ return err;
}
static const struct dev_pm_ops mtk_pcie_pm_ops = {
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 5defa5cc4c2b..75722524fe74 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -953,7 +953,7 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
struct mtk_pcie_port *port;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- char name[10];
+ char name[20];
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 657875ef4657..c2da8ac1f2e8 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -440,13 +440,10 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
.only_64bit = true, },
- .bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = 256,
.only_64bit = true, },
- .bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256,
.only_64bit = true, },
- .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/pcie-rzg3s-host.c b/drivers/pci/controller/pcie-rzg3s-host.c
index 2809112e6317..d86e7516dcc2 100644
--- a/drivers/pci/controller/pcie-rzg3s-host.c
+++ b/drivers/pci/controller/pcie-rzg3s-host.c
@@ -111,6 +111,15 @@
#define RZG3S_PCI_PERM_CFG_HWINIT_EN BIT(2)
#define RZG3S_PCI_PERM_PIPE_PHY_REG_EN BIT(1)
+#define RZG3S_PCI_RESET 0x310
+#define RZG3S_PCI_RESET_RST_OUT_B BIT(6)
+#define RZG3S_PCI_RESET_RST_PS_B BIT(5)
+#define RZG3S_PCI_RESET_RST_LOAD_B BIT(4)
+#define RZG3S_PCI_RESET_RST_CFG_B BIT(3)
+#define RZG3S_PCI_RESET_RST_RSM_B BIT(2)
+#define RZG3S_PCI_RESET_RST_GP_B BIT(1)
+#define RZG3S_PCI_RESET_RST_B BIT(0)
+
#define RZG3S_PCI_MSIRE(id) (0x600 + (id) * 0x10)
#define RZG3S_PCI_MSIRE_ENA BIT(0)
@@ -159,10 +168,6 @@
#define RZG3S_PCI_CFG_PCIEC 0x60
-/* System controller registers */
-#define RZG3S_SYS_PCIE_RST_RSM_B 0xd74
-#define RZG3S_SYS_PCIE_RST_RSM_B_MASK BIT(0)
-
/* Maximum number of windows */
#define RZG3S_MAX_WINDOWS 8
@@ -175,6 +180,48 @@
#define RZG3S_REQ_ISSUE_TIMEOUT_US 2500
/**
+ * struct rzg3s_sysc_function - System Controller function descriptor
+ * @offset: Register offset from the System Controller base address
+ * @mask: Bit mask for the function within the register
+ */
+struct rzg3s_sysc_function {
+ u32 offset;
+ u32 mask;
+};
+
+/**
+ * enum rzg3s_sysc_func_id - System controller function IDs
+ * @RZG3S_SYSC_FUNC_ID_RST_RSM_B: RST_RSM_B SYSC function ID
+ * @RZG3S_SYSC_FUNC_ID_L1_ALLOW: L1 allow SYSC function ID
+ * @RZG3S_SYSC_FUNC_ID_MODE: Mode SYSC function ID
+ * @RZG3S_SYSC_FUNC_ID_MAX: Max SYSC function ID
+ */
+enum rzg3s_sysc_func_id {
+ RZG3S_SYSC_FUNC_ID_RST_RSM_B,
+ RZG3S_SYSC_FUNC_ID_L1_ALLOW,
+ RZG3S_SYSC_FUNC_ID_MODE,
+ RZG3S_SYSC_FUNC_ID_MAX,
+};
+
+/**
+ * struct rzg3s_sysc_info - RZ/G3S System Controller info
+ * @functions: SYSC function descriptors array
+ */
+struct rzg3s_sysc_info {
+ const struct rzg3s_sysc_function functions[RZG3S_SYSC_FUNC_ID_MAX];
+};
+
+/**
+ * struct rzg3s_sysc - RZ/G3S System Controller descriptor
+ * @regmap: System controller regmap
+ * @info: System controller info
+ */
+struct rzg3s_sysc {
+ struct regmap *regmap;
+ const struct rzg3s_sysc_info *info;
+};
+
+/**
* struct rzg3s_pcie_msi - RZ/G3S PCIe MSI data structure
* @domain: IRQ domain
* @map: bitmap with the allocated MSIs
@@ -199,17 +246,25 @@ struct rzg3s_pcie_host;
/**
* struct rzg3s_pcie_soc_data - SoC specific data
* @init_phy: PHY initialization function
+ * @config_pre_init: Optional callback for SoC-specific pre-configuration
+ * @config_post_init: Callback for SoC-specific post-configuration
+ * @config_deinit: Callback for SoC-specific de-initialization
* @power_resets: array with the resets that need to be de-asserted after
* power-on
* @cfg_resets: array with the resets that need to be de-asserted after
* configuration
+ * @sysc_info: SYSC info
* @num_power_resets: number of power resets
* @num_cfg_resets: number of configuration resets
*/
struct rzg3s_pcie_soc_data {
int (*init_phy)(struct rzg3s_pcie_host *host);
+ void (*config_pre_init)(struct rzg3s_pcie_host *host);
+ int (*config_post_init)(struct rzg3s_pcie_host *host);
+ int (*config_deinit)(struct rzg3s_pcie_host *host);
const char * const *power_resets;
const char * const *cfg_resets;
+ struct rzg3s_sysc_info sysc_info;
u8 num_power_resets;
u8 num_cfg_resets;
};
@@ -233,7 +288,7 @@ struct rzg3s_pcie_port {
* @dev: struct device
* @power_resets: reset control signals that should be set after power up
* @cfg_resets: reset control signals that should be set after configuration
- * @sysc: SYSC regmap
+ * @sysc: SYSC descriptor
* @intx_domain: INTx IRQ domain
* @data: SoC specific data
* @msi: MSI data structure
@@ -248,7 +303,7 @@ struct rzg3s_pcie_host {
struct device *dev;
struct reset_control_bulk_data *power_resets;
struct reset_control_bulk_data *cfg_resets;
- struct regmap *sysc;
+ struct rzg3s_sysc *sysc;
struct irq_domain *intx_domain;
const struct rzg3s_pcie_soc_data *data;
struct rzg3s_pcie_msi msi;
@@ -260,6 +315,23 @@ struct rzg3s_pcie_host {
#define rzg3s_msi_to_host(_msi) container_of(_msi, struct rzg3s_pcie_host, msi)
+static int rzg3s_sysc_config_func(struct rzg3s_sysc *sysc,
+ enum rzg3s_sysc_func_id fid, u32 val)
+{
+ const struct rzg3s_sysc_info *info = sysc->info;
+ const struct rzg3s_sysc_function *functions = info->functions;
+
+ if (fid >= RZG3S_SYSC_FUNC_ID_MAX)
+ return -EINVAL;
+
+ if (!functions[fid].mask)
+ return 0;
+
+ return regmap_update_bits(sysc->regmap, functions[fid].offset,
+ functions[fid].mask,
+ field_prep(functions[fid].mask, val));
+}
+
static void rzg3s_pcie_update_bits(void __iomem *base, u32 offset, u32 mask,
u32 val)
{
@@ -945,8 +1017,9 @@ static int rzg3s_pcie_set_max_link_speed(struct rzg3s_pcie_host *host)
{
u32 remote_supported_link_speeds, max_supported_link_speeds;
u32 cs2, tmp, pcie_cap = RZG3S_PCI_CFG_PCIEC;
- u32 cur_link_speed, link_speed;
+ u32 cur_link_speed, link_speed, hw_max_speed;
u8 ltssm_state_l0 = 0xc;
+ u32 lnkcap;
int ret;
u16 ls;
@@ -966,7 +1039,22 @@ static int rzg3s_pcie_set_max_link_speed(struct rzg3s_pcie_host *host)
ls = readw_relaxed(host->pcie + pcie_cap + PCI_EXP_LNKSTA);
cs2 = readl_relaxed(host->axi + RZG3S_PCI_PCSTAT2);
- switch (pcie_link_speed[host->max_link_speed]) {
+ /* Read hardware supported link speed from Link Capabilities Register */
+ lnkcap = readl_relaxed(host->pcie + pcie_cap + PCI_EXP_LNKCAP);
+ hw_max_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, lnkcap);
+
+ /*
+ * Use DT max-link-speed only as a limit. If specified and lower
+ * than hardware capability, cap to that value.
+ */
+ if (host->max_link_speed > 0 && host->max_link_speed < hw_max_speed)
+ hw_max_speed = host->max_link_speed;
+
+ switch (pcie_get_link_speed(hw_max_speed)) {
+ case PCIE_SPEED_8_0GT:
+ max_supported_link_speeds = GENMASK(PCI_EXP_LNKSTA_CLS_8_0GB - 1, 0);
+ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
+ break;
case PCIE_SPEED_5_0GT:
max_supported_link_speeds = GENMASK(PCI_EXP_LNKSTA_CLS_5_0GB - 1, 0);
link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
@@ -982,10 +1070,10 @@ static int rzg3s_pcie_set_max_link_speed(struct rzg3s_pcie_host *host)
remote_supported_link_speeds &= max_supported_link_speeds;
/*
- * Return if max link speed is already set or the connected device
+ * Return if target link speed is already set or the connected device
* doesn't support it.
*/
- if (cur_link_speed == host->max_link_speed ||
+ if (cur_link_speed == hw_max_speed ||
remote_supported_link_speeds != max_supported_link_speeds)
return 0;
@@ -1022,6 +1110,7 @@ static int rzg3s_pcie_set_max_link_speed(struct rzg3s_pcie_host *host)
static int rzg3s_pcie_config_init(struct rzg3s_pcie_host *host)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ u32 mask = GENMASK(31, 8);
struct resource_entry *ft;
struct resource *bus;
u8 subordinate_bus;
@@ -1045,6 +1134,13 @@ static int rzg3s_pcie_config_init(struct rzg3s_pcie_host *host)
writel_relaxed(0xffffffff, host->pcie + RZG3S_PCI_CFG_BARMSK00L);
writel_relaxed(0xffffffff, host->pcie + RZG3S_PCI_CFG_BARMSK00U);
+ /*
+ * Explicitly program class code. RZ/G3E requires this configuration.
+ * Harmless for RZ/G3S where this matches the hardware default.
+ */
+ rzg3s_pcie_update_bits(host->pcie, PCI_CLASS_REVISION, mask,
+ field_prep(mask, PCI_CLASS_BRIDGE_PCI_NORMAL));
+
/* Disable access control to the CFGU */
writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
@@ -1056,6 +1152,57 @@ static int rzg3s_pcie_config_init(struct rzg3s_pcie_host *host)
return 0;
}
+static int rzg3s_pcie_config_post_init(struct rzg3s_pcie_host *host)
+{
+ return reset_control_bulk_deassert(host->data->num_cfg_resets,
+ host->cfg_resets);
+}
+
+static int rzg3s_pcie_config_deinit(struct rzg3s_pcie_host *host)
+{
+ return reset_control_bulk_assert(host->data->num_cfg_resets,
+ host->cfg_resets);
+}
+
+static void rzg3e_pcie_config_pre_init(struct rzg3s_pcie_host *host)
+{
+ u32 mask = RZG3S_PCI_RESET_RST_LOAD_B | RZG3S_PCI_RESET_RST_CFG_B;
+
+ /* De-assert LOAD_B and CFG_B */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_RESET, mask, mask);
+}
+
+static int rzg3e_pcie_config_deinit(struct rzg3s_pcie_host *host)
+{
+ writel_relaxed(0, host->axi + RZG3S_PCI_RESET);
+ return 0;
+}
+
+static int rzg3e_pcie_config_post_init(struct rzg3s_pcie_host *host)
+{
+ u32 mask = RZG3S_PCI_RESET_RST_PS_B | RZG3S_PCI_RESET_RST_GP_B |
+ RZG3S_PCI_RESET_RST_B;
+
+ /* De-assert PS_B, GP_B, RST_B */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_RESET, mask, mask);
+
+ /* Flush deassert */
+ readl_relaxed(host->axi + RZG3S_PCI_RESET);
+
+ /*
+ * According to the RZ/G3E HW manual (Rev.1.15, Table 6.6-130
+ * Initialization Procedure (RC)), hardware requires >= 500us delay
+ * before final reset deassert.
+ */
+ fsleep(500);
+
+ /* De-assert OUT_B and RSM_B */
+ mask = RZG3S_PCI_RESET_RST_OUT_B | RZG3S_PCI_RESET_RST_RSM_B;
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_RESET, mask, mask);
+
+ return 0;
+}
+
static void rzg3s_pcie_irq_init(struct rzg3s_pcie_host *host)
{
/*
@@ -1135,9 +1282,9 @@ static int rzg3s_pcie_resets_prepare_and_get(struct rzg3s_pcie_host *host)
if (ret)
return ret;
- return devm_reset_control_bulk_get_exclusive(host->dev,
- data->num_cfg_resets,
- host->cfg_resets);
+ return devm_reset_control_bulk_get_optional_exclusive(host->dev,
+ data->num_cfg_resets,
+ host->cfg_resets);
}
static int rzg3s_pcie_host_parse_port(struct rzg3s_pcie_host *host)
@@ -1204,22 +1351,32 @@ static int rzg3s_pcie_host_init(struct rzg3s_pcie_host *host)
u32 val;
int ret;
+ /* SoC-specific pre-configuration */
+ if (host->data->config_pre_init)
+ host->data->config_pre_init(host);
+
/* Initialize the PCIe related registers */
ret = rzg3s_pcie_config_init(host);
if (ret)
- return ret;
+ goto config_deinit;
ret = rzg3s_pcie_host_init_port(host);
if (ret)
- return ret;
+ goto config_deinit;
+
+ /* Enable ASPM L1 transition for SoCs that use it */
+ ret = rzg3s_sysc_config_func(host->sysc,
+ RZG3S_SYSC_FUNC_ID_L1_ALLOW, 1);
+ if (ret)
+ goto config_deinit_and_refclk;
/* Initialize the interrupts */
rzg3s_pcie_irq_init(host);
- ret = reset_control_bulk_deassert(host->data->num_cfg_resets,
- host->cfg_resets);
+ /* SoC-specific post-configuration */
+ ret = host->data->config_post_init(host);
if (ret)
- goto disable_port_refclk;
+ goto config_deinit_and_refclk;
/* Wait for link up */
ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT1, val,
@@ -1228,18 +1385,20 @@ static int rzg3s_pcie_host_init(struct rzg3s_pcie_host *host)
PCIE_LINK_WAIT_SLEEP_MS * MILLI *
PCIE_LINK_WAIT_MAX_RETRIES);
if (ret)
- goto cfg_resets_deassert;
+ goto config_deinit_post;
val = readl_relaxed(host->axi + RZG3S_PCI_PCSTAT2);
dev_info(host->dev, "PCIe link status [0x%x]\n", val);
return 0;
-cfg_resets_deassert:
- reset_control_bulk_assert(host->data->num_cfg_resets,
- host->cfg_resets);
-disable_port_refclk:
+config_deinit_post:
+ host->data->config_deinit(host);
+config_deinit_and_refclk:
clk_disable_unprepare(host->port.refclk);
+config_deinit:
+ if (host->data->config_pre_init)
+ host->data->config_deinit(host);
return ret;
}
@@ -1271,50 +1430,55 @@ static int rzg3s_pcie_set_inbound_windows(struct rzg3s_pcie_host *host,
u64 pci_addr = entry->res->start - entry->offset;
u64 cpu_addr = entry->res->start;
u64 cpu_end = entry->res->end;
- u64 size_id = 0;
int id = *index;
u64 size;
- while (cpu_addr < cpu_end) {
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.6.6.7) and
+ * RZ/G3E HW manual (Rev.1.15, section 6.6.7.6):
+ * - Each window must be a single memory size of power of two
+ * - Mask registers must be set to (2^N - 1)
+ * - Bit carry must not occur when adding base and mask registers,
+ * meaning the base address must be aligned to the window size
+ *
+ * Split non-power-of-2 regions into multiple windows to satisfy
+ * these constraints without over-mapping.
+ */
+ while (cpu_addr <= cpu_end) {
+ u64 remaining_size = cpu_end - cpu_addr + 1;
+ u64 align_limit;
+
if (id >= RZG3S_MAX_WINDOWS)
return dev_err_probe(host->dev, -ENOSPC,
"Failed to map inbound window for resource (%s)\n",
entry->res->name);
- size = resource_size(entry->res) - size_id;
-
- /*
- * According to the RZ/G3S HW manual (Rev.1.10,
- * section 34.3.1.71 AXI Window Mask (Lower) Registers) the min
- * size is 4K.
- */
- size = max(size, SZ_4K);
+ /* Start with largest power-of-two that fits in remaining size */
+ size = 1ULL << __fls(remaining_size);
/*
- * According the RZ/G3S HW manual (Rev.1.10, sections:
- * - 34.3.1.69 AXI Window Base (Lower) Registers
- * - 34.3.1.71 AXI Window Mask (Lower) Registers
- * - 34.3.1.73 AXI Destination (Lower) Registers)
- * the CPU addr, PCIe addr, size should be 4K aligned and be a
- * power of 2.
+ * The "no bit carry" rule requires base addresses to be
+ * aligned to the window size. Find the maximum window size
+ * that both addresses can support based on their natural
+ * alignment (lowest set bit).
*/
- size = ALIGN(size, SZ_4K);
- size = roundup_pow_of_two(size);
+ align_limit = min(cpu_addr ? (1ULL << __ffs(cpu_addr)) : ~0ULL,
+ pci_addr ? (1ULL << __ffs(pci_addr)) : ~0ULL);
- cpu_addr = ALIGN(cpu_addr, SZ_4K);
- pci_addr = ALIGN(pci_addr, SZ_4K);
+ size = min(size, align_limit);
/*
- * According to the RZ/G3S HW manual (Rev.1.10, section
- * 34.3.1.71 AXI Window Mask (Lower) Registers) HW expects first
- * 12 LSB bits to be 0xfff. Subtract 1 from size for this.
+ * Minimum window size is 4KB.
+ * See RZ/G3S HW manual (Rev.1.10, section 34.3.1.71) and
+ * RZ/G3E HW manual (Rev.1.15, section 6.6.4.1.3.(74)).
*/
+ size = max(size, SZ_4K);
+
rzg3s_pcie_set_inbound_window(host, cpu_addr, pci_addr,
size - 1, id);
pci_addr += size;
cpu_addr += size;
- size_id = size;
id++;
}
*index = id;
@@ -1517,6 +1681,7 @@ static int rzg3s_pcie_probe(struct platform_device *pdev)
struct device_node *sysc_np __free(device_node) =
of_parse_phandle(np, "renesas,sysc", 0);
struct rzg3s_pcie_host *host;
+ struct rzg3s_sysc *sysc;
int ret;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
@@ -1528,28 +1693,36 @@ static int rzg3s_pcie_probe(struct platform_device *pdev)
host->data = device_get_match_data(dev);
platform_set_drvdata(pdev, host);
+ host->sysc = devm_kzalloc(dev, sizeof(*host->sysc), GFP_KERNEL);
+ if (!host->sysc)
+ return -ENOMEM;
+
+ sysc = host->sysc;
+ sysc->info = &host->data->sysc_info;
+
host->axi = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->axi))
return PTR_ERR(host->axi);
host->pcie = host->axi + RZG3S_PCI_CFG_BASE;
host->max_link_speed = of_pci_get_max_link_speed(np);
- if (host->max_link_speed < 0)
- host->max_link_speed = 2;
ret = rzg3s_pcie_host_parse_port(host);
if (ret)
return ret;
- host->sysc = syscon_node_to_regmap(sysc_np);
- if (IS_ERR(host->sysc)) {
- ret = PTR_ERR(host->sysc);
+ sysc->regmap = syscon_node_to_regmap(sysc_np);
+ if (IS_ERR(sysc->regmap)) {
+ ret = PTR_ERR(sysc->regmap);
goto port_refclk_put;
}
- ret = regmap_update_bits(host->sysc, RZG3S_SYS_PCIE_RST_RSM_B,
- RZG3S_SYS_PCIE_RST_RSM_B_MASK,
- FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 1));
+ /* Put controller in RC mode */
+ ret = rzg3s_sysc_config_func(sysc, RZG3S_SYSC_FUNC_ID_MODE, 1);
+ if (ret)
+ goto port_refclk_put;
+
+ ret = rzg3s_sysc_config_func(sysc, RZG3S_SYSC_FUNC_ID_RST_RSM_B, 1);
if (ret)
goto port_refclk_put;
@@ -1589,8 +1762,7 @@ static int rzg3s_pcie_probe(struct platform_device *pdev)
host_probe_teardown:
rzg3s_pcie_teardown_irqdomain(host);
- reset_control_bulk_deassert(host->data->num_cfg_resets,
- host->cfg_resets);
+ host->data->config_deinit(host);
rpm_put:
pm_runtime_put_sync(dev);
rpm_disable:
@@ -1602,9 +1774,7 @@ sysc_signal_restore:
* SYSC RST_RSM_B signal need to be asserted before turning off the
* power to the PHY.
*/
- regmap_update_bits(host->sysc, RZG3S_SYS_PCIE_RST_RSM_B,
- RZG3S_SYS_PCIE_RST_RSM_B_MASK,
- FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+ rzg3s_sysc_config_func(sysc, RZG3S_SYSC_FUNC_ID_RST_RSM_B, 0);
port_refclk_put:
clk_put(host->port.refclk);
@@ -1616,7 +1786,7 @@ static int rzg3s_pcie_suspend_noirq(struct device *dev)
struct rzg3s_pcie_host *host = dev_get_drvdata(dev);
const struct rzg3s_pcie_soc_data *data = host->data;
struct rzg3s_pcie_port *port = &host->port;
- struct regmap *sysc = host->sysc;
+ struct rzg3s_sysc *sysc = host->sysc;
int ret;
ret = pm_runtime_put_sync(dev);
@@ -1625,31 +1795,30 @@ static int rzg3s_pcie_suspend_noirq(struct device *dev)
clk_disable_unprepare(port->refclk);
- ret = reset_control_bulk_assert(data->num_power_resets,
- host->power_resets);
+ /* SoC-specific de-initialization */
+ ret = data->config_deinit(host);
if (ret)
goto refclk_restore;
- ret = reset_control_bulk_assert(data->num_cfg_resets,
- host->cfg_resets);
+ ret = reset_control_bulk_assert(data->num_power_resets,
+ host->power_resets);
if (ret)
- goto power_resets_restore;
+ goto config_reinit;
- ret = regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
- RZG3S_SYS_PCIE_RST_RSM_B_MASK,
- FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+ ret = rzg3s_sysc_config_func(sysc, RZG3S_SYSC_FUNC_ID_RST_RSM_B, 0);
if (ret)
- goto cfg_resets_restore;
+ goto power_resets_restore;
return 0;
/* Restore the previous state if any error happens */
-cfg_resets_restore:
- reset_control_bulk_deassert(data->num_cfg_resets,
- host->cfg_resets);
power_resets_restore:
reset_control_bulk_deassert(data->num_power_resets,
host->power_resets);
+config_reinit:
+ if (data->config_pre_init)
+ data->config_pre_init(host);
+ data->config_post_init(host);
refclk_restore:
clk_prepare_enable(port->refclk);
pm_runtime_resume_and_get(dev);
@@ -1660,12 +1829,14 @@ static int rzg3s_pcie_resume_noirq(struct device *dev)
{
struct rzg3s_pcie_host *host = dev_get_drvdata(dev);
const struct rzg3s_pcie_soc_data *data = host->data;
- struct regmap *sysc = host->sysc;
+ struct rzg3s_sysc *sysc = host->sysc;
int ret;
- ret = regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
- RZG3S_SYS_PCIE_RST_RSM_B_MASK,
- FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 1));
+ ret = rzg3s_sysc_config_func(sysc, RZG3S_SYSC_FUNC_ID_MODE, 1);
+ if (ret)
+ return ret;
+
+ ret = rzg3s_sysc_config_func(sysc, RZG3S_SYSC_FUNC_ID_RST_RSM_B, 1);
if (ret)
return ret;
@@ -1694,9 +1865,7 @@ assert_power_resets:
reset_control_bulk_assert(data->num_power_resets,
host->power_resets);
assert_rst_rsm_b:
- regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
- RZG3S_SYS_PCIE_RST_RSM_B_MASK,
- FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+ rzg3s_sysc_config_func(sysc, RZG3S_SYSC_FUNC_ID_RST_RSM_B, 0);
return ret;
}
@@ -1718,7 +1887,39 @@ static const struct rzg3s_pcie_soc_data rzg3s_soc_data = {
.num_power_resets = ARRAY_SIZE(rzg3s_soc_power_resets),
.cfg_resets = rzg3s_soc_cfg_resets,
.num_cfg_resets = ARRAY_SIZE(rzg3s_soc_cfg_resets),
+ .config_post_init = rzg3s_pcie_config_post_init,
+ .config_deinit = rzg3s_pcie_config_deinit,
.init_phy = rzg3s_soc_pcie_init_phy,
+ .sysc_info = {
+ .functions = {
+ [RZG3S_SYSC_FUNC_ID_RST_RSM_B] = {
+ .offset = 0xd74,
+ .mask = BIT(0),
+ },
+ },
+ },
+};
+
+static const char * const rzg3e_soc_power_resets[] = { "aresetn" };
+
+static const struct rzg3s_pcie_soc_data rzg3e_soc_data = {
+ .power_resets = rzg3e_soc_power_resets,
+ .num_power_resets = ARRAY_SIZE(rzg3e_soc_power_resets),
+ .config_pre_init = rzg3e_pcie_config_pre_init,
+ .config_post_init = rzg3e_pcie_config_post_init,
+ .config_deinit = rzg3e_pcie_config_deinit,
+ .sysc_info = {
+ .functions = {
+ [RZG3S_SYSC_FUNC_ID_L1_ALLOW] = {
+ .offset = 0x1020,
+ .mask = BIT(0),
+ },
+ [RZG3S_SYSC_FUNC_ID_MODE] = {
+ .offset = 0x1024,
+ .mask = BIT(0),
+ },
+ },
+ },
};
static const struct of_device_id rzg3s_pcie_of_match[] = {
@@ -1726,6 +1927,10 @@ static const struct of_device_id rzg3s_pcie_of_match[] = {
.compatible = "renesas,r9a08g045-pcie",
.data = &rzg3s_soc_data,
},
+ {
+ .compatible = "renesas,r9a09g047-pcie",
+ .data = &rzg3e_soc_data,
+ },
{}
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index f9cf18aa5b34..7f5326925ed5 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -367,6 +367,8 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
dev_err(dev, "DMA transfer timeout\n");
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
}
err_unmap:
@@ -438,6 +440,8 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
dev_err(dev, "DMA transfer timeout\n");
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
}
err_unmap:
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index a3a588e522e7..2bdcc35b652c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1495,47 +1495,6 @@ err_alloc_peer_mem:
}
/**
- * epf_ntb_epc_destroy_interface() - Cleanup NTB EPC interface
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
- * @type: PRIMARY interface or SECONDARY interface
- *
- * Unbind NTB function device from EPC and relinquish reference to pci_epc
- * for each of the interface.
- */
-static void epf_ntb_epc_destroy_interface(struct epf_ntb *ntb,
- enum pci_epc_interface_type type)
-{
- struct epf_ntb_epc *ntb_epc;
- struct pci_epc *epc;
- struct pci_epf *epf;
-
- if (type < 0)
- return;
-
- epf = ntb->epf;
- ntb_epc = ntb->epc[type];
- if (!ntb_epc)
- return;
- epc = ntb_epc->epc;
- pci_epc_remove_epf(epc, epf, type);
- pci_epc_put(epc);
-}
-
-/**
- * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
- *
- * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces
- */
-static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
-{
- enum pci_epc_interface_type type;
-
- for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
- epf_ntb_epc_destroy_interface(ntb, type);
-}
-
-/**
* epf_ntb_epc_create_interface() - Create and initialize NTB EPC interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @epc: struct pci_epc to which a particular NTB interface should be associated
@@ -1614,15 +1573,8 @@ static int epf_ntb_epc_create(struct epf_ntb *ntb)
ret = epf_ntb_epc_create_interface(ntb, epf->sec_epc,
SECONDARY_INTERFACE);
- if (ret) {
+ if (ret)
dev_err(dev, "SECONDARY intf: Fail to create NTB EPC\n");
- goto err_epc_create;
- }
-
- return 0;
-
-err_epc_create:
- epf_ntb_epc_destroy_interface(ntb, PRIMARY_INTERFACE);
return ret;
}
@@ -1887,7 +1839,7 @@ static int epf_ntb_bind(struct pci_epf *epf)
ret = epf_ntb_init_epc_bar(ntb);
if (ret) {
dev_err(dev, "Failed to create NTB EPC\n");
- goto err_bar_init;
+ return ret;
}
ret = epf_ntb_config_spad_bar_alloc_interface(ntb);
@@ -1909,9 +1861,6 @@ static int epf_ntb_bind(struct pci_epf *epf)
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
-err_bar_init:
- epf_ntb_epc_destroy(ntb);
-
return ret;
}
@@ -1927,7 +1876,6 @@ static void epf_ntb_unbind(struct pci_epf *epf)
epf_ntb_epc_cleanup(ntb);
epf_ntb_config_spad_bar_free(ntb);
- epf_ntb_epc_destroy(ntb);
}
#define EPF_NTB_R(_name) \
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 33548935765e..591d301fa89d 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -54,6 +54,7 @@
#define STATUS_BAR_SUBRANGE_SETUP_FAIL BIT(15)
#define STATUS_BAR_SUBRANGE_CLEAR_SUCCESS BIT(16)
#define STATUS_BAR_SUBRANGE_CLEAR_FAIL BIT(17)
+#define STATUS_NO_RESOURCE BIT(18)
#define FLAG_USE_DMA BIT(0)
@@ -64,6 +65,13 @@
#define CAP_MSIX BIT(2)
#define CAP_INTX BIT(3)
#define CAP_SUBRANGE_MAPPING BIT(4)
+#define CAP_DYNAMIC_INBOUND_MAPPING BIT(5)
+#define CAP_BAR0_RESERVED BIT(6)
+#define CAP_BAR1_RESERVED BIT(7)
+#define CAP_BAR2_RESERVED BIT(8)
+#define CAP_BAR3_RESERVED BIT(9)
+#define CAP_BAR4_RESERVED BIT(10)
+#define CAP_BAR5_RESERVED BIT(11)
#define PCI_EPF_TEST_BAR_SUBRANGE_NSUB 2
@@ -715,7 +723,6 @@ static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
struct pci_epf *epf = epf_test->epf;
- free_irq(epf->db_msg[0].virq, epf_test);
reg->doorbell_bar = cpu_to_le32(NO_BAR);
pci_epf_free_doorbell(epf);
@@ -759,7 +766,7 @@ static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
&epf_test->db_bar.phys_addr, &offset);
if (ret)
- goto err_doorbell_cleanup;
+ goto err_free_irq;
reg->doorbell_offset = cpu_to_le32(offset);
@@ -769,12 +776,14 @@ static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
if (ret)
- goto err_doorbell_cleanup;
+ goto err_free_irq;
status |= STATUS_DOORBELL_ENABLE_SUCCESS;
reg->status = cpu_to_le32(status);
return;
+err_free_irq:
+ free_irq(epf->db_msg[0].virq, epf_test);
err_doorbell_cleanup:
pci_epf_test_doorbell_cleanup(epf_test);
set_status_err:
@@ -794,6 +803,7 @@ static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
if (bar < BAR_0)
goto set_status_err;
+ free_irq(epf->db_msg[0].virq, epf_test);
pci_epf_test_doorbell_cleanup(epf_test);
/*
@@ -892,6 +902,8 @@ static void pci_epf_test_bar_subrange_setup(struct pci_epf_test *epf_test,
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
if (ret) {
dev_err(&epf->dev, "pci_epc_set_bar() failed: %d\n", ret);
+ if (ret == -ENOSPC)
+ status |= STATUS_NO_RESOURCE;
bar->submap = old_submap;
bar->num_submap = old_nsub;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
@@ -1107,10 +1119,31 @@ static void pci_epf_test_set_capabilities(struct pci_epf *epf)
if (epf_test->epc_features->intx_capable)
caps |= CAP_INTX;
+ if (epf_test->epc_features->dynamic_inbound_mapping)
+ caps |= CAP_DYNAMIC_INBOUND_MAPPING;
+
if (epf_test->epc_features->dynamic_inbound_mapping &&
epf_test->epc_features->subrange_mapping)
caps |= CAP_SUBRANGE_MAPPING;
+ if (epf_test->epc_features->bar[BAR_0].type == BAR_RESERVED)
+ caps |= CAP_BAR0_RESERVED;
+
+ if (epf_test->epc_features->bar[BAR_1].type == BAR_RESERVED)
+ caps |= CAP_BAR1_RESERVED;
+
+ if (epf_test->epc_features->bar[BAR_2].type == BAR_RESERVED)
+ caps |= CAP_BAR2_RESERVED;
+
+ if (epf_test->epc_features->bar[BAR_3].type == BAR_RESERVED)
+ caps |= CAP_BAR3_RESERVED;
+
+ if (epf_test->epc_features->bar[BAR_4].type == BAR_RESERVED)
+ caps |= CAP_BAR4_RESERVED;
+
+ if (epf_test->epc_features->bar[BAR_5].type == BAR_RESERVED)
+ caps |= CAP_BAR5_RESERVED;
+
reg->caps = cpu_to_le32(caps);
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 20a400e83439..2256c3062b1a 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -527,20 +527,20 @@ static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
struct msi_msg *msg;
size_t sz;
int ret;
- int i;
+ int i, req;
ret = pci_epf_alloc_doorbell(epf, ntb->db_count);
if (ret)
return ret;
- for (i = 0; i < ntb->db_count; i++) {
- ret = request_irq(epf->db_msg[i].virq, epf_ntb_doorbell_handler,
+ for (req = 0; req < ntb->db_count; req++) {
+ ret = request_irq(epf->db_msg[req].virq, epf_ntb_doorbell_handler,
0, "pci_epf_vntb_db", ntb);
if (ret) {
dev_err(&epf->dev,
"Failed to request doorbell IRQ: %d\n",
- epf->db_msg[i].virq);
+ epf->db_msg[req].virq);
goto err_free_irq;
}
}
@@ -598,8 +598,8 @@ static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
return 0;
err_free_irq:
- for (i--; i >= 0; i--)
- free_irq(epf->db_msg[i].virq, ntb);
+ for (req--; req >= 0; req--)
+ free_irq(epf->db_msg[req].virq, ntb);
pci_epf_free_doorbell(ntb->epf);
return ret;
@@ -764,19 +764,6 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
}
/**
- * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
- * @ntb: NTB device that facilitates communication between HOST and VHOST
- *
- * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces
- */
-static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
-{
- pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
- pci_epc_put(ntb->epf->epc);
-}
-
-
-/**
* epf_ntb_is_bar_used() - Check if a bar is used in the ntb configuration
* @ntb: NTB device that facilitates communication between HOST and VHOST
* @barno: Checked bar number
@@ -955,6 +942,7 @@ err_config_interrupt:
*/
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
+ disable_delayed_work_sync(&ntb->cmd_handler);
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
epf_ntb_db_bar_clear(ntb);
epf_ntb_config_sspad_bar_clear(ntb);
@@ -995,17 +983,19 @@ static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
struct device *dev = &ntb->epf->dev; \
- int win_no; \
+ int win_no, idx; \
\
if (sscanf(#_name, "mw%d", &win_no) != 1) \
return -EINVAL; \
\
- if (win_no <= 0 || win_no > ntb->num_mws) { \
- dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
- return -EINVAL; \
+ idx = win_no - 1; \
+ if (idx < 0 || idx >= ntb->num_mws) { \
+ dev_err(dev, "MW%d out of range (num_mws=%d)\n", \
+ win_no, ntb->num_mws); \
+ return -ERANGE; \
} \
- \
- return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+ idx = array_index_nospec(idx, ntb->num_mws); \
+ return sprintf(page, "%llu\n", ntb->mws_size[idx]); \
}
#define EPF_NTB_MW_W(_name) \
@@ -1015,7 +1005,7 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
struct device *dev = &ntb->epf->dev; \
- int win_no; \
+ int win_no, idx; \
u64 val; \
int ret; \
\
@@ -1026,12 +1016,14 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
if (sscanf(#_name, "mw%d", &win_no) != 1) \
return -EINVAL; \
\
- if (win_no <= 0 || win_no > ntb->num_mws) { \
- dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
- return -EINVAL; \
+ idx = win_no - 1; \
+ if (idx < 0 || idx >= ntb->num_mws) { \
+ dev_err(dev, "MW%d out of range (num_mws=%d)\n", \
+ win_no, ntb->num_mws); \
+ return -ERANGE; \
} \
- \
- ntb->mws_size[win_no - 1] = val; \
+ idx = array_index_nospec(idx, ntb->num_mws); \
+ ntb->mws_size[idx] = val; \
\
return len; \
}
@@ -1436,6 +1428,14 @@ static int vntb_epf_link_disable(struct ntb_dev *ntb)
return 0;
}
+static struct device *vntb_epf_get_dma_dev(struct ntb_dev *ndev)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct pci_epc *epc = ntb->epf->epc;
+
+ return epc->dev.parent;
+}
+
static const struct ntb_dev_ops vntb_epf_ops = {
.mw_count = vntb_epf_mw_count,
.spad_count = vntb_epf_spad_count,
@@ -1457,6 +1457,7 @@ static const struct ntb_dev_ops vntb_epf_ops = {
.db_clear_mask = vntb_epf_db_clear_mask,
.db_clear = vntb_epf_db_clear,
.link_disable = vntb_epf_link_disable,
+ .get_dma_dev = vntb_epf_get_dma_dev,
};
static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1525,7 +1526,7 @@ static int epf_ntb_bind(struct pci_epf *epf)
ret = epf_ntb_init_epc_bar(ntb);
if (ret) {
dev_err(dev, "Failed to create NTB EPC\n");
- goto err_bar_init;
+ return ret;
}
ret = epf_ntb_config_spad_bar_alloc(ntb);
@@ -1565,9 +1566,6 @@ err_epc_cleanup:
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
-err_bar_init:
- epf_ntb_epc_destroy(ntb);
-
return ret;
}
@@ -1583,7 +1581,6 @@ static void epf_ntb_unbind(struct pci_epf *epf)
epf_ntb_epc_cleanup(ntb);
epf_ntb_config_spad_bar_free(ntb);
- epf_ntb_epc_destroy(ntb);
pci_unregister_driver(&vntb_pci_driver);
}
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 0f3921f28f17..590e5bde57ef 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -84,7 +84,7 @@ static void pci_secondary_epc_epf_unlink(struct config_item *epf_item,
pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
}
-static struct configfs_item_operations pci_secondary_epc_item_ops = {
+static const struct configfs_item_operations pci_secondary_epc_item_ops = {
.allow_link = pci_secondary_epc_epf_link,
.drop_link = pci_secondary_epc_epf_unlink,
};
@@ -148,7 +148,7 @@ static void pci_primary_epc_epf_unlink(struct config_item *epf_item,
pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
}
-static struct configfs_item_operations pci_primary_epc_item_ops = {
+static const struct configfs_item_operations pci_primary_epc_item_ops = {
.allow_link = pci_primary_epc_epf_link,
.drop_link = pci_primary_epc_epf_unlink,
};
@@ -256,7 +256,7 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,
pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
}
-static struct configfs_item_operations pci_epc_item_ops = {
+static const struct configfs_item_operations pci_epc_item_ops = {
.allow_link = pci_epc_epf_link,
.drop_link = pci_epc_epf_unlink,
};
@@ -507,7 +507,7 @@ static void pci_epf_release(struct config_item *item)
kfree(epf_group);
}
-static struct configfs_item_operations pci_epf_ops = {
+static const struct configfs_item_operations pci_epf_ops = {
.allow_link = pci_epf_vepf_link,
.drop_link = pci_epf_vepf_unlink,
.release = pci_epf_release,
@@ -565,7 +565,8 @@ static void pci_ep_cfs_add_type_group(struct pci_epf_group *epf_group)
if (IS_ERR(group)) {
dev_err(&epf_group->epf->dev,
- "failed to create epf type specific attributes\n");
+ "failed to create epf type specific attributes: %pe\n",
+ group);
return;
}
@@ -578,13 +579,17 @@ static void pci_epf_cfs_add_sub_groups(struct pci_epf_group *epf_group)
group = pci_ep_cfs_add_primary_group(epf_group);
if (IS_ERR(group)) {
- pr_err("failed to create 'primary' EPC interface\n");
+ dev_err(&epf_group->epf->dev,
+ "failed to create 'primary' EPC interface: %pe\n",
+ group);
return;
}
group = pci_ep_cfs_add_secondary_group(epf_group);
if (IS_ERR(group)) {
- pr_err("failed to create 'secondary' EPC interface\n");
+ dev_err(&epf_group->epf->dev,
+ "failed to create 'secondary' EPC interface: %pe\n",
+ group);
return;
}
@@ -624,8 +629,9 @@ static struct config_group *pci_epf_make(struct config_group *group,
epf = pci_epf_create(epf_name);
if (IS_ERR(epf)) {
- pr_err("failed to create endpoint function device\n");
- err = -EINVAL;
+ err = PTR_ERR(epf);
+ pr_err("failed to create endpoint function device (%s): %d\n",
+ epf_name, err);
goto free_name;
}
@@ -657,7 +663,7 @@ static void pci_epf_drop(struct config_group *group, struct config_item *item)
config_item_put(item);
}
-static struct configfs_group_operations pci_epf_group_ops = {
+static const struct configfs_group_operations pci_epf_group_ops = {
.make_group = &pci_epf_make,
.drop_item = &pci_epf_drop,
};
@@ -674,8 +680,8 @@ struct config_group *pci_ep_cfs_add_epf_group(const char *name)
group = configfs_register_default_group(functions_group, name,
&pci_epf_group_type);
if (IS_ERR(group))
- pr_err("failed to register configfs group for %s function\n",
- name);
+ pr_err("failed to register configfs group for %s function: %pe\n",
+ name, group);
return group;
}
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
index 51c19942a81e..1395919571f8 100644
--- a/drivers/pci/endpoint/pci-ep-msi.c
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -50,6 +50,9 @@ int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
return -EINVAL;
}
+ if (epf->db_msg)
+ return -EBUSY;
+
domain = of_msi_map_get_device_domain(epc->dev.parent, 0,
DOMAIN_BUS_PLATFORM_MSI);
if (!domain) {
@@ -79,6 +82,8 @@ int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
if (ret) {
dev_err(dev, "Failed to allocate MSI\n");
kfree(msg);
+ epf->db_msg = NULL;
+ epf->num_db = 0;
return ret;
}
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index e546b3dbb240..6c3c58185fc5 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -103,8 +103,9 @@ enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
bar++;
for (i = bar; i < PCI_STD_NUM_BARS; i++) {
- /* If the BAR is not reserved, return it. */
- if (epc_features->bar[i].type != BAR_RESERVED)
+ /* If the BAR is not reserved or disabled, return it. */
+ if (epc_features->bar[i].type != BAR_RESERVED &&
+ epc_features->bar[i].type != BAR_DISABLED)
return i;
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 83fd20c11238..6987343c9e61 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(pci_epf_bind);
* @epf_vf: the virtual EP function to be added
*
* A physical endpoint function can be associated with multiple virtual
- * endpoint functions. Invoke pci_epf_add_epf() to add a virtual PCI endpoint
+ * endpoint functions. Invoke pci_epf_add_vepf() to add a virtual PCI endpoint
* function to a physical PCI endpoint function.
*/
int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 1e9158d7bac7..2cafd3b26f34 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -79,7 +79,8 @@ static int init_slot(struct controller *ctrl)
snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
retval = pci_hp_initialize(&ctrl->hotplug_slot,
- ctrl->pcie->port->subordinate, 0, name);
+ ctrl->pcie->port->subordinate,
+ PCI_SLOT_ALL_DEVICES, name);
if (retval) {
ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
kfree(ops);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 5c020831e318..ff92a5c301b8 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -215,24 +215,19 @@ static void pnv_php_reverse_nodes(struct device_node *parent)
static int pnv_php_populate_changeset(struct of_changeset *ocs,
struct device_node *dn)
{
- struct device_node *child;
- int ret = 0;
+ int ret;
- for_each_child_of_node(dn, child) {
+ for_each_child_of_node_scoped(dn, child) {
ret = of_changeset_attach_node(ocs, child);
- if (ret) {
- of_node_put(child);
- break;
- }
+ if (ret)
+ return ret;
ret = pnv_php_populate_changeset(ocs, child);
- if (ret) {
- of_node_put(child);
- break;
- }
+ if (ret)
+ return ret;
}
- return ret;
+ return 0;
}
static void *pnv_php_add_one_pdn(struct device_node *dn, void *data)
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 33ca19200c1b..67362e5b9971 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -82,7 +82,6 @@ EXPORT_SYMBOL_GPL(rpaphp_deregister_slot);
int rpaphp_register_slot(struct slot *slot)
{
struct hotplug_slot *php_slot = &slot->hotplug_slot;
- struct device_node *child;
u32 my_index;
int retval;
int slotno = -1;
@@ -97,11 +96,10 @@ int rpaphp_register_slot(struct slot *slot)
return -EAGAIN;
}
- for_each_child_of_node(slot->dn, child) {
+ for_each_child_of_node_scoped(slot->dn, child) {
retval = of_property_read_u32(child, "ibm,my-drc-index", &my_index);
if (my_index == slot->index) {
slotno = PCI_SLOT(PCI_DN(child)->devfn);
- of_node_put(child);
break;
}
}
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index 818d55fbad0d..c18559b6272c 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -370,6 +370,11 @@ EXPORT_SYMBOL(pci_irq_get_affinity);
* Undo the interrupt vector allocations and possible device MSI/MSI-X
* enablement earlier done through pci_alloc_irq_vectors_affinity() or
* pci_alloc_irq_vectors().
+ *
+ * WARNING: Do not call this function if the device has been enabled
+ * with pcim_enable_device(). In that case, IRQ vectors are automatically
+ * managed via pcim_msi_release() and calling pci_free_irq_vectors() can
+ * lead to double-free issues.
*/
void pci_free_irq_vectors(struct pci_dev *dev)
{
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index e2412175d7af..81d24a270a79 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -77,6 +77,16 @@ static void pcim_msi_release(void *pcidev)
/*
* Needs to be separate from pcim_release to prevent an ordering problem
* vs. msi_device_data_release() in the MSI core code.
+ *
+ * TODO: Remove the legacy side-effect of pcim_enable_device() that
+ * activates automatic IRQ vector management. This design is dangerous
+ * and confusing because it switches normally un-managed functions
+ * into managed mode. Drivers should explicitly manage their IRQ vectors
+ * without this implicit behavior.
+ *
+ * The current implementation uses both pdev->is_managed and
+ * pdev->is_msi_managed flags, which adds unnecessary complexity.
+ * This should be simplified in a future kernel version.
*/
static int pcim_setup_msi_release(struct pci_dev *dev)
{
diff --git a/drivers/pci/npem.c b/drivers/pci/npem.c
index ffeeedf6e311..c51879fcd438 100644
--- a/drivers/pci/npem.c
+++ b/drivers/pci/npem.c
@@ -504,7 +504,7 @@ static int pci_npem_set_led_classdev(struct npem *npem, struct npem_led *nled)
led->brightness_get = brightness_get;
led->max_brightness = 1;
led->default_trigger = "none";
- led->flags = 0;
+ led->flags = LED_HW_PLUGGABLE;
ret = led_classdev_register(&npem->dev->dev, led);
if (ret)
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 9f8eb5df279e..6da569fd3b8f 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -775,7 +775,7 @@ void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge)
/* Check if there is a DT root node to attach the created node */
if (!of_root) {
- pr_err("of_root node is NULL, cannot create PCI host bridge node\n");
+ pr_debug("of_root node is NULL, cannot create PCI host bridge node\n");
return;
}
@@ -875,24 +875,19 @@ EXPORT_SYMBOL_GPL(of_pci_supply_present);
* of_pci_get_max_link_speed - Find the maximum link speed of the given device node.
* @node: Device tree node with the maximum link speed information.
*
- * This function will try to find the limitation of link speed by finding
- * a property called "max-link-speed" of the given device node.
+ * This function will try to read the "max-link-speed" property of the given
+ * device tree node. It does NOT validate the value of the property.
*
- * Return:
- * * > 0 - On success, a maximum link speed.
- * * -EINVAL - Invalid "max-link-speed" property value, or failure to access
- * the property of the device tree node.
- *
- * Returns the associated max link speed from DT, or a negative value if the
- * required property is not found or is invalid.
+ * Return: Maximum link speed value on success, errno on failure.
*/
int of_pci_get_max_link_speed(struct device_node *node)
{
u32 max_link_speed;
+ int ret;
- if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
- max_link_speed == 0 || max_link_speed > 4)
- return -EINVAL;
+ ret = of_property_read_u32(node, "max-link-speed", &max_link_speed);
+ if (ret)
+ return ret;
return max_link_speed;
}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index e0f546166eb8..7c898542af8d 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -530,7 +530,7 @@ static bool cpu_supports_p2pdma(void)
static const struct pci_p2pdma_whitelist_entry {
unsigned short vendor;
- unsigned short device;
+ int device;
enum {
REQ_SAME_HOST_BRIDGE = 1 << 0,
} flags;
@@ -548,6 +548,8 @@ static const struct pci_p2pdma_whitelist_entry {
{PCI_VENDOR_ID_INTEL, 0x2033, 0},
{PCI_VENDOR_ID_INTEL, 0x2020, 0},
{PCI_VENDOR_ID_INTEL, 0x09a2, 0},
+ /* Google SoCs. */
+ {PCI_VENDOR_ID_GOOGLE, PCI_ANY_ID, 0},
{}
};
@@ -601,8 +603,12 @@ static bool __host_bridge_whitelist(struct pci_host_bridge *host,
device = root->device;
for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
- if (vendor != entry->vendor || device != entry->device)
+ if (vendor != entry->vendor)
continue;
+
+ if (entry->device != PCI_ANY_ID && device != entry->device)
+ continue;
+
if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
return false;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd9075403987..d10ece0889f0 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -138,9 +138,11 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
{
struct pci_dynid *dynid;
const struct pci_device_id *found_id = NULL, *ids;
+ int ret;
/* When driver_override is set, only bind to the matching driver */
- if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+ ret = device_match_driver_override(&dev->dev, &drv->driver);
+ if (ret == 0)
return NULL;
/* Look at the dynamic ids first, before the static ones */
@@ -164,7 +166,7 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
* matching.
*/
if (found_id->override_only) {
- if (dev->driver_override)
+ if (ret > 0)
return found_id;
} else {
return found_id;
@@ -172,7 +174,7 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
}
/* driver_override will always match, send a dummy id */
- if (dev->driver_override)
+ if (ret > 0)
return &pci_device_id_any;
return NULL;
}
@@ -452,7 +454,7 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
- pdev->driver_override);
+ device_has_driver_override(&pdev->dev));
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
@@ -1722,6 +1724,7 @@ static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
const struct bus_type pci_bus_type = {
.name = "pci",
+ .driver_override = true,
.match = pci_bus_match,
.uevent = pci_uevent,
.probe = pci_device_probe,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 16eaaf749ba9..d37860841260 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -378,6 +378,9 @@ static ssize_t numa_node_store(struct device *dev,
if (node != NUMA_NO_NODE && !node_online(node))
return -EINVAL;
+ if (node == dev->numa_node)
+ return count;
+
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
node);
@@ -553,7 +556,6 @@ static ssize_t reset_subordinate_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_bus *bus = pdev->subordinate;
unsigned long val;
if (!capable(CAP_SYS_ADMIN))
@@ -563,7 +565,7 @@ static ssize_t reset_subordinate_store(struct device *dev,
return -EINVAL;
if (val) {
- int ret = __pci_reset_bus(bus);
+ int ret = pci_try_reset_bridge(pdev);
if (ret)
return ret;
@@ -615,33 +617,6 @@ static ssize_t devspec_show(struct device *dev,
static DEVICE_ATTR_RO(devspec);
#endif
-static ssize_t driver_override_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = driver_set_override(dev, &pdev->driver_override, buf, count);
- if (ret)
- return ret;
-
- return count;
-}
-
-static ssize_t driver_override_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- ssize_t len;
-
- device_lock(dev);
- len = sysfs_emit(buf, "%s\n", pdev->driver_override);
- device_unlock(dev);
- return len;
-}
-static DEVICE_ATTR_RW(driver_override);
-
static struct attribute *pci_dev_attrs[] = {
&dev_attr_power_state.attr,
&dev_attr_resource.attr,
@@ -669,7 +644,6 @@ static struct attribute *pci_dev_attrs[] = {
#ifdef CONFIG_OF
&dev_attr_devspec.attr,
#endif
- &dev_attr_driver_override.attr,
&dev_attr_ari_enabled.attr,
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8479c2e1f74f..8f7cfcc00090 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -949,7 +949,7 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
ret = pci_dev_str_match(dev, p, &p);
if (ret < 0) {
- pr_info_once("PCI: Can't parse ACS command line parameter\n");
+ pr_warn_once("PCI: Can't parse ACS command line parameter\n");
break;
} else if (ret == 1) {
/* Found a match */
@@ -2241,10 +2241,9 @@ EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
#ifdef CONFIG_PCIEAER
void pcie_clear_device_status(struct pci_dev *dev)
{
- u16 sta;
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
- pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
+ pcie_capability_write_word(dev, PCI_EXP_DEVSTA,
+ PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED |
+ PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_URD);
}
#endif
@@ -3675,12 +3674,11 @@ void pci_acs_init(struct pci_dev *dev)
*/
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
{
- struct pci_bus *bus = dev->bus;
- struct pci_dev *bridge;
+ struct pci_dev *root, *bridge;
u32 cap, ctl2;
/*
- * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
+ * Per PCIe r7.0, sec 7.5.3.16, the AtomicOp Requester Enable bit
* in Device Control 2 is reserved in VFs and the PF value applies
* to all associated VFs.
*/
@@ -3691,50 +3689,49 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
return -EINVAL;
/*
- * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
- * AtomicOp requesters. For now, we only support endpoints as
- * requesters and root ports as completers. No endpoints as
+ * Per PCIe r7.0, sec 6.15, endpoints and root ports may be
+ * AtomicOp requesters. For now, we only support (legacy) endpoints
+ * as requesters and root ports as completers. No endpoints as
* completers, and no peer-to-peer.
*/
switch (pci_pcie_type(dev)) {
case PCI_EXP_TYPE_ENDPOINT:
case PCI_EXP_TYPE_LEG_END:
- case PCI_EXP_TYPE_RC_END:
break;
default:
return -EINVAL;
}
- while (bus->parent) {
- bridge = bus->self;
+ root = pcie_find_root_port(dev);
+ if (!root)
+ return -EINVAL;
- pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
+ pcie_capability_read_dword(root, PCI_EXP_DEVCAP2, &cap);
+ if ((cap & cap_mask) != cap_mask)
+ return -EINVAL;
+ bridge = pci_upstream_bridge(dev);
+ while (bridge != root) {
switch (pci_pcie_type(bridge)) {
- /* Ensure switch ports support AtomicOp routing */
case PCI_EXP_TYPE_UPSTREAM:
- case PCI_EXP_TYPE_DOWNSTREAM:
- if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
- return -EINVAL;
- break;
-
- /* Ensure root port supports all the sizes we care about */
- case PCI_EXP_TYPE_ROOT_PORT:
- if ((cap & cap_mask) != cap_mask)
- return -EINVAL;
- break;
- }
-
- /* Ensure upstream ports don't block AtomicOps on egress */
- if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
+ /* Upstream ports must not block AtomicOps on egress */
pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
&ctl2);
if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
return -EINVAL;
+ fallthrough;
+
+ /* All switch ports need to route AtomicOps */
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2,
+ &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
+ return -EINVAL;
+ break;
}
- bus = bus->parent;
+ bridge = pci_upstream_bridge(bridge);
}
pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
@@ -4914,12 +4911,8 @@ static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
* If "dev" is below a CXL port that has SBR control masked, SBR
* won't do anything, so return error.
*/
- if (bridge && cxl_sbr_masked(bridge)) {
- if (probe)
- return 0;
-
+ if (bridge && pcie_is_cxl(bridge) && cxl_sbr_masked(bridge))
return -ENOTTY;
- }
rc = pci_dev_reset_iommu_prepare(dev);
if (rc) {
@@ -5292,13 +5285,21 @@ static bool pci_bus_resettable(struct pci_bus *bus)
return true;
}
+static void pci_bus_lock(struct pci_bus *bus);
+static void pci_bus_unlock(struct pci_bus *bus);
+static int pci_bus_trylock(struct pci_bus *bus);
+
/* Lock devices from the top of the tree down */
-static void pci_bus_lock(struct pci_bus *bus)
+static void __pci_bus_lock(struct pci_bus *bus, struct pci_slot *slot)
{
- struct pci_dev *dev;
+ struct pci_dev *dev, *bridge = bus->self;
+
+ if (bridge)
+ pci_dev_lock(bridge);
- pci_dev_lock(bus->self);
list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (slot && (!dev->slot || dev->slot != slot))
+ continue;
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
else
@@ -5307,28 +5308,34 @@ static void pci_bus_lock(struct pci_bus *bus)
}
/* Unlock devices from the bottom of the tree up */
-static void pci_bus_unlock(struct pci_bus *bus)
+static void __pci_bus_unlock(struct pci_bus *bus, struct pci_slot *slot)
{
- struct pci_dev *dev;
+ struct pci_dev *dev, *bridge = bus->self;
list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (slot && (!dev->slot || dev->slot != slot))
+ continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
else
pci_dev_unlock(dev);
}
- pci_dev_unlock(bus->self);
+
+ if (bridge)
+ pci_dev_unlock(bridge);
}
/* Return 1 on successful lock, 0 on contention */
-static int pci_bus_trylock(struct pci_bus *bus)
+static int __pci_bus_trylock(struct pci_bus *bus, struct pci_slot *slot)
{
- struct pci_dev *dev;
+ struct pci_dev *dev, *bridge = bus->self;
- if (!pci_dev_trylock(bus->self))
+ if (bridge && !pci_dev_trylock(bridge))
return 0;
list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (slot && (!dev->slot || dev->slot != slot))
+ continue;
if (dev->subordinate) {
if (!pci_bus_trylock(dev->subordinate))
goto unlock;
@@ -5339,15 +5346,37 @@ static int pci_bus_trylock(struct pci_bus *bus)
unlock:
list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ if (slot && (!dev->slot || dev->slot != slot))
+ continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
else
pci_dev_unlock(dev);
}
- pci_dev_unlock(bus->self);
+
+ if (bridge)
+ pci_dev_unlock(bridge);
return 0;
}
+/* Lock devices from the top of the tree down */
+static void pci_bus_lock(struct pci_bus *bus)
+{
+ __pci_bus_lock(bus, NULL);
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_bus_unlock(struct pci_bus *bus)
+{
+ __pci_bus_unlock(bus, NULL);
+}
+
+/* Return 1 on successful lock, 0 on contention */
+static int pci_bus_trylock(struct pci_bus *bus)
+{
+ return __pci_bus_trylock(bus, NULL);
+}
+
/* Do any devices on or below this slot prevent a bus reset? */
static bool pci_slot_resettable(struct pci_slot *slot)
{
@@ -5370,72 +5399,19 @@ static bool pci_slot_resettable(struct pci_slot *slot)
/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
- struct pci_dev *dev, *bridge = slot->bus->self;
-
- if (bridge)
- pci_dev_lock(bridge);
-
- list_for_each_entry(dev, &slot->bus->devices, bus_list) {
- if (!dev->slot || dev->slot != slot)
- continue;
- if (dev->subordinate)
- pci_bus_lock(dev->subordinate);
- else
- pci_dev_lock(dev);
- }
+ __pci_bus_lock(slot->bus, slot);
}
/* Unlock devices from the bottom of the tree up */
static void pci_slot_unlock(struct pci_slot *slot)
{
- struct pci_dev *dev, *bridge = slot->bus->self;
-
- list_for_each_entry(dev, &slot->bus->devices, bus_list) {
- if (!dev->slot || dev->slot != slot)
- continue;
- if (dev->subordinate)
- pci_bus_unlock(dev->subordinate);
- else
- pci_dev_unlock(dev);
- }
-
- if (bridge)
- pci_dev_unlock(bridge);
+ __pci_bus_unlock(slot->bus, slot);
}
/* Return 1 on successful lock, 0 on contention */
static int pci_slot_trylock(struct pci_slot *slot)
{
- struct pci_dev *dev, *bridge = slot->bus->self;
-
- if (bridge && !pci_dev_trylock(bridge))
- return 0;
-
- list_for_each_entry(dev, &slot->bus->devices, bus_list) {
- if (!dev->slot || dev->slot != slot)
- continue;
- if (dev->subordinate) {
- if (!pci_bus_trylock(dev->subordinate))
- goto unlock;
- } else if (!pci_dev_trylock(dev))
- goto unlock;
- }
- return 1;
-
-unlock:
- list_for_each_entry_continue_reverse(dev,
- &slot->bus->devices, bus_list) {
- if (!dev->slot || dev->slot != slot)
- continue;
- if (dev->subordinate)
- pci_bus_unlock(dev->subordinate);
- else
- pci_dev_unlock(dev);
- }
-
- if (bridge)
- pci_dev_unlock(bridge);
- return 0;
+ return __pci_bus_trylock(slot->bus, slot);
}
/*
@@ -5541,7 +5517,7 @@ int pci_probe_reset_slot(struct pci_slot *slot)
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
/**
- * __pci_reset_slot - Try to reset a PCI slot
+ * pci_try_reset_slot - Try to reset a PCI slot
* @slot: PCI slot to reset
*
* A PCI bus may host multiple slots, each slot may support a reset mechanism
@@ -5555,7 +5531,7 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
*
* Same as above except return -EAGAIN if the slot cannot be locked
*/
-static int __pci_reset_slot(struct pci_slot *slot)
+static int pci_try_reset_slot(struct pci_slot *slot)
{
int rc;
@@ -5597,14 +5573,44 @@ static int pci_bus_reset(struct pci_bus *bus, bool probe)
}
/**
- * pci_bus_error_reset - reset the bridge's subordinate bus
- * @bridge: The parent device that connects to the bus to reset
+ * pci_try_reset_bus - Try to reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Same as above except return -EAGAIN if the bus cannot be locked
+ */
+static int pci_try_reset_bus(struct pci_bus *bus)
+{
+ int rc;
+
+ rc = pci_bus_reset(bus, PCI_RESET_PROBE);
+ if (rc)
+ return rc;
+
+ if (pci_bus_trylock(bus)) {
+ pci_bus_save_and_disable_locked(bus);
+ might_sleep();
+ rc = pci_bridge_secondary_bus_reset(bus->self);
+ pci_bus_restore_locked(bus);
+ pci_bus_unlock(bus);
+ } else
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+#define PCI_RESET_RESTORE true
+#define PCI_RESET_NO_RESTORE false
+/**
+ * pci_reset_bridge - reset a bridge's subordinate bus
+ * @bridge: bridge that connects to the bus to reset
+ * @restore: when true use a reset method that invokes pci_dev_restore() post
+ * reset for affected devices
*
* This function will first try to reset the slots on this bus if the method is
* available. If slot reset fails or is not available, this will fall back to a
* secondary bus reset.
*/
-int pci_bus_error_reset(struct pci_dev *bridge)
+static int pci_reset_bridge(struct pci_dev *bridge, bool restore)
{
struct pci_bus *bus = bridge->subordinate;
struct pci_slot *slot;
@@ -5620,18 +5626,43 @@ int pci_bus_error_reset(struct pci_dev *bridge)
if (pci_probe_reset_slot(slot))
goto bus_reset;
- list_for_each_entry(slot, &bus->slots, list)
- if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
+ list_for_each_entry(slot, &bus->slots, list) {
+ int ret;
+
+ if (restore)
+ ret = pci_try_reset_slot(slot);
+ else
+ ret = pci_slot_reset(slot, PCI_RESET_DO_RESET);
+
+ if (ret)
goto bus_reset;
+ }
mutex_unlock(&pci_slot_mutex);
return 0;
bus_reset:
mutex_unlock(&pci_slot_mutex);
+
+ if (restore)
+ return pci_try_reset_bus(bus);
return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
}
/**
+ * pci_bus_error_reset - reset the bridge's subordinate bus
+ * @bridge: The parent device that connects to the bus to reset
+ */
+int pci_bus_error_reset(struct pci_dev *bridge)
+{
+ return pci_reset_bridge(bridge, PCI_RESET_NO_RESTORE);
+}
+
+int pci_try_reset_bridge(struct pci_dev *bridge)
+{
+ return pci_reset_bridge(bridge, PCI_RESET_RESTORE);
+}
+
+/**
* pci_probe_reset_bus - probe whether a PCI bus can be reset
* @bus: PCI bus to probe
*
@@ -5644,32 +5675,6 @@ int pci_probe_reset_bus(struct pci_bus *bus)
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
/**
- * __pci_reset_bus - Try to reset a PCI bus
- * @bus: top level PCI bus to reset
- *
- * Same as above except return -EAGAIN if the bus cannot be locked
- */
-int __pci_reset_bus(struct pci_bus *bus)
-{
- int rc;
-
- rc = pci_bus_reset(bus, PCI_RESET_PROBE);
- if (rc)
- return rc;
-
- if (pci_bus_trylock(bus)) {
- pci_bus_save_and_disable_locked(bus);
- might_sleep();
- rc = pci_bridge_secondary_bus_reset(bus->self);
- pci_bus_restore_locked(bus);
- pci_bus_unlock(bus);
- } else
- rc = -EAGAIN;
-
- return rc;
-}
-
-/**
* pci_reset_bus - Try to reset a PCI bus
* @pdev: top level PCI device to reset via slot/bus
*
@@ -5678,7 +5683,7 @@ int __pci_reset_bus(struct pci_bus *bus)
int pci_reset_bus(struct pci_dev *pdev)
{
return (!pci_probe_reset_slot(pdev->slot)) ?
- __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
+ pci_try_reset_slot(pdev->slot) : pci_try_reset_bus(pdev->bus);
}
EXPORT_SYMBOL_GPL(pci_reset_bus);
@@ -6197,6 +6202,18 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
cmd &= ~PCI_BRIDGE_CTL_VGA;
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
cmd);
+
+
+ /*
+ * VGA Enable may not be writable if bridge doesn't
+ * support it.
+ */
+ if (decode) {
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+ &cmd);
+ if (!(cmd & PCI_BRIDGE_CTL_VGA))
+ return -EIO;
+ }
}
bus = bus->parent;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 13d998fbacce..4a14f88e543a 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -108,6 +108,8 @@ struct pcie_tlp_log;
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
extern const unsigned char pcie_link_speed[];
+unsigned char pcie_get_link_speed(unsigned int speed);
+
extern bool pci_early_dump;
extern struct mutex pci_rescan_remove_lock;
@@ -231,7 +233,7 @@ bool pci_reset_supported(struct pci_dev *dev);
void pci_init_reset_methods(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
-int __pci_reset_bus(struct pci_bus *bus);
+int pci_try_reset_bridge(struct pci_dev *bridge);
struct pci_cap_saved_data {
u16 cap_nr;
@@ -1053,6 +1055,9 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
return resource_alignment(res);
}
+resource_size_t pci_min_window_alignment(struct pci_bus *bus,
+ unsigned long type);
+
void pci_acs_init(struct pci_dev *dev);
void pci_enable_acs(struct pci_dev *dev);
#ifdef CONFIG_PCI_QUIRKS
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index d916378bc707..c4fd9c0b2a54 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1041,8 +1041,6 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
* 3) There are multiple errors and prior ID comparing fails;
* We check AER status registers to find possible reporter.
*/
- if (atomic_read(&dev->enable_cnt) == 0)
- return false;
/* Check if AER is enabled */
pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 21f5d23e0b61..925373b98dff 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -706,22 +706,29 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
}
/* Program T_POWER_ON times in both ports */
- pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
- pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2,
+ PCI_L1SS_CTL2_T_PWR_ON_VALUE |
+ PCI_L1SS_CTL2_T_PWR_ON_SCALE, ctl2);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL2,
+ PCI_L1SS_CTL2_T_PWR_ON_VALUE |
+ PCI_L1SS_CTL2_T_PWR_ON_SCALE, ctl2);
/* Program Common_Mode_Restore_Time in upstream device */
pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+ PCI_L1SS_CTL1_CM_RESTORE_TIME,
+ ctl1 & PCI_L1SS_CTL1_CM_RESTORE_TIME);
/* Program LTR_L1.2_THRESHOLD time in both ports */
pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
- ctl1);
+ ctl1 & (PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
- ctl1);
+ ctl1 & (PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
if (pl1_2_enables || cl1_2_enables) {
pci_clear_and_set_config_dword(parent,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index fc18349614d7..2b779bd1d861 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -256,6 +256,7 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
info->dev[0] = dev;
info->error_dev_num = 1;
+ info->ratelimit_print[0] = 1;
return 1;
}
@@ -372,11 +373,13 @@ static irqreturn_t dpc_handler(int irq, void *context)
return IRQ_HANDLED;
}
+ pci_dev_get(pdev);
dpc_process_error(pdev);
/* We configure DPC so it only triggers on ERR_FATAL */
pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);
+ pci_dev_put(pdev);
return IRQ_HANDLED;
}
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 91a598ed534c..a41ffd1914de 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -52,6 +52,7 @@ void pci_ptm_init(struct pci_dev *dev)
return;
dev->ptm_cap = ptm;
+ atomic_set(&dev->ptm_enable_cnt, 0);
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));
pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
@@ -85,10 +86,6 @@ void pci_ptm_init(struct pci_dev *dev)
dev->ptm_responder = 1;
if (cap & PCI_PTM_CAP_REQ)
dev->ptm_requester = 1;
-
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
- pci_enable_ptm(dev, NULL);
}
void pci_save_ptm_state(struct pci_dev *dev)
@@ -129,26 +126,11 @@ void pci_restore_ptm_state(struct pci_dev *dev)
static int __pci_enable_ptm(struct pci_dev *dev)
{
u16 ptm = dev->ptm_cap;
- struct pci_dev *ups;
u32 ctrl;
if (!ptm)
return -EINVAL;
- /*
- * A device uses local PTM Messages to request time information
- * from a PTM Root that's farther upstream. Every device along the
- * path must support PTM and have it enabled so it can handle the
- * messages. Therefore, if this device is not a PTM Root, the
- * upstream link partner must have PTM enabled before we can enable
- * PTM.
- */
- if (!dev->ptm_root) {
- ups = pci_upstream_ptm(dev);
- if (!ups || !ups->ptm_enabled)
- return -EINVAL;
- }
-
switch (pci_pcie_type(dev)) {
case PCI_EXP_TYPE_ROOT_PORT:
if (!dev->ptm_root)
@@ -182,27 +164,46 @@ static int __pci_enable_ptm(struct pci_dev *dev)
/**
* pci_enable_ptm() - Enable Precision Time Measurement
* @dev: PCI device
- * @granularity: pointer to return granularity
*
- * Enable Precision Time Measurement for @dev. If successful and
- * @granularity is non-NULL, return the Effective Granularity.
+ * Enable Precision Time Measurement for @dev.
*
* Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
* is not a PTM Root and lacks an upstream path of PTM-enabled devices.
*/
-int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+int pci_enable_ptm(struct pci_dev *dev)
{
int rc;
char clock_desc[8];
- rc = __pci_enable_ptm(dev);
- if (rc)
- return rc;
+ /*
+ * A device uses local PTM Messages to request time information
+ * from a PTM Root that's farther upstream. Every device along
+ * the path must support PTM and have it enabled so it can
+ * handle the messages. Therefore, if this device is not a PTM
+ * Root, the upstream link partner must have PTM enabled before
+ * we can enable PTM.
+ */
+ if (!dev->ptm_root) {
+ struct pci_dev *parent;
+
+ parent = pci_upstream_ptm(dev);
+ if (!parent)
+ return -EINVAL;
+ /* Enable PTM for the parent */
+ rc = pci_enable_ptm(parent);
+ if (rc)
+ return rc;
+ }
- dev->ptm_enabled = 1;
+ /* Already enabled? */
+ if (atomic_inc_return(&dev->ptm_enable_cnt) > 1)
+ return 0;
- if (granularity)
- *granularity = dev->ptm_granularity;
+ rc = __pci_enable_ptm(dev);
+ if (rc) {
+ atomic_dec(&dev->ptm_enable_cnt);
+ return rc;
+ }
switch (dev->ptm_granularity) {
case 0:
@@ -244,27 +245,31 @@ static void __pci_disable_ptm(struct pci_dev *dev)
*/
void pci_disable_ptm(struct pci_dev *dev)
{
- if (dev->ptm_enabled) {
+ struct pci_dev *parent;
+
+ if (atomic_dec_and_test(&dev->ptm_enable_cnt))
__pci_disable_ptm(dev);
- dev->ptm_enabled = 0;
- }
+
+ parent = pci_upstream_ptm(dev);
+ if (parent)
+ pci_disable_ptm(parent);
}
EXPORT_SYMBOL(pci_disable_ptm);
/*
- * Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on
+ * Disable PTM, but preserve dev->ptm_enable_cnt so we silently re-enable it on
* resume if necessary.
*/
void pci_suspend_ptm(struct pci_dev *dev)
{
- if (dev->ptm_enabled)
+ if (atomic_read(&dev->ptm_enable_cnt))
__pci_disable_ptm(dev);
}
/* If PTM was enabled before suspend, re-enable it when resuming */
void pci_resume_ptm(struct pci_dev *dev)
{
- if (dev->ptm_enabled)
+ if (atomic_read(&dev->ptm_enable_cnt))
__pci_enable_ptm(dev);
}
@@ -273,7 +278,7 @@ bool pcie_ptm_enabled(struct pci_dev *dev)
if (!dev)
return false;
- return dev->ptm_enabled;
+ return atomic_read(&dev->ptm_enable_cnt);
}
EXPORT_SYMBOL(pcie_ptm_enabled);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index bccc7a4bdd79..b63cd0c310bc 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -68,23 +68,6 @@ static struct resource *get_pci_domain_busn_res(int domain_nr)
}
/*
- * Some device drivers need know if PCI is initiated.
- * Basically, we think PCI is not initiated when there
- * is no device to be found on the pci_bus_type.
- */
-int no_pci_devices(void)
-{
- struct device *dev;
- int no_devices;
-
- dev = bus_find_next_device(&pci_bus_type, NULL);
- no_devices = (dev == NULL);
- put_device(dev);
- return no_devices;
-}
-EXPORT_SYMBOL(no_pci_devices);
-
-/*
* PCI Bus Class
*/
static void release_pcibus_dev(struct device *dev)
@@ -395,6 +378,9 @@ static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res,
unsigned long io_mask, io_granularity, base, limit;
struct pci_bus_region region;
+ if (!dev->io_window)
+ return;
+
io_mask = PCI_IO_RANGE_MASK;
io_granularity = 0x1000;
if (dev->io_window_1k) {
@@ -465,6 +451,9 @@ static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res,
pci_bus_addr_t base, limit;
struct pci_bus_region region;
+ if (!dev->pref_window)
+ return;
+
pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
@@ -783,6 +772,22 @@ const unsigned char pcie_link_speed[] = {
};
EXPORT_SYMBOL_GPL(pcie_link_speed);
+/**
+ * pcie_get_link_speed - Get speed value from PCIe generation number
+ * @speed: PCIe speed (1-based: 1 = 2.5GT, 2 = 5GT, ...)
+ *
+ * Returns the speed value (e.g., PCIE_SPEED_2_5GT) if @speed is valid,
+ * otherwise returns PCI_SPEED_UNKNOWN.
+ */
+unsigned char pcie_get_link_speed(unsigned int speed)
+{
+ if (speed >= ARRAY_SIZE(pcie_link_speed))
+ return PCI_SPEED_UNKNOWN;
+
+ return pcie_link_speed[speed];
+}
+EXPORT_SYMBOL_GPL(pcie_get_link_speed);
+
const char *pci_speed_string(enum pci_bus_speed speed)
{
/* Indexed by the pci_bus_speed enum */
@@ -2488,7 +2493,6 @@ static void pci_release_dev(struct device *dev)
pci_release_of_node(pci_dev);
pcibios_release_device(pci_dev);
pci_bus_put(pci_dev->bus);
- kfree(pci_dev->driver_override);
bitmap_free(pci_dev->dma_alias_mask);
dev_dbg(dev, "device released\n");
kfree(pci_dev);
diff --git a/drivers/pci/pwrctrl/Kconfig b/drivers/pci/pwrctrl/Kconfig
index cd3aa15bad00..9eec767cda86 100644
--- a/drivers/pci/pwrctrl/Kconfig
+++ b/drivers/pci/pwrctrl/Kconfig
@@ -11,17 +11,18 @@ config PCI_PWRCTRL_PWRSEQ
select POWER_SEQUENCING
select PCI_PWRCTRL
-config PCI_PWRCTRL_SLOT
- tristate "PCI Power Control driver for PCI slots"
+config PCI_PWRCTRL_GENERIC
+ tristate "Generic PCI Power Control driver for PCI slots and endpoints"
select POWER_SEQUENCING
select PCI_PWRCTRL
help
- Say Y here to enable the PCI Power Control driver to control the power
- state of PCI slots.
+ Say Y here to enable the generic PCI Power Control driver to control
+ the power state of PCI slots and endpoints.
This is a generic driver that controls the power state of different
- PCI slots. The voltage regulators powering the rails of the PCI slots
- are expected to be defined in the devicetree node of the PCI bridge.
+ PCI slots and endpoints. The voltage regulators powering the rails
+ of the PCI slots or endpoints are expected to be defined in the
+ devicetree node of the PCI bridge or endpoint.
config PCI_PWRCTRL_TC9563
tristate "PCI Power Control driver for TC9563 PCIe switch"
diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile
index 13b02282106c..f6bb4fb9a410 100644
--- a/drivers/pci/pwrctrl/Makefile
+++ b/drivers/pci/pwrctrl/Makefile
@@ -5,7 +5,7 @@ pci-pwrctrl-core-y := core.o
obj-$(CONFIG_PCI_PWRCTRL_PWRSEQ) += pci-pwrctrl-pwrseq.o
-obj-$(CONFIG_PCI_PWRCTRL_SLOT) += pci-pwrctrl-slot.o
-pci-pwrctrl-slot-y := slot.o
+obj-$(CONFIG_PCI_PWRCTRL_GENERIC) += pci-pwrctrl-generic.o
+pci-pwrctrl-generic-y := generic.o
obj-$(CONFIG_PCI_PWRCTRL_TC9563) += pci-pwrctrl-tc9563.o
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/generic.c
index b87639253ae2..1ae19450a455 100644
--- a/drivers/pci/pwrctrl/slot.c
+++ b/drivers/pci/pwrctrl/generic.c
@@ -87,18 +87,15 @@ static int slot_pwrctrl_probe(struct platform_device *pdev)
ret = of_regulator_bulk_get_all(dev, dev_of_node(dev),
&slot->supplies);
- if (ret < 0) {
- dev_err_probe(dev, ret, "Failed to get slot regulators\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get slot regulators\n");
slot->num_supplies = ret;
slot->clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(slot->clk)) {
+ if (IS_ERR(slot->clk))
return dev_err_probe(dev, PTR_ERR(slot->clk),
"Failed to enable slot clock\n");
- }
skip_resources:
slot->pwrctrl.power_on = slot_pwrctrl_power_on;
@@ -121,6 +118,10 @@ static const struct of_device_id slot_pwrctrl_of_match[] = {
{
.compatible = "pciclass,0604",
},
+ /* Renesas UPD720201/UPD720202 USB 3.0 xHCI Host Controller */
+ {
+ .compatible = "pci1912,0014",
+ },
{ }
};
MODULE_DEVICE_TABLE(of, slot_pwrctrl_of_match);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 48946cca4be7..caaed1a01dc0 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1296,9 +1296,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt
/*
* CardBus controllers have a legacy base address that enables them to
- * respond as i82365 pcmcia controllers. We don't want them to do this
- * even if the Linux CardBus driver is not loaded, because the Linux i82365
- * driver does not (and should not) handle CardBus.
+ * respond as i82365 PCMCIA controllers. We don't want them to do this.
*/
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
@@ -5603,6 +5601,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
* AMD Starship/Matisse HD Audio Controller 0x1487
* AMD Starship USB 3.0 Host Controller 0x148c
* AMD Matisse USB 3.0 Host Controller 0x149c
+ * AMD Neural Processing Unit 0x1502 0x17f0
* Intel 82579LM Gigabit Ethernet Controller 0x1502
* Intel 82579V Gigabit Ethernet Controller 0x1503
* Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter
@@ -5615,6 +5614,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1502, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x17f0, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 61f769aaa2f6..4cf120ebe5ad 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -434,6 +434,10 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
dev = add_res->dev;
idx = pci_resource_num(dev, res);
+ /* Skip this resource if not found in head list */
+ if (!res_to_dev_res(head, res))
+ continue;
+
/*
* Skip resource that failed the earlier assignment and is
* not optional as it would just fail again.
@@ -442,10 +446,6 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
!pci_resource_is_optional(dev, idx))
goto out;
- /* Skip this resource if not found in head list */
- if (!res_to_dev_res(head, res))
- continue;
-
res_name = pci_resource_name(dev, idx);
add_size = add_res->add_size;
align = add_res->min_align;
@@ -1035,7 +1035,7 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
#define PCI_P2P_DEFAULT_IO_ALIGN SZ_4K
#define PCI_P2P_DEFAULT_IO_ALIGN_1K SZ_1K
-static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
+resource_size_t pci_min_window_alignment(struct pci_bus *bus, unsigned long type)
{
resource_size_t align = 1, arch_align;
@@ -1084,7 +1084,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t add_size,
if (resource_assigned(b_res))
return;
- min_align = window_alignment(bus, IORESOURCE_IO);
+ min_align = pci_min_window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
struct resource *r;
@@ -1333,13 +1333,20 @@ static void pbus_size_mem(struct pci_bus *bus, struct resource *b_res,
r_size = resource_size(r);
size += max(r_size, align);
- aligns[order] += align;
+ /*
+ * If resource's size is larger than its alignment,
+ * some configurations result in an unwanted gap in
+ * the head space that the larger resource cannot
+ * fill.
+ */
+ if (r_size <= align)
+ aligns[order] += align;
if (order > max_order)
max_order = order;
}
}
- win_align = window_alignment(bus, b_res->flags);
+ win_align = pci_min_window_alignment(bus, b_res->flags);
min_align = calculate_head_align(aligns, max_order);
min_align = max(min_align, win_align);
size0 = calculate_memsize(size, realloc_head ? 0 : add_size,
@@ -1837,6 +1844,7 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
resource_size_t new_size)
{
resource_size_t add_size, size = resource_size(res);
+ struct pci_dev_resource *dev_res;
if (resource_assigned(res))
return;
@@ -1849,9 +1857,46 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
&add_size);
} else if (new_size < size) {
+ int idx = pci_resource_num(bridge, res);
+
+ /*
+ * hpio/mmio/mmioprefsize hasn't been included at all? See the
+ * add_size param at the callsites of calculate_memsize().
+ */
+ if (!add_list)
+ return;
+
+ /* Only shrink if the hotplug extra relates to window size. */
+ switch (idx) {
+ case PCI_BRIDGE_IO_WINDOW:
+ if (size > pci_hotplug_io_size)
+ return;
+ break;
+ case PCI_BRIDGE_MEM_WINDOW:
+ if (size > pci_hotplug_mmio_size)
+ return;
+ break;
+ case PCI_BRIDGE_PREF_MEM_WINDOW:
+ if (size > pci_hotplug_mmio_pref_size)
+ return;
+ break;
+ default:
+ break;
+ }
+
+ dev_res = res_to_dev_res(add_list, res);
add_size = size - new_size;
- pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
- &add_size);
+ if (add_size < dev_res->add_size) {
+ dev_res->add_size -= add_size;
+ pci_dbg(bridge, "bridge window %pR optional size shrunken by %pa\n",
+ res, &add_size);
+ } else {
+ pci_dbg(bridge, "bridge window %pR optional size removed\n",
+ res);
+ pci_dev_res_remove_from_list(add_list, res);
+ }
+ return;
+
} else {
return;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index bb2aef373d6f..fbc05cda96ee 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -245,16 +245,54 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
}
/*
+ * For mem bridge windows, try to relocate tail remainder space to space
+ * before res->start if there's enough free space there. This enables
+ * tighter packing for resources.
+ */
+resource_size_t pci_align_resource(struct pci_dev *dev,
+ const struct resource *res,
+ const struct resource *empty_res,
+ resource_size_t size,
+ resource_size_t align)
+{
+ resource_size_t remainder, start_addr;
+
+ if (!(res->flags & IORESOURCE_MEM))
+ return res->start;
+
+ if (IS_ALIGNED(size, align))
+ return res->start;
+
+ remainder = size - ALIGN_DOWN(size, align);
+ /* Don't mess with size that doesn't align with window size granularity */
+ if (!IS_ALIGNED(remainder, pci_min_window_alignment(dev->bus, res->flags)))
+ return res->start;
+ /* Try to place remainder that doesn't fill align before */
+ if (res->start < remainder)
+ return res->start;
+ start_addr = res->start - remainder;
+ if (empty_res->start > start_addr)
+ return res->start;
+
+ pci_dbg(dev, "%pR: moving candidate start address below align to %llx\n",
+ res, (unsigned long long)start_addr);
+ return start_addr;
+}
+
+/*
* We don't have to worry about legacy ISA devices, so nothing to do here.
* This is marked as __weak because multiple architectures define it; it should
* eventually go away.
*/
resource_size_t __weak pcibios_align_resource(void *data,
const struct resource *res,
+ const struct resource *empty_res,
resource_size_t size,
resource_size_t align)
{
- return res->start;
+ struct pci_dev *dev = data;
+
+ return pci_align_resource(dev, res, empty_res, size, align);
}
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 787311614e5b..6d5cd37bfb1e 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -42,6 +42,15 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
pci_domain_nr(slot->bus),
slot->bus->number);
+ /*
+ * Preserve legacy ABI expectations that hotplug drivers that manage
+ * multiple devices per slot emit 0 for the device number.
+ */
+ if (slot->number == PCI_SLOT_ALL_DEVICES)
+ return sysfs_emit(buf, "%04x:%02x:00\n",
+ pci_domain_nr(slot->bus),
+ slot->bus->number);
+
return sysfs_emit(buf, "%04x:%02x:%02x\n",
pci_domain_nr(slot->bus),
slot->bus->number,
@@ -73,7 +82,8 @@ static void pci_slot_release(struct kobject *kobj)
down_read(&pci_bus_sem);
list_for_each_entry(dev, &slot->bus->devices, bus_list)
- if (PCI_SLOT(dev->devfn) == slot->number)
+ if (slot->number == PCI_SLOT_ALL_DEVICES ||
+ PCI_SLOT(dev->devfn) == slot->number)
dev->slot = NULL;
up_read(&pci_bus_sem);
@@ -96,7 +106,18 @@ static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_cur_speed.attr,
NULL,
};
-ATTRIBUTE_GROUPS(pci_slot_default);
+
+static const struct attribute_group pci_slot_default_group = {
+ .attrs = pci_slot_default_attrs,
+};
+
+static const struct attribute_group *pci_slot_default_groups[] = {
+ &pci_slot_default_group,
+#ifdef ARCH_PCI_SLOT_GROUPS
+ ARCH_PCI_SLOT_GROUPS,
+#endif
+ NULL,
+};
static const struct kobj_type pci_slot_ktype = {
.sysfs_ops = &pci_slot_sysfs_ops,
@@ -166,7 +187,8 @@ void pci_dev_assign_slot(struct pci_dev *dev)
mutex_lock(&pci_slot_mutex);
list_for_each_entry(slot, &dev->bus->slots, list)
- if (PCI_SLOT(dev->devfn) == slot->number)
+ if (slot->number == PCI_SLOT_ALL_DEVICES ||
+ PCI_SLOT(dev->devfn) == slot->number)
dev->slot = slot;
mutex_unlock(&pci_slot_mutex);
}
@@ -188,7 +210,8 @@ static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
/**
* pci_create_slot - create or increment refcount for physical PCI slot
* @parent: struct pci_bus of parent bridge
- * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
+ * @slot_nr: PCI_SLOT(pci_dev->devfn), -1 for placeholder, or
+ * PCI_SLOT_ALL_DEVICES
* @name: user visible string presented in /sys/bus/pci/slots/<name>
* @hotplug: set if caller is hotplug driver, NULL otherwise
*
@@ -222,6 +245,16 @@ static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
* consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
* %struct pci_bus and bb is the bus number. In other words, the devfn of
* the 'placeholder' slot will not be displayed.
+ *
+ * Bus-wide slots:
+ * For PCIe hotplug, the physical slot encompasses the entire secondary
+ * bus, not just a single device number. If the device supports ARI and ARI
+ * Forwarding is enabled in the upstream bridge, a multi-function device
+ * may include functions that appear to have several different device
+ * numbers, i.e., PCI_SLOT() values. Pass @slot_nr == PCI_SLOT_ALL_DEVICES
+ * to create a slot that matches all devices on the bus. Unlike placeholder
+ * slots, bus-wide slots go through normal slot lookup and reuse existing
+ * slots if present.
*/
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name,
@@ -285,7 +318,8 @@ placeholder:
down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
- if (PCI_SLOT(dev->devfn) == slot_nr)
+ if (slot_nr == PCI_SLOT_ALL_DEVICES ||
+ PCI_SLOT(dev->devfn) == slot_nr)
dev->slot = slot;
up_read(&pci_bus_sem);
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index ca4f97be7538..91145e8d9d95 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -236,21 +236,27 @@ static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
* with a specific CPU
* @pdev: PCI device
* @mem_type: target memory type (volatile or persistent RAM)
- * @cpu_uid: associated CPU id
+ * @cpu: associated CPU id
* @tag: Steering Tag to be returned
*
* Return the Steering Tag for a target memory that is associated with a
- * specific CPU as indicated by cpu_uid.
+ * specific CPU as indicated by cpu.
*
* Return: 0 if success, otherwise negative value (-errno)
*/
int pcie_tph_get_cpu_st(struct pci_dev *pdev, enum tph_mem_type mem_type,
- unsigned int cpu_uid, u16 *tag)
+ unsigned int cpu, u16 *tag)
{
#ifdef CONFIG_ACPI
struct pci_dev *rp;
acpi_handle rp_acpi_handle;
union st_info info;
+ u32 cpu_uid;
+ int ret;
+
+ ret = acpi_get_cpu_uid(cpu, &cpu_uid);
+ if (ret != 0)
+ return ret;
rp = pcie_find_root_port(pdev);
if (!rp || !rp->bus || !rp->bus->bridge)
@@ -265,9 +271,9 @@ int pcie_tph_get_cpu_st(struct pci_dev *pdev, enum tph_mem_type mem_type,
*tag = tph_extract_tag(mem_type, pdev->tph_req_type, &info);
- pci_dbg(pdev, "get steering tag: mem_type=%s, cpu_uid=%d, tag=%#04x\n",
+ pci_dbg(pdev, "get steering tag: mem_type=%s, cpu=%d, tag=%#04x\n",
(mem_type == TPH_MEM_TYPE_VM) ? "volatile" : "persistent",
- cpu_uid, *tag);
+ cpu, *tag);
return 0;
#else
@@ -407,10 +413,13 @@ int pcie_enable_tph(struct pci_dev *pdev, int mode)
else
pdev->tph_req_type = PCI_TPH_REQ_TPH_ONLY;
- rp_req_type = get_rp_completer_type(pdev);
+ /* Check if the device is behind a Root Port */
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_RC_END) {
+ rp_req_type = get_rp_completer_type(pdev);
- /* Final req_type is the smallest value of two */
- pdev->tph_req_type = min(pdev->tph_req_type, rp_req_type);
+ /* Final req_type is the smallest value of two */
+ pdev->tph_req_type = min(pdev->tph_req_type, rp_req_type);
+ }
if (pdev->tph_req_type == PCI_TPH_REQ_DISABLE)
return -EINVAL;
diff --git a/drivers/pci/trace.c b/drivers/pci/trace.c
index cf11abca8602..c1da9d3d39d6 100644
--- a/drivers/pci/trace.c
+++ b/drivers/pci/trace.c
@@ -9,3 +9,4 @@
#define CREATE_TRACE_POINTS
#include <trace/events/pci.h>
+#include <trace/events/pci_controller.h>
diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
index d9383bf541e7..c360eee11dd9 100644
--- a/drivers/pci/vgaarb.c
+++ b/drivers/pci/vgaarb.c
@@ -215,6 +215,7 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
struct vga_device *conflict;
unsigned int pci_bits;
u32 flags = 0;
+ int err;
/*
* Account for "normal" resources to lock. If we decode the legacy,
@@ -307,7 +308,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (change_bridge)
flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
- pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
+ err = pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
+ if (err)
+ return ERR_PTR(err);
conflict->owns &= ~match;
/* If we disabled normal decoding, reflect it in owns */
@@ -337,7 +340,9 @@ enable_them:
if (wants & VGA_RSRC_LEGACY_MASK)
flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
- pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
+ err = pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
+ if (err)
+ return ERR_PTR(err);
vgadev->owns |= wants;
lock_them:
@@ -455,6 +460,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
}
conflict = __vga_tryget(vgadev, rsrc);
spin_unlock_irqrestore(&vga_lock, flags);
+ if (IS_ERR(conflict)) {
+ rc = PTR_ERR(conflict);
+ break;
+ }
if (conflict == NULL)
break;
@@ -1134,6 +1143,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
char kbuf[64], *curr_pos;
size_t remaining = count;
+ int err;
int ret_val;
int i;
@@ -1165,7 +1175,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
goto done;
}
- vga_get_uninterruptible(pdev, io_state);
+ err = vga_get_uninterruptible(pdev, io_state);
+ if (err) {
+ ret_val = err;
+ goto done;
+ }
/* Update the client's locks lists */
for (i = 0; i < MAX_USER_CARDS; i++) {