Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/bus.c | 5
-rw-r--r--  drivers/pci/controller/Kconfig | 11
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c | 2
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h | 20
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 69
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 5
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 1
-rw-r--r--  drivers/pci/controller/mobiveil/Kconfig | 1
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 48
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil.h | 1
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 59
-rw-r--r--  drivers/pci/controller/pcie-altera-msi.c | 45
-rw-r--r--  drivers/pci/controller/pcie-altera.c | 3
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 78
-rw-r--r--  drivers/pci/controller/pcie-iproc-msi.c | 46
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 68
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 48
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c | 70
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 2
-rw-r--r--  drivers/pci/controller/pcie-xilinx-dma-pl.c | 49
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 46
-rw-r--r--  drivers/pci/controller/pcie-xilinx.c | 56
-rw-r--r--  drivers/pci/controller/plda/Kconfig | 1
-rw-r--r--  drivers/pci/controller/plda/pcie-plda-host.c | 45
-rw-r--r--  drivers/pci/controller/plda/pcie-plda.h | 1
-rw-r--r--  drivers/pci/controller/plda/pcie-starfive.c | 2
-rw-r--r--  drivers/pci/controller/vmd.c | 239
-rw-r--r--  drivers/pci/endpoint/Kconfig | 8
-rw-r--r--  drivers/pci/endpoint/Makefile | 1
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 130
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-vntb.c | 144
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c | 1
-rw-r--r--  drivers/pci/endpoint/pci-ep-msi.c | 100
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 40
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 2
-rw-r--r--  drivers/pci/iov.c | 153
-rw-r--r--  drivers/pci/pci-acpi.c | 7
-rw-r--r--  drivers/pci/pci-driver.c | 6
-rw-r--r--  drivers/pci/pci.c | 30
-rw-r--r--  drivers/pci/pci.h | 84
-rw-r--r--  drivers/pci/pcie/aer.c | 7
-rw-r--r--  drivers/pci/pcie/aspm.c | 11
-rw-r--r--  drivers/pci/pcie/portdrv.c | 2
-rw-r--r--  drivers/pci/probe.c | 12
-rw-r--r--  drivers/pci/pwrctrl/slot.c | 8
-rw-r--r--  drivers/pci/quirks.c | 6
-rw-r--r--  drivers/pci/setup-bus.c | 3
-rw-r--r--  drivers/pci/setup-res.c | 35
51 files changed, 1185 insertions(+), 642 deletions(-)
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 69048869ef1c..b77fd30bbfd9 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -341,7 +341,6 @@ void pci_bus_add_device(struct pci_dev *dev)
{
struct device_node *dn = dev->dev.of_node;
struct platform_device *pdev;
- int retval;
/*
* Can not put in pci_device_add yet because resources
@@ -372,9 +371,7 @@ void pci_bus_add_device(struct pci_dev *dev)
if (!dn || of_device_is_available(dn))
pci_dev_allow_binding(dev);
- retval = device_attach(&dev->dev);
- if (retval < 0 && retval != -EPROBE_DEFER)
- pci_warn(dev, "device attach failed (%d)\n", retval);
+ device_initial_probe(&dev->dev);
pci_dev_assign_added(dev);
}
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 886f6f43a895..41748d083b93 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -13,6 +13,7 @@ config PCI_AARDVARK
depends on OF
depends on PCI_MSI
select PCI_BRIDGE_EMUL
+ select IRQ_MSI_LIB
help
Add support for Aardvark 64bit PCIe Host Controller. This
controller is part of the South Bridge of the Marvel Armada
@@ -29,6 +30,7 @@ config PCIE_ALTERA_MSI
tristate "Altera PCIe MSI feature"
depends on PCIE_ALTERA
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want PCIe MSI support for the Altera FPGA.
This MSI driver supports Altera MSI to GIC controller IP.
@@ -62,6 +64,7 @@ config PCIE_BRCMSTB
BMIPS_GENERIC || COMPILE_TEST
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
default ARCH_BRCMSTB || BMIPS_GENERIC
help
Say Y here to enable PCIe host controller support for
@@ -98,6 +101,7 @@ config PCIE_IPROC_MSI
bool "Broadcom iProc PCIe MSI support"
depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
depends on PCI_MSI
+ select IRQ_MSI_LIB
default ARCH_BCM_IPROC
help
Say Y here if you want to enable MSI support for Broadcom's iProc
@@ -152,6 +156,7 @@ config PCI_IXP4XX
config VMD
depends on PCI_MSI && X86_64 && !UML
tristate "Intel Volume Management Device Driver"
+ select IRQ_MSI_LIB
help
Adds support for the Intel Volume Management Device (VMD). VMD is a
secondary PCI host bridge that allows PCI Express root ports,
@@ -191,6 +196,7 @@ config PCIE_MEDIATEK
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
@@ -199,6 +205,7 @@ config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed,
@@ -237,6 +244,7 @@ config PCIE_RCAR_HOST
bool "Renesas R-Car PCIe controller (host mode)"
depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want PCIe controller support on R-Car SoCs in host
mode.
@@ -315,6 +323,7 @@ config PCIE_XILINX
bool "Xilinx AXI PCIe controller"
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -324,6 +333,7 @@ config PCIE_XILINX_DMA_PL
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
Say 'Y' here if you want kernel support for the Xilinx PL DMA
PCIe host bridge. The controller is a Soft IP which can act as
@@ -334,6 +344,7 @@ config PCIE_XILINX_NWL
bool "Xilinx NWL PCIe controller"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say 'Y' here if you want kernel support for Xilinx
NWL PCIe controller. The controller can act as Root Port
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 8ab6cf70c18e..77c5a19b2ab1 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -353,7 +353,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
}
spin_unlock_irqrestore(&ep->lock, flags);
- offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
+ offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) |
CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index a149845d341a..1d81c4bf6c6d 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -250,26 +250,6 @@ struct cdns_pcie_rp_ib_bar {
struct cdns_pcie;
-enum cdns_pcie_msg_routing {
- /* Route to Root Complex */
- MSG_ROUTING_TO_RC,
-
- /* Use Address Routing */
- MSG_ROUTING_BY_ADDR,
-
- /* Use ID Routing */
- MSG_ROUTING_BY_ID,
-
- /* Route as Broadcast Message from Root Complex */
- MSG_ROUTING_BCAST,
-
- /* Local message; terminate at receiver (INTx messages) */
- MSG_ROUTING_LOCAL,
-
- /* Gather & route to Root Complex (PME_TO_Ack message) */
- MSG_ROUTING_GATHER,
-};
-
struct cdns_pcie_ops {
int (*start_link)(struct cdns_pcie *pcie);
void (*stop_link)(struct cdns_pcie *pcie);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index d9f0386396ed..5dfa5592bf07 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -19,6 +19,7 @@ config PCIE_DW_DEBUGFS
config PCIE_DW_HOST
bool
select PCIE_DW
+ select IRQ_MSI_LIB
config PCIE_DW_EP
bool
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 906277f9ffaf..101a64187f2a 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -10,6 +10,7 @@
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
@@ -23,35 +24,21 @@
static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;
-static void dw_msi_ack_irq(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void dw_msi_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void dw_msi_unmask_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip dw_pcie_msi_irq_chip = {
- .name = "PCI-MSI",
- .irq_ack = dw_msi_ack_irq,
- .irq_mask = dw_msi_mask_irq,
- .irq_unmask = dw_msi_unmask_irq,
-};
-
-static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
- MSI_FLAG_MULTI_PCI_MSI,
- .chip = &dw_pcie_msi_irq_chip,
+#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
+ .required_flags = DW_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "DW-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
/* MSI int handler */
@@ -227,26 +214,19 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_fwnode_handle(pci->dev->of_node);
-
- pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
- &dw_pcie_msi_domain_ops, pp);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pci->dev),
+ .ops = &dw_pcie_msi_domain_ops,
+ .size = pp->num_vectors,
+ .host_data = pp,
+ };
+
+ pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
if (!pp->irq_domain) {
dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
-
- pp->msi_domain = pci_msi_create_irq_domain(fwnode,
- &dw_pcie_msi_domain_info,
- pp->irq_domain);
- if (!pp->msi_domain) {
- dev_err(pci->dev, "Failed to create MSI domain\n");
- irq_domain_remove(pp->irq_domain);
- return -ENOMEM;
- }
-
return 0;
}
@@ -260,7 +240,6 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
NULL, NULL);
}
- irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
}
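
Note: the hunk above is the template for most of the controller conversions in this series. The per-driver irq_chip/msi_domain_info pair and the separate pci_msi_create_irq_domain() call are dropped, and the single bottom domain is registered as an MSI parent via msi_create_parent_irq_domain(). A minimal sketch of the resulting setup, using hypothetical foo_* names and FOO_NUM_MSI_VECTORS rather than any real driver's symbols:

#include <linux/device.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/property.h>

#define FOO_NUM_MSI_VECTORS     32

struct foo_pcie {
        struct device *dev;
        struct irq_domain *msi_domain;
};

/* The driver's existing hierarchical .alloc/.free callbacks go here. */
static const struct irq_domain_ops foo_msi_bottom_domain_ops = {
};

#define FOO_MSI_FLAGS_REQUIRED  (MSI_FLAG_USE_DEF_DOM_OPS |     \
                                 MSI_FLAG_USE_DEF_CHIP_OPS |    \
                                 MSI_FLAG_NO_AFFINITY)
#define FOO_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK |       \
                                 MSI_FLAG_PCI_MSIX)

static const struct msi_parent_ops foo_msi_parent_ops = {
        .required_flags         = FOO_MSI_FLAGS_REQUIRED,
        .supported_flags        = FOO_MSI_FLAGS_SUPPORTED,
        .bus_select_token       = DOMAIN_BUS_PCI_MSI,
        .chip_flags             = MSI_CHIP_FLAG_SET_ACK,
        .prefix                 = "FOO-",
        .init_dev_msi_info      = msi_lib_init_dev_msi_info,
};

static int foo_allocate_msi_domain(struct foo_pcie *pcie)
{
        struct irq_domain_info info = {
                .fwnode         = dev_fwnode(pcie->dev),
                .ops            = &foo_msi_bottom_domain_ops,
                .host_data      = pcie,
                .size           = FOO_NUM_MSI_VECTORS,
        };

        /* One call replaces the old inner domain + pci_msi_create_irq_domain() pair. */
        pcie->msi_domain = msi_create_parent_irq_domain(&info, &foo_msi_parent_ops);
        if (!pcie->msi_domain)
                return -ENOMEM;

        return 0;
}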
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 4d794964fa0f..89aad5a08928 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -702,18 +702,26 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
int retries;
/* Check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) {
if (dw_pcie_link_up(pci))
break;
- msleep(LINK_WAIT_SLEEP_MS);
+ msleep(PCIE_LINK_WAIT_SLEEP_MS);
}
- if (retries >= LINK_WAIT_MAX_RETRIES) {
+ if (retries >= PCIE_LINK_WAIT_MAX_RETRIES) {
dev_info(pci->dev, "Phy link never came up\n");
return -ETIMEDOUT;
}
+ /*
+ * As per PCIe r6.0, sec 6.6.1, a Downstream Port that supports Link
+ * speeds greater than 5.0 GT/s, software must wait a minimum of 100 ms
+ * after Link training completes before sending a Configuration Request.
+ */
+ if (pci->max_link_speed > 2)
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
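
Note: the added wait is a generic PCIe rule rather than anything DWC-specific. A hedged sketch of the same logic in isolation; foo_wait_for_link() and the link_up() callback are illustrative, and the PCIE_LINK_WAIT_* / PCIE_RESET_CONFIG_WAIT_MS constants are assumed to come from drivers/pci/pci.h (fallback values below mirror the defines this hunk removes and the 100 ms reset-to-config time):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#ifndef PCIE_LINK_WAIT_MAX_RETRIES              /* normally from drivers/pci/pci.h */
#define PCIE_LINK_WAIT_MAX_RETRIES      10
#define PCIE_LINK_WAIT_SLEEP_MS         90
#define PCIE_RESET_CONFIG_WAIT_MS       100
#endif

static int foo_wait_for_link(bool (*link_up)(void *ctx), void *ctx, int max_link_speed)
{
        int retries;

        /* Poll until the data link reports up, as dw_pcie_wait_for_link() does. */
        for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) {
                if (link_up(ctx))
                        break;
                msleep(PCIE_LINK_WAIT_SLEEP_MS);
        }

        if (retries >= PCIE_LINK_WAIT_MAX_RETRIES)
                return -ETIMEDOUT;

        /*
         * PCIe r6.0, sec 6.6.1: ports supporting speeds greater than
         * 5.0 GT/s must wait at least 100 ms after link training before
         * the first Configuration Request is sent downstream.
         */
        if (max_link_speed > 2)         /* Link Speed field: 2 == 5.0 GT/s */
                msleep(PCIE_RESET_CONFIG_WAIT_MS);

        return 0;
}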
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index ce9e18554e42..ea4284ee06d4 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -62,10 +62,6 @@
#define dw_pcie_cap_set(_pci, _cap) \
set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_SLEEP_MS 90
-
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
@@ -417,7 +413,6 @@ struct dw_pcie_rp {
const struct dw_pcie_host_ops *ops;
int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
- struct irq_domain *msi_domain;
dma_addr_t msi_data;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 93171a392879..108d30637920 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -458,6 +458,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
if (rockchip_pcie_link_up(pci)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
/* Rescan the bus to enumerate endpoint devices */
pci_lock_rescan_remove();
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index c789e3f85655..9b12f2f02042 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1564,6 +1564,7 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
/* Rescan the bus to enumerate endpoint devices */
pci_lock_rescan_remove();
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
index 58ce034f701a..c50c4625937f 100644
--- a/drivers/pci/controller/mobiveil/Kconfig
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -9,6 +9,7 @@ config PCIE_MOBIVEIL
config PCIE_MOBIVEIL_HOST
bool
depends on PCI_MSI
+ select IRQ_MSI_LIB
select PCIE_MOBIVEIL
config PCIE_LAYERSCAPE_GEN4
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index a600f46ee3c3..dbc72c73fd0a 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
@@ -353,16 +354,19 @@ static const struct irq_domain_ops intx_domain_ops = {
.map = mobiveil_pcie_intx_map,
};
-static struct irq_chip mobiveil_msi_irq_chip = {
- .name = "Mobiveil PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define MOBIVEIL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define MOBIVEIL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
-static struct msi_domain_info mobiveil_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &mobiveil_msi_irq_chip,
+static const struct msi_parent_ops mobiveil_msi_parent_ops = {
+ .required_flags = MOBIVEIL_MSI_FLAGS_REQUIRED,
+ .supported_flags = MOBIVEIL_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "Mobiveil-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -435,23 +439,20 @@ static const struct irq_domain_ops msi_domain_ops = {
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_init(&msi->lock);
- msi->dev_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, pcie);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &mobiveil_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &msi_domain_ops,
+ .host_data = pcie,
+ .size = msi->num_of_vectors,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &mobiveil_msi_parent_ops);
+ if (!msi->dev_domain) {
dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
@@ -464,9 +465,8 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
struct mobiveil_root_port *rp = &pcie->rp;
/* setup INTx */
- rp->intx_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), PCI_NUM_INTX,
- &intx_domain_ops, pcie);
-
+ rp->intx_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX, &intx_domain_ops,
+ pcie);
if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index 662f17f9bf65..7246de6a7176 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -135,7 +135,6 @@
struct mobiveil_msi { /* MSI information */
struct mutex lock; /* protect bitmap variable */
- struct irq_domain *msi_domain;
struct irq_domain *dev_domain;
phys_addr_t msi_pages_phys;
int num_of_vectors;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 7bac64533b14..e34bea1ff0ac 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -13,6 +13,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -278,7 +279,6 @@ struct advk_pcie {
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
raw_spinlock_t irq_lock;
- struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
raw_spinlock_t msi_irq_lock;
DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
@@ -1332,18 +1332,6 @@ static void advk_msi_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}
-static void advk_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void advk_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
static struct irq_chip advk_msi_bottom_irq_chip = {
.name = "MSI",
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
@@ -1436,17 +1424,20 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static struct irq_chip advk_msi_irq_chip = {
- .name = "advk-MSI",
- .irq_mask = advk_msi_top_irq_mask,
- .irq_unmask = advk_msi_top_irq_unmask,
-};
-
-static struct msi_domain_info advk_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
- MSI_FLAG_PCI_MSIX,
- .chip = &advk_msi_irq_chip,
+#define ADVK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+#define ADVK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops advk_msi_parent_ops = {
+ .required_flags = ADVK_MSI_FLAGS_REQUIRED,
+ .supported_flags = ADVK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "advk-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
@@ -1456,26 +1447,22 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM,
- &advk_msi_domain_ops, pcie);
- if (!pcie->msi_inner_domain)
- return -ENOMEM;
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &advk_msi_domain_ops,
+ .host_data = pcie,
+ .size = MSI_IRQ_NUM,
+ };
- pcie->msi_domain =
- pci_msi_create_irq_domain(dev_fwnode(dev),
- &advk_msi_domain_info,
- pcie->msi_inner_domain);
- if (!pcie->msi_domain) {
- irq_domain_remove(pcie->msi_inner_domain);
+ pcie->msi_inner_domain = msi_create_parent_irq_domain(&info, &advk_msi_parent_ops);
+ if (!pcie->msi_inner_domain)
return -ENOMEM;
- }
return 0;
}
static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
- irq_domain_remove(pcie->msi_domain);
irq_domain_remove(pcie->msi_inner_domain);
}
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index a43f21eb8fbb..ea2ca2e70f20 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -29,7 +30,6 @@ struct altera_msi {
DECLARE_BITMAP(used, MAX_MSI_VECTORS);
struct mutex lock; /* protect "used" bitmap */
struct platform_device *pdev;
- struct irq_domain *msi_domain;
struct irq_domain *inner_domain;
void __iomem *csr_base;
void __iomem *vector_base;
@@ -74,18 +74,20 @@ static void altera_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static struct irq_chip altera_msi_irq_chip = {
- .name = "Altera PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define ALTERA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info altera_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &altera_msi_irq_chip,
-};
+#define ALTERA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+static const struct msi_parent_ops altera_msi_parent_ops = {
+ .required_flags = ALTERA_MSI_FLAGS_REQUIRED,
+ .supported_flags = ALTERA_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "Altera-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct altera_msi *msi = irq_data_get_irq_chip_data(data);
@@ -164,20 +166,16 @@ static const struct irq_domain_ops msi_domain_ops = {
static int altera_allocate_domains(struct altera_msi *msi)
{
- struct fwnode_handle *fwnode = of_fwnode_handle(msi->pdev->dev.of_node);
-
- msi->inner_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, msi);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(&msi->pdev->dev),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->num_of_vectors,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &altera_msi_parent_ops);
if (!msi->inner_domain) {
- dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &altera_msi_domain_info, msi->inner_domain);
- if (!msi->msi_domain) {
dev_err(&msi->pdev->dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -186,7 +184,6 @@ static int altera_allocate_domains(struct altera_msi *msi)
static void altera_free_domains(struct altera_msi *msi)
{
- irq_domain_remove(msi->msi_domain);
irq_domain_remove(msi->inner_domain);
}
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 0fc77176a52e..3dbb7adc421c 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -852,10 +852,9 @@ static void aglx_isr(struct irq_desc *desc)
static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX,
+ pcie->irq_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 92887b394eb4..b664d8bcd198 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -46,6 +47,7 @@
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
#define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8
@@ -55,6 +57,9 @@
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_REG_PHY_CTL_1 0x1804
+#define PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK 0x8
+
#define PCIE_RC_PL_PHY_CTL_15 0x184c
#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000
#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff
@@ -265,7 +270,6 @@ struct brcm_msi {
struct device *dev;
void __iomem *base;
struct device_node *np;
- struct irq_domain *msi_domain;
struct irq_domain *inner_domain;
struct mutex lock; /* guards the alloc/free operations */
u64 target_addr;
@@ -465,17 +469,20 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}
-static struct irq_chip brcm_msi_irq_chip = {
- .name = "BRCM STB PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define BRCM_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info brcm_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
- .chip = &brcm_msi_irq_chip,
+#define BRCM_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops brcm_msi_parent_ops = {
+ .required_flags = BRCM_MSI_FLAGS_REQUIRED,
+ .supported_flags = BRCM_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "BRCM-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void brcm_pcie_msi_isr(struct irq_desc *desc)
@@ -581,21 +588,18 @@ static const struct irq_domain_ops msi_domain_ops = {
static int brcm_allocate_domains(struct brcm_msi *msi)
{
- struct fwnode_handle *fwnode = of_fwnode_handle(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi);
- if (!msi->inner_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(msi->np),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->nr,
+ };
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &brcm_msi_domain_info,
- msi->inner_domain);
- if (!msi->msi_domain) {
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &brcm_msi_parent_ops);
+ if (!msi->inner_domain) {
dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -604,7 +608,6 @@ static int brcm_allocate_domains(struct brcm_msi *msi)
static void brcm_free_domains(struct brcm_msi *msi)
{
- irq_domain_remove(msi->msi_domain);
irq_domain_remove(msi->inner_domain);
}
@@ -1072,7 +1075,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
void __iomem *base = pcie->base;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
- u32 tmp, burst, aspm_support;
+ u32 tmp, burst, aspm_support, num_lanes, num_lanes_cap;
u8 num_out_wins = 0;
int num_inbound_wins = 0;
int memc, ret;
@@ -1180,6 +1183,27 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */
+ num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+ num_lanes = 0;
+
+ /*
+ * Use hardware negotiated Max Link Width value by default. If the
+ * "num-lanes" DT property is present, assume that the chip's default
+ * link width capability information is incorrect/undesired and use the
+ * specified value instead.
+ */
+ if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) &&
+ num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) {
+ u32p_replace_bits(&tmp, num_lanes,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1);
+ u32p_replace_bits(&tmp, 1,
+ PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK);
+ writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1);
+ }
+
/*
* For config space accesses on the RC, show the right class for
* a PCIe-PCIe bridge (the default setting is to be EP mode).
@@ -1333,11 +1357,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
if (ret)
return ret;
- /*
- * Wait for 100ms after PERST# deassertion; see PCIe CEM specification
- * sections 2.2, PCIe r5.0, 6.6.1.
- */
- msleep(100);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
/*
* Give the RC/EP even more time to wake up, before trying to
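
Note: a hedged sketch of the "num-lanes" override added to pcie-brcmstb.c above, with the BRCM register specifics reduced to a generic link-capability value; FOO_MAX_LINK_WIDTH_MASK and foo_apply_num_lanes() are illustrative names, not symbols from the patch:

#include <linux/bitfield.h>
#include <linux/of.h>

#define FOO_MAX_LINK_WIDTH_MASK 0x1f0   /* Maximum Link Width field */

/*
 * Keep the hardware-reported Maximum Link Width unless the devicetree
 * "num-lanes" property requests a different (1..4 lane) value.
 */
static u32 foo_apply_num_lanes(struct device_node *np, u32 link_cap)
{
        u32 hw_lanes = u32_get_bits(link_cap, FOO_MAX_LINK_WIDTH_MASK);
        u32 num_lanes = 0;

        if (!of_property_read_u32(np, "num-lanes", &num_lanes) &&
            num_lanes && num_lanes <= 4 && num_lanes != hw_lanes)
                u32p_replace_bits(&link_cap, num_lanes, FOO_MAX_LINK_WIDTH_MASK);

        return link_cap;        /* caller writes the result back to the register */
}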
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index d2cb4c4f821a..9ba242ab9596 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -5,6 +5,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -81,7 +82,6 @@ struct iproc_msi_grp {
* @bitmap_lock: lock to protect access to the MSI bitmap
* @nr_msi_vecs: total number of MSI vectors
* @inner_domain: inner IRQ domain
- * @msi_domain: MSI IRQ domain
* @nr_eq_region: required number of 4K aligned memory region for MSI event
* queues
* @nr_msi_region: required number of 4K aligned address region for MSI posted
@@ -101,7 +101,6 @@ struct iproc_msi {
struct mutex bitmap_lock;
unsigned int nr_msi_vecs;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
unsigned int nr_eq_region;
unsigned int nr_msi_region;
void *eq_cpu;
@@ -165,16 +164,18 @@ static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
return eq * EQ_LEN * sizeof(u32);
}
-static struct irq_chip iproc_msi_irq_chip = {
- .name = "iProc-MSI",
+#define IPROC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+#define IPROC_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static struct msi_parent_ops iproc_msi_parent_ops = {
+ .required_flags = IPROC_MSI_FLAGS_REQUIRED,
+ .supported_flags = IPROC_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "iProc-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-
-static struct msi_domain_info iproc_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
- .chip = &iproc_msi_irq_chip,
-};
-
/*
* In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
* dedicated event queue. Each MSI group can support up to 64 MSI vectors.
@@ -446,27 +447,22 @@ static void iproc_msi_disable(struct iproc_msi *msi)
static int iproc_msi_alloc_domains(struct device_node *node,
struct iproc_msi *msi)
{
- msi->inner_domain = irq_domain_create_linear(NULL, msi->nr_msi_vecs,
- &msi_domain_ops, msi);
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->nr_msi_vecs,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &iproc_msi_parent_ops);
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node),
- &iproc_msi_domain_info,
- msi->inner_domain);
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
return 0;
}
static void iproc_msi_free_domains(struct iproc_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
-
if (msi->inner_domain)
irq_domain_remove(msi->inner_domain);
}
@@ -542,7 +538,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
msi->nr_cpus = num_possible_cpus();
if (msi->nr_cpus == 1)
- iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;
+ iproc_msi_parent_ops.supported_flags |= MSI_FLAG_MULTI_PCI_MSI;
msi->nr_irqs = of_irq_count(node);
if (!msi->nr_irqs) {
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index b55f5973414c..97147f43e41c 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
@@ -187,7 +188,6 @@ struct mtk_msi_set {
* @saved_irq_state: IRQ enable state saved at suspend time
* @irq_lock: lock protecting IRQ register access
* @intx_domain: legacy INTx IRQ domain
- * @msi_domain: MSI IRQ domain
* @msi_bottom_domain: MSI IRQ bottom domain
* @msi_sets: MSI sets information
* @lock: lock protecting IRQ bit map
@@ -210,7 +210,6 @@ struct mtk_gen3_pcie {
u32 saved_irq_state;
raw_spinlock_t irq_lock;
struct irq_domain *intx_domain;
- struct irq_domain *msi_domain;
struct irq_domain *msi_bottom_domain;
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
struct mutex lock;
@@ -526,30 +525,22 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
return 0;
}
-static void mtk_pcie_msi_irq_mask(struct irq_data *data)
-{
- pci_msi_mask_irq(data);
- irq_chip_mask_parent(data);
-}
-
-static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
-{
- pci_msi_unmask_irq(data);
- irq_chip_unmask_parent(data);
-}
-
-static struct irq_chip mtk_msi_irq_chip = {
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = mtk_pcie_msi_irq_mask,
- .irq_unmask = mtk_pcie_msi_irq_unmask,
- .name = "MSI",
-};
-
-static struct msi_domain_info mtk_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
- MSI_FLAG_MULTI_PCI_MSI,
- .chip = &mtk_msi_irq_chip,
+#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops mtk_msi_parent_ops = {
+ .required_flags = MTK_MSI_FLAGS_REQUIRED,
+ .supported_flags = MTK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "MTK3-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -756,29 +747,23 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
/* Setup MSI */
mutex_init(&pcie->lock);
- pcie->msi_bottom_domain = irq_domain_create_linear(of_fwnode_handle(node),
- PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, pcie);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &mtk_msi_bottom_domain_ops,
+ .host_data = pcie,
+ .size = PCIE_MSI_IRQS_NUM,
+ };
+
+ pcie->msi_bottom_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
goto err_msi_bottom_domain;
}
- pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
- &mtk_msi_domain_info,
- pcie->msi_bottom_domain);
- if (!pcie->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- ret = -ENODEV;
- goto err_msi_domain;
- }
-
of_node_put(intc_node);
return 0;
-err_msi_domain:
- irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
irq_domain_remove(pcie->intx_domain);
out_put_node:
@@ -793,9 +778,6 @@ static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
if (pcie->intx_domain)
irq_domain_remove(pcie->intx_domain);
- if (pcie->msi_domain)
- irq_domain_remove(pcie->msi_domain);
-
if (pcie->msi_bottom_domain)
irq_domain_remove(pcie->msi_bottom_domain);
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index e1934aa06c8d..24cc30a2ab6c 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -180,7 +181,6 @@ struct mtk_pcie_soc {
* @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
* @inner_domain: inner IRQ domain
- * @msi_domain: MSI IRQ domain
* @lock: protect the msi_irq_in_use bitmap
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
@@ -200,7 +200,6 @@ struct mtk_pcie_port {
int irq;
struct irq_domain *irq_domain;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
@@ -470,40 +469,39 @@ static const struct irq_domain_ops msi_domain_ops = {
.free = mtk_pcie_irq_domain_free,
};
-static struct irq_chip mtk_msi_irq_chip = {
- .name = "MTK PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
-static struct msi_domain_info mtk_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &mtk_msi_irq_chip,
+static const struct msi_parent_ops mtk_msi_parent_ops = {
+ .required_flags = MTK_MSI_FLAGS_REQUIRED,
+ .supported_flags = MTK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "MTK-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- struct fwnode_handle *fwnode = of_fwnode_handle(port->pcie->dev->of_node);
-
mutex_init(&port->lock);
- port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
- &msi_domain_ops, port);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(port->pcie->dev),
+ .ops = &msi_domain_ops,
+ .host_data = port,
+ .size = MTK_MSI_IRQS_NUM,
+ };
+
+ port->inner_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
if (!port->inner_domain) {
dev_err(port->pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
- port->inner_domain);
- if (!port->msi_domain) {
- dev_err(port->pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(port->inner_domain);
- return -ENOMEM;
- }
-
return 0;
}
@@ -532,8 +530,6 @@ static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
irq_domain_remove(port->irq_domain);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- if (port->msi_domain)
- irq_domain_remove(port->msi_domain);
if (port->inner_domain)
irq_domain_remove(port->inner_domain);
}
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index c32b803a47c7..fe288fd770c4 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -597,30 +598,6 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void rcar_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void rcar_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void rcar_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip rcar_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = rcar_msi_top_irq_ack,
- .irq_mask = rcar_msi_top_irq_mask,
- .irq_unmask = rcar_msi_top_irq_unmask,
-};
-
static void rcar_msi_irq_ack(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
@@ -718,30 +695,36 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
.free = rcar_msi_domain_free,
};
-static struct msi_domain_info rcar_msi_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
- .chip = &rcar_msi_top_chip,
+#define RCAR_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define RCAR_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops rcar_msi_parent_ops = {
+ .required_flags = RCAR_MSI_FLAGS_REQUIRED,
+ .supported_flags = RCAR_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "RCAR-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int rcar_allocate_domains(struct rcar_msi *msi)
{
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &rcar_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &rcar_msi_domain_ops,
+ .host_data = msi,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->domain = msi_create_parent_irq_domain(&info, &rcar_msi_parent_ops);
if (!msi->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -750,10 +733,7 @@ static int rcar_allocate_domains(struct rcar_msi *msi)
static void rcar_free_domains(struct rcar_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index b9e7a8710cf0..c11ed45c25f6 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -325,7 +325,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(rockchip->perst_gpio, 1);
- msleep(PCIE_T_RRS_READY_MS);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
/* 500ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index dc9690a535e1..b037c8f315e4 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -90,7 +91,6 @@ struct xilinx_pl_dma_variant {
};
struct xilinx_msi {
- struct irq_domain *msi_domain;
unsigned long *bitmap;
struct irq_domain *dev_domain;
struct mutex lock; /* Protect bitmap variable */
@@ -373,20 +373,20 @@ static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irq_chip xilinx_msi_irq_chip = {
- .name = "pl_dma:PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info xilinx_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
- .chip = &xilinx_msi_irq_chip,
-};
+#define XILINX_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+static const struct msi_parent_ops xilinx_msi_parent_ops = {
+ .required_flags = XILINX_MSI_FLAGS_REQUIRED,
+ .supported_flags = XILINX_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "pl_dma-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -458,11 +458,6 @@ static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
irq_domain_remove(msi->dev_domain);
msi->dev_domain = NULL;
}
-
- if (msi->msi_domain) {
- irq_domain_remove(msi->msi_domain);
- msi->msi_domain = NULL;
- }
}
static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
@@ -470,19 +465,17 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
struct device *dev = port->dev;
struct xilinx_msi *msi = &port->msi;
int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
- struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node);
-
- msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS,
- &dev_msi_domain_ops, port);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(port->dev),
+ .ops = &dev_msi_domain_ops,
+ .host_data = port,
+ .size = XILINX_NUM_MSI_IRQS,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
if (!msi->dev_domain)
goto out;
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &xilinx_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain)
- goto out;
-
mutex_init(&msi->lock);
msi->bitmap = kzalloc(size, GFP_KERNEL);
if (!msi->bitmap)
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index c8b05477b719..05b8c205493c 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -145,7 +146,6 @@
#define LINK_WAIT_USLEEP_MAX 100000
struct nwl_msi { /* MSI information */
- struct irq_domain *msi_domain;
DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
struct irq_domain *dev_domain;
struct mutex lock; /* protect bitmap variable */
@@ -418,19 +418,22 @@ static const struct irq_domain_ops intx_domain_ops = {
};
#ifdef CONFIG_PCI_MSI
-static struct irq_chip nwl_msi_irq_chip = {
- .name = "nwl_pcie:msi",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-static struct msi_domain_info nwl_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
- .chip = &nwl_msi_irq_chip,
+#define NWL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define NWL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops nwl_msi_parent_ops = {
+ .required_flags = NWL_MSI_FLAGS_REQUIRED,
+ .supported_flags = NWL_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "nwl-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
+
#endif
static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -495,22 +498,19 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct device *dev = pcie->dev;
- struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
-
- msi->dev_domain = irq_domain_create_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &dev_msi_domain_ops,
+ .host_data = pcie,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &nwl_msi_parent_ops);
if (!msi->dev_domain) {
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
}
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &nwl_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create msi IRQ domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
#endif
return 0;
}
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index e36aa874bae9..f121836c3cf4 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -203,11 +204,6 @@ static void xilinx_msi_top_irq_ack(struct irq_data *d)
*/
}
-static struct irq_chip xilinx_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = xilinx_msi_top_irq_ack,
-};
-
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -264,29 +260,42 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
.free = xilinx_msi_domain_free,
};
-static struct msi_domain_info xilinx_msi_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY,
- .chip = &xilinx_msi_top_chip,
+static bool xilinx_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ chip->irq_ack = xilinx_msi_top_irq_ack;
+ return true;
+}
+
+#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+static const struct msi_parent_ops xilinx_msi_parent_ops = {
+ .required_flags = XILINX_MSI_FLAGS_REQUIRED,
+ .supported_flags = MSI_GENERIC_FLAGS_MASK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "xilinx-",
+ .init_dev_msi_info = xilinx_init_dev_msi_info,
};
static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
{
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS,
- &xilinx_msi_domain_ops, pcie);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &xilinx_msi_domain_ops,
+ .host_data = pcie,
+ .size = XILINX_NUM_MSI_IRQS,
+ };
+
+ pcie->msi_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
if (!pcie->msi_domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
@@ -295,10 +304,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
static void xilinx_free_msi_domains(struct xilinx_pcie *pcie)
{
- struct irq_domain *parent = pcie->msi_domain->parent;
-
irq_domain_remove(pcie->msi_domain);
- irq_domain_remove(parent);
}
/* INTx Functions */
diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig
index c0e14146d7e4..62120101139c 100644
--- a/drivers/pci/controller/plda/Kconfig
+++ b/drivers/pci/controller/plda/Kconfig
@@ -5,6 +5,7 @@ menu "PLDA-based PCIe controllers"
config PCIE_PLDA_HOST
bool
+ select IRQ_MSI_LIB
config PCIE_MICROCHIP_HOST
tristate "Microchip AXI PCIe controller"
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 3abedf723215..8e2db2e5b64b 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -11,6 +11,7 @@
#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
@@ -134,42 +135,41 @@ static const struct irq_domain_ops msi_domain_ops = {
.free = plda_irq_msi_domain_free,
};
-static struct irq_chip plda_msi_irq_chip = {
- .name = "PLDA PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info plda_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &plda_msi_irq_chip,
+#define PLDA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+#define PLDA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops plda_msi_parent_ops = {
+ .required_flags = PLDA_MSI_FLAGS_REQUIRED,
+ .supported_flags = PLDA_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "PLDA-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct plda_msi *msi = &port->msi;
mutex_init(&port->msi.lock);
- msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &msi_domain_ops,
+ .host_data = port,
+ .size = msi->num_vectors,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops);
if (!msi->dev_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &plda_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
-
return 0;
}
@@ -563,7 +563,6 @@ static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);
- irq_domain_remove(pcie->msi.msi_domain);
irq_domain_remove(pcie->msi.dev_domain);
irq_domain_remove(pcie->intx_domain);
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
index 61ece26065ea..6b8665df7bf0 100644
--- a/drivers/pci/controller/plda/pcie-plda.h
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -164,7 +164,6 @@ struct plda_pcie_host_ops {
struct plda_msi {
struct mutex lock; /* Protect used bitmap */
- struct irq_domain *msi_domain;
struct irq_domain *dev_domain;
u32 num_vectors;
u64 vector_phy;
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
index e73c1b7bc8ef..3caf53c6c082 100644
--- a/drivers/pci/controller/plda/pcie-starfive.c
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -368,7 +368,7 @@ static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
* of 100ms following exit from a conventional reset before
* sending a configuration request to the device.
*/
- msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
if (starfive_pcie_host_wait_for_link(pcie))
dev_info(dev, "port link down\n");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 8df064b62a2f..50f0c91d561c 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -174,58 +175,52 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}
-/*
- * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
- */
static void vmd_irq_enable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- unsigned long flags;
- raw_spin_lock_irqsave(&list_lock, flags);
- WARN_ON(vmdirq->enabled);
- list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
- vmdirq->enabled = true;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &list_lock) {
+ WARN_ON(vmdirq->enabled);
+ list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+ vmdirq->enabled = true;
+ }
+}
+static void vmd_pci_msi_enable(struct irq_data *data)
+{
+ vmd_irq_enable(data->parent_data);
data->chip->irq_unmask(data);
}
static void vmd_irq_disable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- unsigned long flags;
- data->chip->irq_mask(data);
-
- raw_spin_lock_irqsave(&list_lock, flags);
- if (vmdirq->enabled) {
- list_del_rcu(&vmdirq->node);
- vmdirq->enabled = false;
+ scoped_guard(raw_spinlock_irqsave, &list_lock) {
+ if (vmdirq->enabled) {
+ list_del_rcu(&vmdirq->node);
+ vmdirq->enabled = false;
+ }
}
- raw_spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void vmd_pci_msi_disable(struct irq_data *data)
+{
+ data->chip->irq_mask(data);
+ vmd_irq_disable(data->parent_data);
}
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
- .irq_enable = vmd_irq_enable,
- .irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
};
-static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return 0;
-}
-
/*
* XXX: We can be even smarter selecting the best IRQ once we solve the
* affinity problem.
*/
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
- unsigned long flags;
int i, best;
if (vmd->msix_count == 1 + vmd->first_vec)
@@ -242,113 +237,129 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
return &vmd->irqs[vmd->first_vec];
}
- raw_spin_lock_irqsave(&list_lock, flags);
- best = vmd->first_vec + 1;
- for (i = best; i < vmd->msix_count; i++)
- if (vmd->irqs[i].count < vmd->irqs[best].count)
- best = i;
- vmd->irqs[best].count++;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ scoped_guard(raw_spinlock_irq, &list_lock) {
+ best = vmd->first_vec + 1;
+ for (i = best; i < vmd->msix_count; i++)
+ if (vmd->irqs[i].count < vmd->irqs[best].count)
+ best = i;
+ vmd->irqs[best].count++;
+ }
return &vmd->irqs[best];
}
-static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int virq, irq_hw_number_t hwirq,
- msi_alloc_info_t *arg)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+
+static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
- struct msi_desc *desc = arg->desc;
- struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
- struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ struct msi_desc *desc = ((msi_alloc_info_t *)arg)->desc;
+ struct vmd_dev *vmd = domain->host_data;
+ struct vmd_irq *vmdirq;
- if (!vmdirq)
- return -ENOMEM;
+ for (int i = 0; i < nr_irqs; ++i) {
+ vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ if (!vmdirq) {
+ vmd_msi_free(domain, virq, i);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&vmdirq->node);
+ vmdirq->irq = vmd_next_irq(vmd, desc);
+ vmdirq->virq = virq + i;
- INIT_LIST_HEAD(&vmdirq->node);
- vmdirq->irq = vmd_next_irq(vmd, desc);
- vmdirq->virq = virq;
+ irq_domain_set_info(domain, virq + i, vmdirq->irq->virq,
+ &vmd_msi_controller, vmdirq,
+ handle_untracked_irq, vmd, NULL);
+ }
- irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
- handle_untracked_irq, vmd, NULL);
return 0;
}
-static void vmd_msi_free(struct irq_domain *domain,
- struct msi_domain_info *info, unsigned int virq)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
- struct vmd_irq *vmdirq = irq_get_chip_data(virq);
- unsigned long flags;
+ struct vmd_irq *vmdirq;
- synchronize_srcu(&vmdirq->irq->srcu);
+ for (int i = 0; i < nr_irqs; ++i) {
+ vmdirq = irq_get_chip_data(virq + i);
- /* XXX: Potential optimization to rebalance */
- raw_spin_lock_irqsave(&list_lock, flags);
- vmdirq->irq->count--;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ synchronize_srcu(&vmdirq->irq->srcu);
- kfree(vmdirq);
+ /* XXX: Potential optimization to rebalance */
+ scoped_guard(raw_spinlock_irq, &list_lock)
+ vmdirq->irq->count--;
+
+ kfree(vmdirq);
+ }
}
-static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *arg)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+static const struct irq_domain_ops vmd_msi_domain_ops = {
+ .alloc = vmd_msi_alloc,
+ .free = vmd_msi_free,
+};
- if (nvec > vmd->msix_count)
- return vmd->msix_count;
+static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
+{
+ if (WARN_ON_ONCE(info->bus_token != DOMAIN_BUS_PCI_DEVICE_MSIX))
+ return false;
- memset(arg, 0, sizeof(*arg));
- return 0;
-}
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
-static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
-{
- arg->desc = desc;
+ info->chip->irq_enable = vmd_pci_msi_enable;
+ info->chip->irq_disable = vmd_pci_msi_disable;
+ return true;
}
-static struct msi_domain_ops vmd_msi_domain_ops = {
- .get_hwirq = vmd_get_hwirq,
- .msi_init = vmd_msi_init,
- .msi_free = vmd_msi_free,
- .msi_prepare = vmd_msi_prepare,
- .set_desc = vmd_set_desc,
-};
+#define VMD_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX)
+#define VMD_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info vmd_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .ops = &vmd_msi_domain_ops,
- .chip = &vmd_msi_controller,
+static const struct msi_parent_ops vmd_msi_parent_ops = {
+ .supported_flags = VMD_MSI_FLAGS_SUPPORTED,
+ .required_flags = VMD_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_VMD_MSI,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .prefix = "VMD-",
+ .init_dev_msi_info = vmd_init_dev_msi_info,
};
-static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
-{
- u16 reg;
-
- pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
- reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
- (reg | VMCONFIG_MSI_REMAP);
- pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
-}
-
static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
- struct fwnode_handle *fn;
+ struct irq_domain_info info = {
+ .size = vmd->msix_count,
+ .ops = &vmd_msi_domain_ops,
+ .host_data = vmd,
+ };
- fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
- if (!fn)
+ info.fwnode = irq_domain_alloc_named_id_fwnode("VMD-MSI",
+ vmd->sysdata.domain);
+ if (!info.fwnode)
return -ENODEV;
- vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+ vmd->irq_domain = msi_create_parent_irq_domain(&info,
+ &vmd_msi_parent_ops);
if (!vmd->irq_domain) {
- irq_domain_free_fwnode(fn);
+ irq_domain_free_fwnode(info.fwnode);
return -ENODEV;
}
return 0;
}
+static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+ u16 reg;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+ reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
+ (reg | VMCONFIG_MSI_REMAP);
+ pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
/*
@@ -387,29 +398,24 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
{
struct vmd_dev *vmd = vmd_from_bus(bus);
void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
- unsigned long flags;
- int ret = 0;
if (!addr)
return -EFAULT;
- raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
+ guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
switch (len) {
case 1:
*value = readb(addr);
- break;
+ return 0;
case 2:
*value = readw(addr);
- break;
+ return 0;
case 4:
*value = readl(addr);
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
- return ret;
}
/*
@@ -422,32 +428,27 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
{
struct vmd_dev *vmd = vmd_from_bus(bus);
void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
- unsigned long flags;
- int ret = 0;
if (!addr)
return -EFAULT;
- raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
+ guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
switch (len) {
case 1:
writeb(value, addr);
readb(addr);
- break;
+ return 0;
case 2:
writew(value, addr);
readw(addr);
- break;
+ return 0;
case 4:
writel(value, addr);
readl(addr);
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
- return ret;
}
static struct pci_ops vmd_ops = {
@@ -889,12 +890,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
ret = vmd_create_irq_domain(vmd);
if (ret)
return ret;
-
- /*
- * Override the IRQ domain bus token so the domain can be
- * distinguished from a regular PCI/MSI domain.
- */
- irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
} else {
vmd_set_msi_remapping(vmd, false);
}
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index 1c5d82eb57d4..8dad291be8b8 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -28,6 +28,14 @@ config PCI_ENDPOINT_CONFIGFS
configure the endpoint function and used to bind the
function with an endpoint controller.
+config PCI_ENDPOINT_MSI_DOORBELL
+ bool "PCI Endpoint MSI Doorbell Support"
+ depends on PCI_ENDPOINT && GENERIC_MSI_IRQ
+ help
+ This enables the EP's MSI interrupt controller to function as a
+ doorbell. The RC can trigger a doorbell in the EP by writing data to a
+ dedicated BAR, which the EP maps to the controller's message address.
+
source "drivers/pci/endpoint/functions/Kconfig"
endmenu
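
The help text above is the whole contract from the host's point of view: once the endpoint has enabled the doorbell, the RC rings it with a single posted write. A minimal, hypothetical RC-side sketch (not part of this series; function and variable names are illustrative), assuming the EP has already advertised the doorbell BAR, offset and data:

#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical RC-side helper: ring the EP doorbell that the endpoint
 * advertised (db_bar/db_offset/db_data read from its register BAR). */
static void ring_ep_doorbell(struct pci_dev *pdev, int db_bar,
                             u32 db_offset, u32 db_data)
{
        void __iomem *db = pci_iomap(pdev, db_bar, 0);

        if (!db)
                return;

        /* The EP mapped this BAR onto its MSI controller's message
         * address, so this write raises the doorbell IRQ in the EP. */
        writel(db_data, db + db_offset);

        pci_iounmap(pdev, db);
}
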
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile
index 95b2fe47e3b0..b4869d52053a 100644
--- a/drivers/pci/endpoint/Makefile
+++ b/drivers/pci/endpoint/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o
obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\
pci-epc-mem.o functions/
+obj-$(CONFIG_PCI_ENDPOINT_MSI_DOORBELL) += pci-ep-msi.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 50eb4106369f..e091193bd8a8 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -11,12 +11,14 @@
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include <linux/pci-ep-msi.h>
#include <linux/pci_regs.h>
#define IRQ_TYPE_INTX 0
@@ -29,6 +31,8 @@
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
+#define COMMAND_ENABLE_DOORBELL BIT(6)
+#define COMMAND_DISABLE_DOORBELL BIT(7)
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
@@ -39,6 +43,11 @@
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
+#define STATUS_DOORBELL_SUCCESS BIT(9)
+#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10)
+#define STATUS_DOORBELL_ENABLE_FAIL BIT(11)
+#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
+#define STATUS_DOORBELL_DISABLE_FAIL BIT(13)
#define FLAG_USE_DMA BIT(0)
@@ -66,6 +75,7 @@ struct pci_epf_test {
bool dma_supported;
bool dma_private;
const struct pci_epc_features *epc_features;
+ struct pci_epf_bar db_bar;
};
struct pci_epf_test_reg {
@@ -80,6 +90,9 @@ struct pci_epf_test_reg {
__le32 irq_number;
__le32 flags;
__le32 caps;
+ __le32 doorbell_bar;
+ __le32 doorbell_offset;
+ __le32 doorbell_data;
} __packed;
static struct pci_epf_header test_header = {
@@ -667,6 +680,115 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
}
}
+static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
+{
+ struct pci_epf_test *epf_test = data;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ u32 status = le32_to_cpu(reg->status);
+
+ status |= STATUS_DOORBELL_SUCCESS;
+ reg->status = cpu_to_le32(status);
+ pci_epf_test_raise_irq(epf_test, reg);
+
+ return IRQ_HANDLED;
+}
+
+static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
+{
+ struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
+ struct pci_epf *epf = epf_test->epf;
+
+ free_irq(epf->db_msg[0].virq, epf_test);
+ reg->doorbell_bar = cpu_to_le32(NO_BAR);
+
+ pci_epf_free_doorbell(epf);
+}
+
+static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ u32 status = le32_to_cpu(reg->status);
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+ struct msi_msg *msg;
+ enum pci_barno bar;
+ size_t offset;
+ int ret;
+
+ ret = pci_epf_alloc_doorbell(epf, 1);
+ if (ret)
+ goto set_status_err;
+
+ msg = &epf->db_msg[0].msg;
+ bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
+ if (bar < BAR_0)
+ goto err_doorbell_cleanup;
+
+ ret = request_irq(epf->db_msg[0].virq, pci_epf_test_doorbell_handler, 0,
+ "pci-ep-test-doorbell", epf_test);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to request doorbell IRQ: %d\n",
+ epf->db_msg[0].virq);
+ goto err_doorbell_cleanup;
+ }
+
+ reg->doorbell_data = cpu_to_le32(msg->data);
+ reg->doorbell_bar = cpu_to_le32(bar);
+
+ msg = &epf->db_msg[0].msg;
+ ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
+ &epf_test->db_bar.phys_addr, &offset);
+
+ if (ret)
+ goto err_doorbell_cleanup;
+
+ reg->doorbell_offset = cpu_to_le32(offset);
+
+ epf_test->db_bar.barno = bar;
+ epf_test->db_bar.size = epf->bar[bar].size;
+ epf_test->db_bar.flags = epf->bar[bar].flags;
+
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+ if (ret)
+ goto err_doorbell_cleanup;
+
+ status |= STATUS_DOORBELL_ENABLE_SUCCESS;
+ reg->status = cpu_to_le32(status);
+ return;
+
+err_doorbell_cleanup:
+ pci_epf_test_doorbell_cleanup(epf_test);
+set_status_err:
+ status |= STATUS_DOORBELL_ENABLE_FAIL;
+ reg->status = cpu_to_le32(status);
+}
+
+static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
+ u32 status = le32_to_cpu(reg->status);
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+
+ if (bar < BAR_0)
+ goto set_status_err;
+
+ pci_epf_test_doorbell_cleanup(epf_test);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+
+ status |= STATUS_DOORBELL_DISABLE_SUCCESS;
+ reg->status = cpu_to_le32(status);
+
+ return;
+
+set_status_err:
+ status |= STATUS_DOORBELL_DISABLE_FAIL;
+ reg->status = cpu_to_le32(status);
+}
+
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
u32 command;
@@ -714,6 +836,14 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
pci_epf_test_copy(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg);
break;
+ case COMMAND_ENABLE_DOORBELL:
+ pci_epf_test_enable_doorbell(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
+ case COMMAND_DISABLE_DOORBELL:
+ pci_epf_test_disable_doorbell(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
default:
dev_err(dev, "Invalid command 0x%x\n", command);
break;
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index e4da3fdb0007..83e9ab10f9c4 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -70,9 +70,11 @@ static struct workqueue_struct *kpcintb_workqueue;
enum epf_ntb_bar {
BAR_CONFIG,
BAR_DB,
- BAR_MW0,
BAR_MW1,
BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+ VNTB_BAR_NUM,
};
/*
@@ -132,7 +134,7 @@ struct epf_ntb {
bool linkup;
u32 spad_size;
- enum pci_barno epf_ntb_bar[6];
+ enum pci_barno epf_ntb_bar[VNTB_BAR_NUM];
struct epf_ntb_ctrl *reg;
@@ -510,7 +512,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
struct device *dev = &ntb->epf->dev;
int ret;
struct pci_epf_bar *epf_bar;
- void __iomem *mw_addr;
+ void *mw_addr;
enum pci_barno barno;
size_t size = sizeof(u32) * ntb->db_count;
@@ -576,7 +578,7 @@ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
for (i = 0; i < ntb->num_mws; i++) {
size = ntb->mws_size[i];
- barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + i];
ntb->epf->bar[barno].barno = barno;
ntb->epf->bar[barno].size = size;
@@ -629,7 +631,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
int i;
for (i = 0; i < num_mws; i++) {
- barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + i];
pci_epc_clear_bar(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
@@ -654,6 +656,63 @@ static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
pci_epc_put(ntb->epf->epc);
}
+
+/**
+ * epf_ntb_is_bar_used() - Check if a BAR is used in the NTB configuration
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @barno: BAR number to check
+ *
+ * Returns: true if used, false if free.
+ */
+static bool epf_ntb_is_bar_used(struct epf_ntb *ntb,
+ enum pci_barno barno)
+{
+ int i;
+
+ for (i = 0; i < VNTB_BAR_NUM; i++) {
+ if (ntb->epf_ntb_bar[i] == barno)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * epf_ntb_find_bar() - Assign BAR number when no configuration is provided
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @epc_features: The features provided by the EPC specific to this EPF
+ * @bar: NTB BAR index
+ * @barno: BAR start index
+ *
+ * When the BAR configuration is not provided through the userspace
+ * configfs interface, automatically assign a BAR as this endpoint
+ * function has historically done.
+ *
+ * Returns: the BAR number found, if any; -1 otherwise.
+ */
+static int epf_ntb_find_bar(struct epf_ntb *ntb,
+ const struct pci_epc_features *epc_features,
+ enum epf_ntb_bar bar,
+ enum pci_barno barno)
+{
+ while (ntb->epf_ntb_bar[bar] < 0) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0)
+ break; /* No more BAR available */
+
+ /*
+ * Verify if the BAR found is not already assigned
+ * through the provided configuration
+ */
+ if (!epf_ntb_is_bar_used(ntb, barno))
+ ntb->epf_ntb_bar[bar] = barno;
+
+ barno += 1;
+ }
+
+ return barno;
+}
+
/**
* epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
* constructs (scratchpad region, doorbell, memorywindow)
@@ -676,23 +735,21 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
/* These are required BARs which are mandatory for NTB functionality */
- for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
- barno = pci_epc_get_next_free_bar(epc_features, barno);
+ for (bar = BAR_CONFIG; bar <= BAR_MW1; bar++) {
+ barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
if (barno < 0) {
dev_err(dev, "Fail to get NTB function BAR\n");
- return barno;
+ return -ENOENT;
}
- ntb->epf_ntb_bar[bar] = barno;
}
/* These are optional BARs which don't impact NTB functionality */
- for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
- barno = pci_epc_get_next_free_bar(epc_features, barno);
+ for (bar = BAR_MW1, i = 1; i < num_mws; bar++, i++) {
+ barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
if (barno < 0) {
ntb->num_mws = i;
dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
}
- ntb->epf_ntb_bar[bar] = barno;
}
return 0;
@@ -860,6 +917,37 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
return len; \
}
+#define EPF_NTB_BAR_R(_name, _id) \
+ static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+ { \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->epf_ntb_bar[_id]); \
+ }
+
+#define EPF_NTB_BAR_W(_name, _id) \
+ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+ { \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ int val; \
+ int ret; \
+ \
+ ret = kstrtoint(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (val < NO_BAR || val > BAR_5) \
+ return -EINVAL; \
+ \
+ ntb->epf_ntb_bar[_id] = val; \
+ \
+ return len; \
+ }
+
static ssize_t epf_ntb_num_mws_store(struct config_item *item,
const char *page, size_t len)
{
@@ -899,6 +987,18 @@ EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)
+EPF_NTB_BAR_R(ctrl_bar, BAR_CONFIG)
+EPF_NTB_BAR_W(ctrl_bar, BAR_CONFIG)
+EPF_NTB_BAR_R(db_bar, BAR_DB)
+EPF_NTB_BAR_W(db_bar, BAR_DB)
+EPF_NTB_BAR_R(mw1_bar, BAR_MW1)
+EPF_NTB_BAR_W(mw1_bar, BAR_MW1)
+EPF_NTB_BAR_R(mw2_bar, BAR_MW2)
+EPF_NTB_BAR_W(mw2_bar, BAR_MW2)
+EPF_NTB_BAR_R(mw3_bar, BAR_MW3)
+EPF_NTB_BAR_W(mw3_bar, BAR_MW3)
+EPF_NTB_BAR_R(mw4_bar, BAR_MW4)
+EPF_NTB_BAR_W(mw4_bar, BAR_MW4)
CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
@@ -910,6 +1010,12 @@ CONFIGFS_ATTR(epf_ntb_, mw4);
CONFIGFS_ATTR(epf_ntb_, vbus_number);
CONFIGFS_ATTR(epf_ntb_, vntb_pid);
CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+CONFIGFS_ATTR(epf_ntb_, ctrl_bar);
+CONFIGFS_ATTR(epf_ntb_, db_bar);
+CONFIGFS_ATTR(epf_ntb_, mw1_bar);
+CONFIGFS_ATTR(epf_ntb_, mw2_bar);
+CONFIGFS_ATTR(epf_ntb_, mw3_bar);
+CONFIGFS_ATTR(epf_ntb_, mw4_bar);
static struct configfs_attribute *epf_ntb_attrs[] = {
&epf_ntb_attr_spad_count,
@@ -922,6 +1028,12 @@ static struct configfs_attribute *epf_ntb_attrs[] = {
&epf_ntb_attr_vbus_number,
&epf_ntb_attr_vntb_pid,
&epf_ntb_attr_vntb_vid,
+ &epf_ntb_attr_ctrl_bar,
+ &epf_ntb_attr_db_bar,
+ &epf_ntb_attr_mw1_bar,
+ &epf_ntb_attr_mw2_bar,
+ &epf_ntb_attr_mw3_bar,
+ &epf_ntb_attr_mw4_bar,
NULL,
};
@@ -1048,7 +1160,7 @@ static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
struct device *dev;
dev = &ntb->ntb.dev;
- barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + idx];
epf_bar = &ntb->epf->bar[barno];
epf_bar->phys_addr = addr;
epf_bar->barno = barno;
@@ -1379,6 +1491,7 @@ static int epf_ntb_probe(struct pci_epf *epf,
{
struct epf_ntb *ntb;
struct device *dev;
+ int i;
dev = &epf->dev;
@@ -1389,6 +1502,11 @@ static int epf_ntb_probe(struct pci_epf *epf,
epf->header = &epf_ntb_header;
ntb->epf = epf;
ntb->vbus_number = 0xff;
+
+ /* Initially, no bar is assigned */
+ for (i = 0; i < VNTB_BAR_NUM; i++)
+ ntb->epf_ntb_bar[i] = NO_BAR;
+
epf_set_drvdata(epf, ntb);
dev_info(dev, "pci-ep epf driver loaded\n");
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index d712c7a866d2..ef50c82e647f 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -691,6 +691,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group)
if (IS_ERR_OR_NULL(group))
return;
+ list_del(&group->group_entry);
configfs_unregister_default_group(group);
}
EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
new file mode 100644
index 000000000000..9ca89cbfec15
--- /dev/null
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Endpoint *Controller* (EPC) MSI library
+ *
+ * Copyright (C) 2025 NXP
+ * Author: Frank Li <Frank.Li@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+#include <linux/pci-ep-msi.h>
+#include <linux/slab.h>
+
+static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ epc = pci_epc_get(dev_name(msi_desc_to_dev(desc)));
+ if (!epc)
+ return;
+
+ epf = list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list);
+
+ if (epf && epf->db_msg && desc->msi_index < epf->num_db)
+ memcpy(&epf->db_msg[desc->msi_index].msg, msg, sizeof(*msg));
+
+ pci_epc_put(epc);
+}
+
+int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
+{
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ struct irq_domain *domain;
+ void *msg;
+ int ret;
+ int i;
+
+ /* TODO: Multi-EPF support */
+ if (list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list) != epf) {
+ dev_err(dev, "MSI doorbell doesn't support multiple EPFs\n");
+ return -EINVAL;
+ }
+
+ domain = of_msi_map_get_device_domain(epc->dev.parent, 0,
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (!domain) {
+ dev_err(dev, "Can't find MSI domain for EPC\n");
+ return -ENODEV;
+ }
+
+ if (!irq_domain_is_msi_parent(domain))
+ return -ENODEV;
+
+ if (!irq_domain_is_msi_immutable(domain)) {
+ dev_err(dev, "Mutable MSI controller not supported\n");
+ return -ENODEV;
+ }
+
+ dev_set_msi_domain(epc->dev.parent, domain);
+
+ msg = kcalloc(num_db, sizeof(struct pci_epf_doorbell_msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ epf->num_db = num_db;
+ epf->db_msg = msg;
+
+ ret = platform_device_msi_init_and_alloc_irqs(epc->dev.parent, num_db,
+ pci_epf_write_msi_msg);
+ if (ret) {
+ dev_err(dev, "Failed to allocate MSI\n");
+ kfree(msg);
+ return ret;
+ }
+
+ for (i = 0; i < num_db; i++)
+ epf->db_msg[i].virq = msi_get_virq(epc->dev.parent, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epf_alloc_doorbell);
+
+void pci_epf_free_doorbell(struct pci_epf *epf)
+{
+ platform_device_msi_free_irqs_all(epf->epc->dev.parent);
+
+ kfree(epf->db_msg);
+ epf->db_msg = NULL;
+ epf->num_db = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_free_doorbell);
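
For reference, a condensed sketch of the call sequence this new API expects from an EPF driver, mirroring the pci-epf-test changes earlier in this series (error handling trimmed; the my_epf_* names are illustrative only):

#include <linux/interrupt.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-msi.h>

static irqreturn_t my_epf_db_handler(int irq, void *data)
{
        /* The RC wrote to the doorbell BAR; handle the event. */
        return IRQ_HANDLED;
}

static int my_epf_setup_doorbell(struct pci_epf *epf)
{
        int ret;

        /* Allocate one platform MSI from the EPC's parent MSI domain */
        ret = pci_epf_alloc_doorbell(epf, 1);
        if (ret)
                return ret;

        /*
         * epf->db_msg[0].msg now holds the MSI address/data; the driver
         * exposes them through a BAR (see pci_epf_align_inbound_addr()
         * and pci_epc_set_bar() usage in pci-epf-test above).
         */
        ret = request_irq(epf->db_msg[0].virq, my_epf_db_handler, 0,
                          "my-epf-doorbell", epf);
        if (ret)
                pci_epf_free_doorbell(epf);

        return ret;
}
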
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 577a9e490115..d54e18872aef 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -338,7 +338,7 @@ static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
mutex_lock(&pci_epf_mutex);
list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
pci_ep_cfs_remove_epf_group(group);
- list_del(&driver->epf_group);
+ WARN_ON(!list_empty(&driver->epf_group));
mutex_unlock(&pci_epf_mutex);
}
@@ -477,6 +477,44 @@ struct pci_epf *pci_epf_create(const char *name)
}
EXPORT_SYMBOL_GPL(pci_epf_create);
+/**
+ * pci_epf_align_inbound_addr() - Align the given address based on the BAR
+ * alignment requirement
+ * @epf: the EPF device
+ * @bar: the BAR number corresponding to the given addr
+ * @addr: inbound address to be aligned
+ * @base: base address matching the @bar alignment requirement
+ * @off: offset to be added to the @base address
+ *
+ * Helper function to align input @addr based on BAR's alignment requirement.
+ * The aligned base address and offset are returned via @base and @off.
+ *
+ * NOTE: The pci_epf_alloc_space() function already accounts for alignment.
+ * This API is primarily intended for use with other memory regions not
+ * allocated by pci_epf_alloc_space(), such as peripheral register spaces or
+ * the message address of a platform MSI controller.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar,
+ u64 addr, dma_addr_t *base, size_t *off)
+{
+ /*
+ * Most EP controllers require the BAR start address to be aligned to
+ * the BAR size, because they mask off the lower bits.
+ *
+ * Alignment to BAR size also works for controllers that support
+ * unaligned addresses.
+ */
+ u64 align = epf->bar[bar].size;
+
+ *base = round_down(addr, align);
+ *off = addr & (align - 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_align_inbound_addr);
+
static void pci_epf_dev_release(struct device *dev)
{
struct pci_epf *epf = to_pci_epf(dev);
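
A short usage sketch for the helper added above (illustrative only; the in-tree user is pci_epf_test_enable_doorbell() earlier in this series): align an arbitrary inbound address, then program the BAR at the aligned base so the target becomes reachable at BAR start plus the returned offset.

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Illustrative helper: expose 'addr' (e.g. an MSI message address)
 * through 'bar', whose start address must be size-aligned. */
static int my_epf_map_addr_into_bar(struct pci_epf *epf, enum pci_barno bar,
                                    u64 addr, struct pci_epf_bar *epf_bar,
                                    size_t *offset)
{
        int ret;

        ret = pci_epf_align_inbound_addr(epf, bar, addr,
                                         &epf_bar->phys_addr, offset);
        if (ret)
                return ret;

        epf_bar->barno = bar;
        epf_bar->size = epf->bar[bar].size;
        epf_bar->flags = epf->bar[bar].flags;

        /* After this, the RC reaches 'addr' at BAR start + *offset. */
        return pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, epf_bar);
}
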
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index ebd342bda235..d783da1dbd24 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -995,7 +995,7 @@ static inline int pcie_hotplug_depth(struct pci_dev *dev)
while (bus->parent) {
bus = bus->parent;
- if (bus->self && bus->self->is_hotplug_bridge)
+ if (bus->self && bus->self->is_pciehp)
depth++;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 10693b5d7eb6..ac4375954c94 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -7,11 +7,16 @@
* Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/log2.h>
#include <linux/pci.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
+#include <asm/div64.h>
#include "pci.h"
#define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */
@@ -150,7 +155,28 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
if (!dev->is_physfn)
return 0;
- return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
+ return dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)];
+}
+
+void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
+ resource_size_t size)
+{
+ if (!pci_resource_is_iov(resno)) {
+ pci_warn(dev, "%s is not an IOV resource\n",
+ pci_resource_name(dev, resno));
+ return;
+ }
+
+ dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)] = size;
+}
+
+bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_CTRL, &cmd);
+
+ return cmd & PCI_SRIOV_CTRL_MSE;
}
static void pci_read_vf_config_common(struct pci_dev *virtfn)
@@ -341,12 +367,14 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
virtfn->multifunction = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+
+ res = &dev->resource[idx];
if (!res->parent)
continue;
virtfn->resource[i].name = pci_name(virtfn);
virtfn->resource[i].flags = res->flags;
- size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ size = pci_iov_resource_size(dev, idx);
resource_set_range(&virtfn->resource[i],
res->start + size * id, size);
rc = request_resource(res, &virtfn->resource[i]);
@@ -643,8 +671,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- bars |= (1 << (i + PCI_IOV_RESOURCES));
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+ resource_size_t vf_bar_sz = pci_iov_resource_size(dev, idx);
+
+ bars |= (1 << idx);
+ res = &dev->resource[idx];
+ if (vf_bar_sz * nr_virtfn > resource_size(res))
+ continue;
if (res->parent)
nres++;
}
@@ -810,8 +843,10 @@ found:
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
- res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES);
+ int idx = pci_resource_num_from_vf_bar(i);
+
+ res = &dev->resource[idx];
+ res_name = pci_resource_name(dev, idx);
/*
* If it is already FIXED, don't change it, something
@@ -850,6 +885,7 @@ found:
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
+ iov->vf_rebar_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VF_REBAR);
if (pdev)
iov->dev = pci_dev_get(pdev);
@@ -869,7 +905,7 @@ fail_max_buses:
dev->is_physfn = 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ res = &dev->resource[pci_resource_num_from_vf_bar(i)];
res->flags = 0;
}
@@ -888,6 +924,30 @@ static void sriov_release(struct pci_dev *dev)
dev->sriov = NULL;
}
+static void sriov_restore_vf_rebar_state(struct pci_dev *dev)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pci_iov_vf_rebar_cap(dev);
+ if (!pos)
+ return;
+
+ pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
+ nbars = FIELD_GET(PCI_VF_REBAR_CTRL_NBAR_MASK, ctrl);
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ int bar_idx, size;
+
+ pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
+ bar_idx = FIELD_GET(PCI_VF_REBAR_CTRL_BAR_IDX, ctrl);
+ size = pci_rebar_bytes_to_size(dev->sriov->barsz[bar_idx]);
+ ctrl &= ~PCI_VF_REBAR_CTRL_BAR_SIZE;
+ ctrl |= FIELD_PREP(PCI_VF_REBAR_CTRL_BAR_SIZE, size);
+ pci_write_config_dword(dev, pos + PCI_VF_REBAR_CTRL, ctrl);
+ }
+}
+
static void sriov_restore_state(struct pci_dev *dev)
{
int i;
@@ -907,7 +967,7 @@ static void sriov_restore_state(struct pci_dev *dev)
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
- pci_update_resource(dev, i + PCI_IOV_RESOURCES);
+ pci_update_resource(dev, pci_resource_num_from_vf_bar(i));
pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
pci_iov_set_numvfs(dev, iov->num_VFs);
@@ -973,7 +1033,7 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
struct resource *res = pci_resource_n(dev, resno);
- int vf_bar = resno - PCI_IOV_RESOURCES;
+ int vf_bar = pci_resource_num_to_vf_bar(resno);
struct pci_bus_region region;
u16 cmd;
u32 new;
@@ -1047,8 +1107,10 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
*/
void pci_restore_iov_state(struct pci_dev *dev)
{
- if (dev->is_physfn)
+ if (dev->is_physfn) {
+ sriov_restore_vf_rebar_state(dev);
sriov_restore_state(dev);
+ }
}
/**
@@ -1255,3 +1317,72 @@ int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
return nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
+
+/**
+ * pci_iov_vf_bar_set_size - set a new size for a VF BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @size: new size as defined in the spec (0=1MB, 31=128TB)
+ *
+ * Set the new size of a VF BAR that supports the VF Resizable BAR capability.
+ * Unlike pci_resize_resource(), this does not cause the resource that
+ * reserves the MMIO space (originally sized for up to total_VFs) to be
+ * resized, which means that subsequent calls to pci_enable_sriov() can fail
+ * if the resources no longer fit.
+ *
+ * Return: 0 on success, or negative on failure.
+ */
+int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
+{
+ u32 sizes;
+ int ret;
+
+ if (!pci_resource_is_iov(resno))
+ return -EINVAL;
+
+ if (pci_iov_is_memory_decoding_enabled(dev))
+ return -EBUSY;
+
+ sizes = pci_rebar_get_possible_sizes(dev, resno);
+ if (!sizes)
+ return -ENOTSUPP;
+
+ if (!(sizes & BIT(size)))
+ return -EINVAL;
+
+ ret = pci_rebar_set_size(dev, resno, size);
+ if (ret)
+ return ret;
+
+ pci_iov_resource_set_size(dev, resno, pci_rebar_size_to_bytes(size));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_iov_vf_bar_set_size);
+
+/**
+ * pci_iov_vf_bar_get_sizes - get VF BAR sizes allowing to create up to num_vfs
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @num_vfs: number of VFs
+ *
+ * Get the sizes of a VF resizable BAR that can accommodate @num_vfs within
+ * the currently assigned size of the resource @resno.
+ *
+ * Return: A bitmask of sizes in format defined in the spec (bit 0=1MB,
+ * bit 31=128TB).
+ */
+u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
+{
+ u64 vf_len = pci_resource_len(dev, resno);
+ u32 sizes;
+
+ if (!num_vfs)
+ return 0;
+
+ do_div(vf_len, num_vfs);
+ sizes = (roundup_pow_of_two(vf_len + 1) - 1) >> ilog2(SZ_1M);
+
+ return sizes & pci_rebar_get_possible_sizes(dev, resno);
+}
+EXPORT_SYMBOL_GPL(pci_iov_vf_bar_get_sizes);
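
A hedged sketch of how a PF driver could combine the two new exports; the helper name and the example numbers are illustrative, not taken from this series. With a 256 MB VF BAR resource and num_vfs = 4, pci_iov_vf_bar_get_sizes() returns a mask covering 1 MB through 64 MB (bits 0..6), intersected with the sizes the device itself supports.

#include <linux/bitops.h>
#include <linux/pci.h>

/* Hypothetical PF driver snippet: pick the largest VF BAR size that still
 * lets 'num_vfs' VFs fit in the space reserved for this VF BAR, then apply
 * it before pci_enable_sriov(). */
static int my_pf_resize_vf_bar(struct pci_dev *pdev, int resno, int num_vfs)
{
        u32 sizes = pci_iov_vf_bar_get_sizes(pdev, resno, num_vfs);

        if (!sizes)
                return -EINVAL;

        /* fls() gives the highest set bit, i.e. the largest usable size */
        return pci_iov_vf_bar_set_size(pdev, resno, fls(sizes) - 1);
}
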
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index b78e0e417324..ed7ed66a595b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -816,15 +816,10 @@ int pci_acpi_program_hp_params(struct pci_dev *dev)
bool pciehp_is_native(struct pci_dev *bridge)
{
const struct pci_host_bridge *host;
- u32 slot_cap;
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
- pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
- if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
- return false;
-
if (pcie_ports_native)
return true;
@@ -1002,7 +997,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
struct acpi_device *adev, *rpadev;
const union acpi_object *obj;
- if (acpi_pci_disabled || !dev->is_hotplug_bridge)
+ if (acpi_pci_disabled || !dev->is_pciehp)
return false;
adev = ACPI_COMPANION(&dev->dev);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 67db34fd10ee..01e6aea1b0c7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1628,7 +1628,7 @@ static int pci_bus_num_vf(struct device *dev)
*/
static int pci_dma_configure(struct device *dev)
{
- struct pci_driver *driver = to_pci_driver(dev->driver);
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *bridge;
int ret = 0;
@@ -1645,8 +1645,8 @@ static int pci_dma_configure(struct device *dev)
pci_put_host_bridge_device(bridge);
- /* @driver may not be valid when we're called from the IOMMU layer */
- if (!ret && dev->driver && !driver->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_pci_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e9448d55113b..a6004513a50d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3030,8 +3030,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
* pci_bridge_d3_possible - Is it possible to put the bridge into D3
* @bridge: Bridge to check
*
- * This function checks if it is possible to move the bridge to D3.
* Currently we only allow D3 for some PCIe ports and for Thunderbolt.
+ *
+ * Return: Whether it is possible to move the bridge to D3.
+ *
+ * The return value is guaranteed to be constant across the entire lifetime
+ * of the bridge, including its hot-removal.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -3046,10 +3050,14 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * Hotplug ports handled by firmware in System Management Mode
- * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+ * Hotplug ports handled by platform firmware may not be put
+ * into D3 by the OS, e.g. ACPI slots ...
*/
- if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+ if (bridge->is_hotplug_bridge && !bridge->is_pciehp)
+ return false;
+
+ /* ... or PCIe hotplug ports not handled natively by the OS. */
+ if (bridge->is_pciehp && !pciehp_is_native(bridge))
return false;
if (pci_bridge_d3_force)
@@ -3068,7 +3076,7 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
* by vendors for runtime D3 at least until 2018 because there
* was no OS support.
*/
- if (bridge->is_hotplug_bridge)
+ if (bridge->is_pciehp)
return false;
if (dmi_check_system(bridge_d3_blacklist))
@@ -3205,7 +3213,6 @@ void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
void pci_pm_init(struct pci_dev *dev)
{
int pm;
- u16 status;
u16 pmc;
device_enable_async_suspend(&dev->dev);
@@ -3266,9 +3273,6 @@ void pci_pm_init(struct pci_dev *dev)
pci_pme_active(dev, false);
}
- pci_read_config_word(dev, PCI_STATUS, &status);
- if (status & PCI_STATUS_IMM_READY)
- dev->imm_ready = 1;
pci_pm_power_up_and_verify_state(dev);
pm_runtime_forbid(&dev->dev);
pm_runtime_set_active(&dev->dev);
@@ -3752,7 +3756,13 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pdev->rebar_cap;
+ if (pci_resource_is_iov(bar)) {
+ pos = pci_iov_vf_rebar_cap(pdev);
+ bar = pci_resource_num_to_vf_bar(bar);
+ } else {
+ pos = pdev->rebar_cap;
+ }
+
if (!pos)
return -ENOTSUPP;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 12215ee72afb..34f65d69662e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -36,13 +36,6 @@ struct pcie_tlp_log;
#define PCIE_T_PERST_CLK_US 100
/*
- * End of conventional reset (PERST# de-asserted) to first configuration
- * request (device able to respond with a "Request Retry Status" completion),
- * from PCIe r6.0, sec 6.6.1.
- */
-#define PCIE_T_RRS_READY_MS 100
-
-/*
* PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization>
* Recommends 1ms to 10ms timeout to check L2 ready.
*/
@@ -61,7 +54,11 @@ struct pcie_tlp_log;
* completes before sending a Configuration Request to the device
* immediately below that Port."
*/
-#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100
+#define PCIE_RESET_CONFIG_WAIT_MS 100
+
+/* Parameters for the waiting for link up routine */
+#define PCIE_LINK_WAIT_MAX_RETRIES 10
+#define PCIE_LINK_WAIT_SLEEP_MS 90
/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
#define PCIE_MSG_TYPE_R_RC 0
@@ -391,12 +388,14 @@ void pci_bus_put(struct pci_bus *bus);
#define PCIE_LNKCAP_SLS2SPEED(lnkcap) \
({ \
- ((lnkcap) == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
+ u32 lnkcap_sls = (lnkcap) & PCI_EXP_LNKCAP_SLS; \
+ \
+ (lnkcap_sls == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
PCI_SPEED_UNKNOWN); \
})
@@ -411,13 +410,17 @@ void pci_bus_put(struct pci_bus *bus);
PCI_SPEED_UNKNOWN)
#define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \
- ((lnkctl2) == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \
- PCI_SPEED_UNKNOWN)
+({ \
+ u16 lnkctl2_tls = (lnkctl2) & PCI_EXP_LNKCTL2_TLS; \
+ \
+ (lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \
+ PCI_SPEED_UNKNOWN); \
+})
/* PCIe speed to Mb/s reduced by encoding overhead */
#define PCIE_SPEED2MBS_ENC(speed) \
@@ -486,6 +489,7 @@ struct pci_sriov {
u16 subsystem_vendor; /* VF subsystem vendor */
u16 subsystem_device; /* VF subsystem device */
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
+ u16 vf_rebar_cap; /* VF Resizable BAR capability offset */
bool drivers_autoprobe; /* Auto probing of VFs by driver */
};
@@ -710,10 +714,28 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
+void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
+ resource_size_t size);
+bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev);
+static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev)
+{
+ if (!dev->is_physfn)
+ return 0;
+
+ return dev->sriov->vf_rebar_cap;
+}
static inline bool pci_resource_is_iov(int resno)
{
return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END;
}
+static inline int pci_resource_num_from_vf_bar(int resno)
+{
+ return resno + PCI_IOV_RESOURCES;
+}
+static inline int pci_resource_num_to_vf_bar(int resno)
+{
+ return resno - PCI_IOV_RESOURCES;
+}
extern const struct attribute_group sriov_pf_dev_attr_group;
extern const struct attribute_group sriov_vf_dev_attr_group;
#else
@@ -734,10 +756,30 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
{
return 0;
}
+static inline void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
+ resource_size_t size) { }
+static inline bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
+{
+ return false;
+}
+static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev)
+{
+ return 0;
+}
static inline bool pci_resource_is_iov(int resno)
{
return false;
}
+static inline int pci_resource_num_from_vf_bar(int resno)
+{
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+}
+static inline int pci_resource_num_to_vf_bar(int resno)
+{
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+}
#endif /* CONFIG_PCI_IOV */
#ifdef CONFIG_PCIE_TPH
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 70ac66188367..e286c197d716 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -116,12 +116,12 @@ struct aer_info {
PCI_ERR_ROOT_MULTI_COR_RCV | \
PCI_ERR_ROOT_MULTI_UNCOR_RCV)
-static int pcie_aer_disable;
+static bool pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
void pci_no_aer(void)
{
- pcie_aer_disable = 1;
+ pcie_aer_disable = true;
}
bool pci_aer_available(void)
@@ -1039,7 +1039,8 @@ static int find_device_iter(struct pci_dev *dev, void *data)
/* List this device */
if (add_error_device(e_info, dev)) {
/* We cannot handle more... Stop iteration */
- /* TODO: Should print error message here? */
+ pci_err(dev, "Exceeded max supported (%d) devices with errors logged\n",
+ AER_MAX_MULTI_ERR_DEVICES);
return 1;
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 29fcb0689a91..919a05b97647 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -245,7 +245,7 @@ struct pcie_link_state {
u32 clkpm_disable:1; /* Clock PM disabled */
};
-static int aspm_disabled, aspm_force;
+static bool aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
@@ -884,10 +884,9 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Configure the ASPM L1 substates. Caller must disable L1 first. */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
- u32 val;
+ u32 val = 0;
struct pci_dev *child = link->downstream, *parent = link->pdev;
- val = 0;
if (state & PCIE_LINK_STATE_L1_1)
val |= PCI_L1SS_CTL1_ASPM_L1_1;
if (state & PCIE_LINK_STATE_L1_2)
@@ -1712,11 +1711,11 @@ static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
aspm_policy = POLICY_DEFAULT;
- aspm_disabled = 1;
+ aspm_disabled = true;
aspm_support_enabled = false;
pr_info("PCIe ASPM is disabled\n");
} else if (!strcmp(str, "force")) {
- aspm_force = 1;
+ aspm_force = true;
pr_info("PCIe ASPM is forcibly enabled\n");
}
return 1;
@@ -1734,7 +1733,7 @@ void pcie_no_aspm(void)
*/
if (!aspm_force) {
aspm_policy = POLICY_DEFAULT;
- aspm_disabled = 1;
+ aspm_disabled = true;
}
}
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index e8318fd5f6ed..d1b68c18444f 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -220,7 +220,7 @@ static int get_port_device_capability(struct pci_dev *dev)
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
int services = 0;
- if (dev->is_hotplug_bridge &&
+ if (dev->is_pciehp &&
(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) &&
(pcie_ports_native || host->native_pcie_hotplug)) {
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4b8693ec9e4c..898885b1619f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1678,7 +1678,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC)
- pdev->is_hotplug_bridge = 1;
+ pdev->is_hotplug_bridge = pdev->is_pciehp = 1;
}
static void set_pcie_thunderbolt(struct pci_dev *dev)
@@ -2595,6 +2595,15 @@ void pcie_report_downtraining(struct pci_dev *dev)
__pcie_print_link_status(dev, false);
}
+static void pci_imm_ready_init(struct pci_dev *dev)
+{
+ u16 status;
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+ if (status & PCI_STATUS_IMM_READY)
+ dev->imm_ready = 1;
+}
+
static void pci_init_capabilities(struct pci_dev *dev)
{
pci_ea_init(dev); /* Enhanced Allocation */
@@ -2604,6 +2613,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Buffers for saving PCIe and PCI-X capabilities */
pci_allocate_cap_save_buffers(dev);
+ pci_imm_ready_init(dev); /* Immediate Readiness */
pci_pm_init(dev); /* Power Management */
pci_vpd_init(dev); /* Vital Product Data */
pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
index 18becc144913..6e138310b45b 100644
--- a/drivers/pci/pwrctrl/slot.c
+++ b/drivers/pci/pwrctrl/slot.c
@@ -4,6 +4,7 @@
* Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
*/
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -30,6 +31,7 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
{
struct pci_pwrctrl_slot_data *slot;
struct device *dev = &pdev->dev;
+ struct clk *clk;
int ret;
slot = devm_kzalloc(dev, sizeof(*slot), GFP_KERNEL);
@@ -55,6 +57,12 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
if (ret)
goto err_regulator_disable;
+ clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clk)) {
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to enable slot clock\n");
+ }
+
pci_pwrctrl_init(&slot->ctx, dev);
ret = devm_pci_pwrctrl_device_set_ready(dev, &slot->ctx);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d7f4ee634263..db6e142b082d 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -105,13 +105,13 @@ int pcie_failed_link_retrain(struct pci_dev *dev)
!pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
return ret;
- pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) {
- u16 oldlnkctl2 = lnkctl2;
+ u16 oldlnkctl2;
pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &oldlnkctl2);
ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false);
if (ret) {
pci_info(dev, "retraining failed\n");
@@ -123,6 +123,8 @@ int pcie_failed_link_retrain(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
}
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
+
if ((lnksta & PCI_EXP_LNKSTA_DLLLA) &&
(lnkctl2 & PCI_EXP_LNKCTL2_TLS) == PCI_EXP_LNKCTL2_TLS_2_5GT &&
pci_match_id(ids, dev)) {
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 07c3d021a47e..7853ac6999e2 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1888,7 +1888,8 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
bool *unassigned = data;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+ struct resource *r = &dev->resource[idx];
struct pci_bus_region region;
/* Not assigned or rejected by kernel? */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c6657cdd06f6..d2b3ed51e880 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -423,13 +423,39 @@ void pci_release_resource(struct pci_dev *dev, int resno)
}
EXPORT_SYMBOL(pci_release_resource);
+static bool pci_resize_is_memory_decoding_enabled(struct pci_dev *dev,
+ int resno)
+{
+ u16 cmd;
+
+ if (pci_resource_is_iov(resno))
+ return pci_iov_is_memory_decoding_enabled(dev);
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+ return cmd & PCI_COMMAND_MEMORY;
+}
+
+static void pci_resize_resource_set_size(struct pci_dev *dev, int resno,
+ int size)
+{
+ resource_size_t res_size = pci_rebar_size_to_bytes(size);
+ struct resource *res = pci_resource_n(dev, resno);
+
+ if (!pci_resource_is_iov(resno)) {
+ resource_set_size(res, res_size);
+ } else {
+ resource_set_size(res, res_size * pci_sriov_get_totalvfs(dev));
+ pci_iov_resource_set_size(dev, resno, res_size);
+ }
+}
+
int pci_resize_resource(struct pci_dev *dev, int resno, int size)
{
struct resource *res = pci_resource_n(dev, resno);
struct pci_host_bridge *host;
int old, ret;
u32 sizes;
- u16 cmd;
/* Check if we must preserve the firmware's resource assignment */
host = pci_find_host_bridge(dev->bus);
@@ -440,8 +466,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
if (!(res->flags & IORESOURCE_UNSET))
return -EBUSY;
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (cmd & PCI_COMMAND_MEMORY)
+ if (pci_resize_is_memory_decoding_enabled(dev, resno))
return -EBUSY;
sizes = pci_rebar_get_possible_sizes(dev, resno);
@@ -459,7 +484,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
if (ret)
return ret;
- resource_set_size(res, pci_rebar_size_to_bytes(size));
+ pci_resize_resource_set_size(dev, resno, size);
/* Check if the new config works by trying to assign everything. */
if (dev->bus->self) {
@@ -471,7 +496,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
error_resize:
pci_rebar_set_size(dev, resno, old);
- resource_set_size(res, pci_rebar_size_to_bytes(old));
+ pci_resize_resource_set_size(dev, resno, old);
return ret;
}
EXPORT_SYMBOL(pci_resize_resource);