Diffstat (limited to 'drivers')
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c     75
-rw-r--r--  drivers/pci/iov.c                    101
-rw-r--r--  drivers/pci/msi.c                     31
-rw-r--r--  drivers/pci/pci-driver.c               8
-rw-r--r--  drivers/pci/pci.c                    225
-rw-r--r--  drivers/pci/pci.h                      1
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c    28
-rw-r--r--  drivers/pci/probe.c                    6
-rw-r--r--  drivers/pci/quirks.c                  58
-rw-r--r--  drivers/pci/setup-bus.c               50
-rw-r--r--  drivers/pci/setup-res.c                7
11 files changed, 466 insertions, 124 deletions
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index f3796124ad7c..4c8f4cde6854 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -204,36 +204,39 @@ static void pciehp_power_thread(struct work_struct *work)
kfree(info);
}
-void pciehp_queue_pushbutton_work(struct work_struct *work)
+static void pciehp_queue_power_work(struct slot *p_slot, int req)
{
- struct slot *p_slot = container_of(work, struct slot, work.work);
struct power_work_info *info;
+ p_slot->state = (req == ENABLE_REQ) ? POWERON_STATE : POWEROFF_STATE;
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
+ ctrl_err(p_slot->ctrl, "no memory to queue %s request\n",
+ (req == ENABLE_REQ) ? "poweron" : "poweroff");
return;
}
info->p_slot = p_slot;
INIT_WORK(&info->work, pciehp_power_thread);
+ info->req = req;
+ queue_work(p_slot->wq, &info->work);
+}
+
+void pciehp_queue_pushbutton_work(struct work_struct *work)
+{
+ struct slot *p_slot = container_of(work, struct slot, work.work);
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
- p_slot->state = POWEROFF_STATE;
- info->req = DISABLE_REQ;
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
break;
case BLINKINGON_STATE:
- p_slot->state = POWERON_STATE;
- info->req = ENABLE_REQ;
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
break;
default:
- kfree(info);
- goto out;
+ break;
}
- queue_work(p_slot->wq, &info->work);
- out:
mutex_unlock(&p_slot->lock);
}
@@ -301,27 +304,12 @@ static void handle_button_press_event(struct slot *p_slot)
static void handle_surprise_event(struct slot *p_slot)
{
u8 getstatus;
- struct power_work_info *info;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
- return;
- }
- info->p_slot = p_slot;
- INIT_WORK(&info->work, pciehp_power_thread);
pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus) {
- p_slot->state = POWEROFF_STATE;
- info->req = DISABLE_REQ;
- } else {
- p_slot->state = POWERON_STATE;
- info->req = ENABLE_REQ;
- }
-
- queue_work(p_slot->wq, &info->work);
+ if (!getstatus)
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
+ else
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
}
/*
@@ -330,17 +318,6 @@ static void handle_surprise_event(struct slot *p_slot)
static void handle_link_event(struct slot *p_slot, u32 event)
{
struct controller *ctrl = p_slot->ctrl;
- struct power_work_info *info;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
- return;
- }
- info->p_slot = p_slot;
- info->req = event == INT_LINK_UP ? ENABLE_REQ : DISABLE_REQ;
- INIT_WORK(&info->work, pciehp_power_thread);
switch (p_slot->state) {
case BLINKINGON_STATE:
@@ -348,22 +325,19 @@ static void handle_link_event(struct slot *p_slot, u32 event)
cancel_delayed_work(&p_slot->work);
/* Fall through */
case STATIC_STATE:
- p_slot->state = event == INT_LINK_UP ?
- POWERON_STATE : POWEROFF_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, event == INT_LINK_UP ?
+ ENABLE_REQ : DISABLE_REQ);
break;
case POWERON_STATE:
if (event == INT_LINK_UP) {
ctrl_info(ctrl,
"Link Up event ignored on slot(%s): already powering on\n",
slot_name(p_slot));
- kfree(info);
} else {
ctrl_info(ctrl,
"Link Down event queued on slot(%s): currently getting powered on\n",
slot_name(p_slot));
- p_slot->state = POWEROFF_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
}
break;
case POWEROFF_STATE:
@@ -371,19 +345,16 @@ static void handle_link_event(struct slot *p_slot, u32 event)
ctrl_info(ctrl,
"Link Up event queued on slot(%s): currently getting powered off\n",
slot_name(p_slot));
- p_slot->state = POWERON_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
} else {
ctrl_info(ctrl,
"Link Down event ignored on slot(%s): already powering off\n",
slot_name(p_slot));
- kfree(info);
}
break;
default:
ctrl_err(ctrl, "ignoring invalid state %#x on slot(%s)\n",
p_slot->state, slot_name(p_slot));
- kfree(info);
break;
}
}
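
For context, the work items queued by the new pciehp_queue_power_work() helper are consumed by pciehp_power_thread(), which dispatches on info->req and frees the allocation. A simplified sketch, built only from the pciehp names visible in this diff (power_work_info, ENABLE_REQ/DISABLE_REQ, pciehp_enable_slot()/pciehp_disable_slot()) and not the verbatim driver source:

static void pciehp_power_thread(struct work_struct *work)
{
	struct power_work_info *info =
		container_of(work, struct power_work_info, work);
	struct slot *p_slot = info->p_slot;

	switch (info->req) {
	case DISABLE_REQ:
		pciehp_disable_slot(p_slot);	/* power off, remove devices */
		break;
	case ENABLE_REQ:
		pciehp_enable_slot(p_slot);	/* power on, enumerate devices */
		break;
	}

	mutex_lock(&p_slot->lock);
	p_slot->state = STATIC_STATE;		/* slot is idle again */
	mutex_unlock(&p_slot->lock);

	kfree(info);	/* allocated in pciehp_queue_power_work() */
}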
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ee0ebff103a4..31f31d460fc9 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -54,24 +54,29 @@ static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
* The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
* determine how many additional bus numbers will be consumed by VFs.
*
- * Iterate over all valid NumVFs and calculate the maximum number of bus
- * numbers that could ever be required.
+ * Iterate over all valid NumVFs, validate offset and stride, and calculate
+ * the maximum number of bus numbers that could ever be required.
*/
-static inline u8 virtfn_max_buses(struct pci_dev *dev)
+static int compute_max_vf_buses(struct pci_dev *dev)
{
struct pci_sriov *iov = dev->sriov;
- int nr_virtfn;
- u8 max = 0;
- int busnr;
+ int nr_virtfn, busnr, rc = 0;
- for (nr_virtfn = 1; nr_virtfn <= iov->total_VFs; nr_virtfn++) {
+ for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
pci_iov_set_numvfs(dev, nr_virtfn);
+ if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
+ rc = -EIO;
+ goto out;
+ }
+
busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
- if (busnr > max)
- max = busnr;
+ if (busnr > iov->max_VF_buses)
+ iov->max_VF_buses = busnr;
}
- return max;
+out:
+ pci_iov_set_numvfs(dev, 0);
+ return rc;
}
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
@@ -222,21 +227,25 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
- return 0;
+ return 0;
+}
+
+int __weak pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ return 0;
}
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
- int i, j;
+ int i;
int nres;
- u16 offset, stride, initial;
+ u16 initial;
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
int bars = 0;
int bus;
- int retval;
if (!nr_virtfn)
return 0;
@@ -253,11 +262,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
- if (!offset || (nr_virtfn > 1 && !stride))
- return -EIO;
-
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
bars |= (1 << (i + PCI_IOV_RESOURCES));
@@ -270,9 +274,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
- iov->offset = offset;
- iov->stride = stride;
-
bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
if (bus > dev->bus->busn_res.end) {
dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
@@ -313,10 +314,10 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
if (nr_virtfn < initial)
initial = nr_virtfn;
- if ((retval = pcibios_sriov_enable(dev, initial))) {
- dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n",
- retval);
- return retval;
+ rc = pcibios_sriov_enable(dev, initial);
+ if (rc) {
+ dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n", rc);
+ goto err_pcibios;
}
for (i = 0; i < initial; i++) {
@@ -331,27 +332,24 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return 0;
failed:
- for (j = 0; j < i; j++)
- virtfn_remove(dev, j, 0);
+ while (i--)
+ virtfn_remove(dev, i, 0);
+ pcibios_sriov_disable(dev);
+err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- pci_iov_set_numvfs(dev, 0);
ssleep(1);
pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
+ pci_iov_set_numvfs(dev, 0);
return rc;
}
-int __weak pcibios_sriov_disable(struct pci_dev *pdev)
-{
- return 0;
-}
-
static void sriov_disable(struct pci_dev *dev)
{
int i;
@@ -384,7 +382,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
int rc;
int nres;
u32 pgsz;
- u16 ctrl, total, offset, stride;
+ u16 ctrl, total;
struct pci_sriov *iov;
struct resource *res;
struct pci_dev *pdev;
@@ -399,10 +397,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
ssleep(1);
}
- pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
- if (!total)
- return 0;
-
ctrl = 0;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
if (pdev->is_physfn)
@@ -414,11 +408,10 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
- pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
- pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
- pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
- if (!offset || (total > 1 && !stride))
- return -EIO;
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
+ if (!total)
+ return 0;
pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
@@ -436,8 +429,15 @@ found:
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
- bar64 = __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ /*
+ * If it is already FIXED, don't change it, something
+ * (perhaps EA or header fixups) wants it this way.
+ */
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
+ else
+ bar64 = __pci_read_base(dev, pci_bar_unknown, res,
+ pos + PCI_SRIOV_BAR + i * 4);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
@@ -456,8 +456,6 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
- iov->offset = offset;
- iov->stride = stride;
iov->pgsz = pgsz;
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
@@ -474,10 +472,15 @@ found:
dev->sriov = iov;
dev->is_physfn = 1;
- iov->max_VF_buses = virtfn_max_buses(dev);
+ rc = compute_max_vf_buses(dev);
+ if (rc)
+ goto fail_max_buses;
return 0;
+fail_max_buses:
+ dev->sriov = NULL;
+ dev->is_physfn = 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
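
The reason compute_max_vf_buses() now rejects a zero First VF Offset or VF Stride is that those fields feed directly into the VF routing IDs, and therefore into the bus numbers it probes. The arithmetic is essentially the following sketch of what pci_iov_virtfn_bus() computes from the struct pci_sriov fields (illustrative rather than verbatim):

/* Routing ID of VF vf_id, relative to the PF (per the SR-IOV spec) */
static int vf_routing_id(struct pci_dev *dev, int vf_id)
{
	return dev->devfn + dev->sriov->offset + dev->sriov->stride * vf_id;
}

/* Bus the VF lands on: each 256 routing IDs consume another bus number */
static int vf_bus(struct pci_dev *dev, int vf_id)
{
	return dev->bus->number + (vf_routing_id(dev, vf_id) >> 8);
}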
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d4497141d083..324a1643fce2 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -475,10 +475,11 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
int ret = -ENOMEM;
int num_msi = 0;
int count = 0;
+ int i;
/* Determine how many msi entries we have */
for_each_pci_msi_entry(entry, pdev)
- ++num_msi;
+ num_msi += entry->nvec_used;
if (!num_msi)
return 0;
@@ -487,19 +488,21 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
if (!msi_attrs)
return -ENOMEM;
for_each_pci_msi_entry(entry, pdev) {
- msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr)
- goto error_attrs;
- msi_attrs[count] = &msi_dev_attr->attr;
-
- sysfs_attr_init(&msi_dev_attr->attr);
- msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
- entry->irq);
- if (!msi_dev_attr->attr.name)
- goto error_attrs;
- msi_dev_attr->attr.mode = S_IRUGO;
- msi_dev_attr->show = msi_mode_show;
- ++count;
+ for (i = 0; i < entry->nvec_used; i++) {
+ msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+ if (!msi_dev_attr)
+ goto error_attrs;
+ msi_attrs[count] = &msi_dev_attr->attr;
+
+ sysfs_attr_init(&msi_dev_attr->attr);
+ msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+ entry->irq + i);
+ if (!msi_dev_attr->attr.name)
+ goto error_attrs;
+ msi_dev_attr->attr.mode = S_IRUGO;
+ msi_dev_attr->show = msi_mode_show;
+ ++count;
+ }
}
msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd652f2ae03d..2865ba37e295 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -172,7 +172,7 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
__u32 vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields = 0;
- int retval = -ENODEV;
+ ssize_t retval = -ENODEV;
fields = sscanf(buf, "%x %x %x %x %x %x",
&vendor, &device, &subvendor, &subdevice,
@@ -190,15 +190,13 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
!((id->class ^ class) & class_mask)) {
list_del(&dynid->node);
kfree(dynid);
- retval = 0;
+ retval = count;
break;
}
}
spin_unlock(&pdrv->dynids.lock);
- if (retval)
- return retval;
- return count;
+ return retval;
}
static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6a9a1116f1eb..2b28a4e77ea4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -27,6 +27,7 @@
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
+#include <linux/aer.h>
#include "pci.h"
const char *pci_power_names[] = {
@@ -458,6 +459,30 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
EXPORT_SYMBOL(pci_find_parent_resource);
/**
+ * pci_find_pcie_root_port - return PCIe Root Port
+ * @dev: PCI device to query
+ *
+ * Traverse up the parent chain and return the PCIe Root Port PCI Device
+ * for a given PCI Device.
+ */
+struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
+{
+ struct pci_dev *bridge, *highest_pcie_bridge = NULL;
+
+ bridge = pci_upstream_bridge(dev);
+ while (bridge && pci_is_pcie(bridge)) {
+ highest_pcie_bridge = bridge;
+ bridge = pci_upstream_bridge(bridge);
+ }
+
+ if (!highest_pcie_bridge ||
+     pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
+ return NULL;
+
+ return highest_pcie_bridge;
+}
+EXPORT_SYMBOL(pci_find_pcie_root_port);
+
+/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
* @pos: config space offset of status word
@@ -484,7 +509,7 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
}
/**
- * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
+ * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
* Restore the BAR values for a given device, so as to make it
@@ -494,6 +519,10 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
+ return;
+
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -1099,6 +1128,8 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
+ pci_cleanup_aer_error_status_regs(dev);
+
pci_restore_config_space(dev);
pci_restore_pcix_state(dev);
@@ -2148,6 +2179,198 @@ void pci_pm_init(struct pci_dev *dev)
}
}
+static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
+{
+ unsigned long flags = IORESOURCE_PCI_FIXED;
+
+ switch (prop) {
+ case PCI_EA_P_MEM:
+ case PCI_EA_P_VF_MEM:
+ flags |= IORESOURCE_MEM;
+ break;
+ case PCI_EA_P_MEM_PREFETCH:
+ case PCI_EA_P_VF_MEM_PREFETCH:
+ flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ break;
+ case PCI_EA_P_IO:
+ flags |= IORESOURCE_IO;
+ break;
+ default:
+ return 0;
+ }
+
+ return flags;
+}
+
+static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
+ u8 prop)
+{
+ if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
+ return &dev->resource[bei];
+#ifdef CONFIG_PCI_IOV
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
+ (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
+ return &dev->resource[PCI_IOV_RESOURCES +
+ bei - PCI_EA_BEI_VF_BAR0];
+#endif
+ else if (bei == PCI_EA_BEI_ROM)
+ return &dev->resource[PCI_ROM_RESOURCE];
+ else
+ return NULL;
+}
+
+/* Read an Enhanced Allocation (EA) entry */
+static int pci_ea_read(struct pci_dev *dev, int offset)
+{
+ struct resource *res;
+ int ent_size, ent_offset = offset;
+ resource_size_t start, end;
+ unsigned long flags;
+ u32 dw0, bei, base, max_offset;
+ u8 prop;
+ bool support_64 = (sizeof(resource_size_t) >= 8);
+
+ pci_read_config_dword(dev, ent_offset, &dw0);
+ ent_offset += 4;
+
+ /* Entry size field indicates DWORDs after 1st */
+ ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
+
+ if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
+ goto out;
+
+ bei = (dw0 & PCI_EA_BEI) >> 4;
+ prop = (dw0 & PCI_EA_PP) >> 8;
+
+ /*
+ * If the Property is in the reserved range, try the Secondary
+ * Property instead.
+ */
+ if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
+ prop = (dw0 & PCI_EA_SP) >> 16;
+ if (prop > PCI_EA_P_BRIDGE_IO)
+ goto out;
+
+ res = pci_ea_get_resource(dev, bei, prop);
+ if (!res) {
+ dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
+ goto out;
+ }
+
+ flags = pci_ea_flags(dev, prop);
+ if (!flags) {
+ dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
+ goto out;
+ }
+
+ /* Read Base */
+ pci_read_config_dword(dev, ent_offset, &base);
+ start = (base & PCI_EA_FIELD_MASK);
+ ent_offset += 4;
+
+ /* Read MaxOffset */
+ pci_read_config_dword(dev, ent_offset, &max_offset);
+ ent_offset += 4;
+
+ /* Read Base MSBs (if 64-bit entry) */
+ if (base & PCI_EA_IS_64) {
+ u32 base_upper;
+
+ pci_read_config_dword(dev, ent_offset, &base_upper);
+ ent_offset += 4;
+
+ flags |= IORESOURCE_MEM_64;
+
+ /* entry starts above 32-bit boundary, can't use */
+ if (!support_64 && base_upper)
+ goto out;
+
+ if (support_64)
+ start |= ((u64)base_upper << 32);
+ }
+
+ end = start + (max_offset | 0x03);
+
+ /* Read MaxOffset MSBs (if 64-bit entry) */
+ if (max_offset & PCI_EA_IS_64) {
+ u32 max_offset_upper;
+
+ pci_read_config_dword(dev, ent_offset, &max_offset_upper);
+ ent_offset += 4;
+
+ flags |= IORESOURCE_MEM_64;
+
+ /* entry too big, can't use */
+ if (!support_64 && max_offset_upper)
+ goto out;
+
+ if (support_64)
+ end += ((u64)max_offset_upper << 32);
+ }
+
+ if (end < start) {
+ dev_err(&dev->dev, "EA Entry crosses address boundary\n");
+ goto out;
+ }
+
+ if (ent_size != ent_offset - offset) {
+ dev_err(&dev->dev,
+ "EA Entry Size (%d) does not match length read (%d)\n",
+ ent_size, ent_offset - offset);
+ goto out;
+ }
+
+ res->name = pci_name(dev);
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+
+ if (bei <= PCI_EA_BEI_BAR5)
+ dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei, res, prop);
+ else if (bei == PCI_EA_BEI_ROM)
+ dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+ res, prop);
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
+ dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei - PCI_EA_BEI_VF_BAR0, res, prop);
+ else
+ dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei, res, prop);
+
+out:
+ return offset + ent_size;
+}
+
+/* Enhanced Allocation Initialization */
+void pci_ea_init(struct pci_dev *dev)
+{
+ int ea;
+ u8 num_ent;
+ int offset;
+ int i;
+
+ /* find PCI EA capability in list */
+ ea = pci_find_capability(dev, PCI_CAP_ID_EA);
+ if (!ea)
+ return;
+
+ /* determine the number of entries */
+ pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
+ &num_ent);
+ num_ent &= PCI_EA_NUM_ENT_MASK;
+
+ offset = ea + PCI_EA_FIRST_ENT;
+
+ /* Skip DWORD 2 for type 1 functions */
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ offset += 4;
+
+ /* parse each EA entry */
+ for (i = 0; i < num_ent; ++i)
+ offset = pci_ea_read(dev, offset);
+}
+
static void pci_add_saved_cap(struct pci_dev *pci_dev,
struct pci_cap_saved_state *new_cap)
{
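
To make the Base/MaxOffset handling in pci_ea_read() concrete, here is a worked example with made-up register values describing a 32-bit, 4 KB entry (the numbers are illustrative only, not read from any device):

/* Illustrative only: the range arithmetic pci_ea_read() performs above */
static resource_size_t pci_ea_example_size(void)
{
	u32 base       = 0x91000000;		/* Base dword; low 2 bits are flags */
	u32 max_offset = 0x00000ffc;		/* MaxOffset dword; encodes size - 1 */
	resource_size_t start, end;

	start = base & PCI_EA_FIELD_MASK;	/* 0x91000000 */
	end   = start + (max_offset | 0x03);	/* 0x91000fff */

	return end - start + 1;			/* 0x1000: a 4 KB region */
}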
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 24ba9dc8910a..a1607331693e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -78,6 +78,7 @@ bool pci_dev_keep_suspended(struct pci_dev *dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
void pci_pm_init(struct pci_dev *dev);
+void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 9803e3d039fe..fba785e9df75 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -74,6 +74,34 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+ int port_type;
+
+ if (!pci_is_pcie(dev))
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -EIO;
+
+ port_type = pci_pcie_type(dev);
+ if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
+ }
+
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+
+ return 0;
+}
+
/**
* add_error_device - list device to be handled
* @e_info: pointer to error info
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0b2be174d981..6ae984da0b9c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
+#include <linux/aer.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"
@@ -1598,6 +1599,9 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
static void pci_init_capabilities(struct pci_dev *dev)
{
+ /* Enhanced Allocation */
+ pci_ea_init(dev);
+
/* MSI/MSI-X list */
pci_msi_init_pci_dev(dev);
@@ -1621,6 +1625,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Enable ACS P2P upstream forwarding */
pci_enable_acs(dev);
+
+ pci_cleanup_aer_error_status_regs(dev);
}
static void pci_set_msi_domain(struct pci_dev *dev)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6a30252cd79f..7c591bc10884 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2230,6 +2230,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disab
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
@@ -3692,6 +3693,63 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
/*
+ * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
+ * values for the Attribute as were supplied in the header of the
+ * corresponding Request, except as explicitly allowed when IDO is used."
+ *
+ * If a non-compliant device generates a completion with a different
+ * attribute than the request, the receiver may accept it (which itself
+ * seems non-compliant based on sec 2.3.2), or it may handle it as a
+ * Malformed TLP or an Unexpected Completion, which will probably lead to a
+ * device access timeout.
+ *
+ * If the non-compliant device generates completions with zero attributes
+ * (instead of copying the attributes from the request), we can work around
+ * this by disabling the "Relaxed Ordering" and "No Snoop" attributes in
+ * upstream devices so they always generate requests with zero attributes.
+ *
+ * This affects other devices under the same Root Port, but since these
+ * attributes are performance hints, there should be no functional problem.
+ *
+ * Note that Configuration Space accesses are never supposed to have TLP
+ * Attributes, so we're safe waiting till after any Configuration Space
+ * accesses to do the Root Port fixup.
+ */
+static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
+{
+ struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
+
+ if (!root_port) {
+ dev_warn(&pdev->dev, "PCIe Completion erratum may cause device errors\n");
+ return;
+ }
+
+ dev_info(&root_port->dev, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
+ dev_name(&pdev->dev));
+ pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+}
+
+/*
+ * The Chelsio T5 chip fails to copy TLP Attributes from a Request to the
+ * Completion it generates.
+ */
+static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
+{
+ /*
+ * This mask/compare operation selects for Physical Function 4 on a
+ * T5. We only need to fix up the Root Port once for any of the
+ * PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
+ * 0x54xx so we use that one.
+ */
+ if ((pdev->device & 0xff00) == 0x5400)
+ quirk_disable_root_port_attributes(pdev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ quirk_chelsio_T5_disable_root_port_attributes);
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
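
If one wanted to verify that quirk_disable_root_port_attributes() really cleared the two attribute-enable bits, a read-back along these lines would do it (sketch only; assumes a struct pci_dev *root_port in scope and omits error handling):

	u16 devctl;

	pcie_capability_read_word(root_port, PCI_EXP_DEVCTL, &devctl);
	if (devctl & (PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN))
		dev_warn(&root_port->dev,
			 "Relaxed Ordering/No Snoop still enabled on Root Port\n");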
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 508cc56130e3..1723ac1b30e1 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1037,9 +1037,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct resource *r = &dev->resource[i];
resource_size_t r_size;
- if (r->parent || ((r->flags & mask) != type &&
- (r->flags & mask) != type2 &&
- (r->flags & mask) != type3))
+ if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
+ ((r->flags & mask) != type &&
+ (r->flags & mask) != type2 &&
+ (r->flags & mask) != type3))
continue;
r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
@@ -1340,6 +1341,47 @@ void pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
+static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
+{
+ int i;
+ struct resource *parent_r;
+ unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH;
+
+ pci_bus_for_each_resource(b, parent_r, i) {
+ if (!parent_r)
+ continue;
+
+ if ((r->flags & mask) == (parent_r->flags & mask) &&
+ resource_contains(parent_r, r))
+ request_resource(parent_r, r);
+ }
+}
+
+/*
+ * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they
+ * are skipped by pbus_assign_resources_sorted().
+ */
+static void pdev_assign_fixed_resources(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct pci_bus *b;
+ struct resource *r = &dev->resource[i];
+
+ if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
+ !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+
+ b = dev->bus;
+ while (b && !r->parent) {
+ assign_fixed_resource_on_bus(b, r);
+ b = b->parent;
+ }
+ }
+}
+
void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -1350,6 +1392,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
pbus_assign_resources_sorted(bus, realloc_head, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
+ pdev_assign_fixed_resources(dev);
+
b = dev->subordinate;
if (!b)
continue;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 232f9254c11a..604011e047d6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -36,6 +36,11 @@ void pci_update_resource(struct pci_dev *dev, int resno)
enum pci_bar_type type;
struct resource *res = dev->resource + resno;
+ if (dev->is_virtfn) {
+ dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+ return;
+ }
+
/*
* Ignore resources for unimplemented BARs and unused resource slots
* for 64 bit BARs.
@@ -177,6 +182,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
end = res->end;
res->start = fw_addr;
res->end = res->start + size - 1;
+ res->flags &= ~IORESOURCE_UNSET;
root = pci_find_parent_resource(dev, res);
if (!root) {
@@ -194,6 +200,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
resno, res, conflict->name, conflict);
res->start = start;
res->end = end;
+ res->flags |= IORESOURCE_UNSET;
return -EBUSY;
}
return 0;