| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-10 09:01:01 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-10 09:01:01 -0800 |
| commit | 9e66645d72d3c395da92b0f8855c787f4b5f0e89 (patch) | |
| tree | 61b94adb6c32340c45b6d984837556b6b845e983 /drivers/pci/msi.c | |
| parent | ecb50f0afd35a51ef487e8a54b976052eb03d729 (diff) | |
| parent | 74faaf7aa64c76b60db0f5c994fd43a46be772ce (diff) | |
Merge branch 'irq-irqdomain-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq domain updates from Thomas Gleixner:
"The real interesting irq updates:
- Support for hierarchical irq domains:
For complex interrupt routing scenarios where more than one
interrupt-related chip is involved, the generic interrupt
infrastructure so far had no proper representation. That made people
implement rather ugly constructs in their nested irq chip
implementations. The main offenders are x86 and arm/gic.
To disentangle that mess we now have hierarchical irqdomains which
separate the various interrupt chips and connect them via the
hierarchical domains. That keeps the domain specific details
internal to the particular hierarchy level and removes the
criss/cross referencing of chip internals. The resulting hierarchy
for a complex x86 system will look like this:
vector mapped: 74
msi-0 mapped: 2
dmar-ir-1 mapped: 69
ioapic-1 mapped: 4
ioapic-0 mapped: 20
pci-msi-2 mapped: 45
dmar-ir-0 mapped: 3
ioapic-2 mapped: 1
pci-msi-1 mapped: 2
htirq mapped: 0
Neither ioapic nor pci-msi know about the dmar interrupt remapping
between themselves and the vector domain. If interrupt remapping is
disabled, ioapic and pci-msi become direct children of the vector
domain.
In hindsight we should have done that years ago, but in hindsight
we always know better :)
- Support for generic MSI interrupt domain handling
We have more and more non-PCI-related MSI interrupts, so providing
a generic infrastructure for this is better than having all
affected architectures implement their own private hacks.
- Support for PCI-MSI interrupt domain handling, based on the generic
MSI support.
This part carries the pci/msi branch from Bjorn Helgaas's PCI tree to
avoid a massive conflict. The PCI/MSI parts are acked by Bjorn.
I have two more branches on top of this: the full conversion of x86
to hierarchical domains and a partial conversion of arm/gic"
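
[Editor's note: the stacking described above is driven by a handful of new helpers from the commits listed below (irq_domain_add_hierarchy(), irq_domain_alloc_irqs_parent(), irq_domain_set_hwirq_and_chip(), irq_domain_free_irqs_common() and the irq_chip_*_parent() callbacks). A minimal sketch of a child domain stacked on a parent domain, under the assumption that the parent does the real vector allocation; all demo_* names are invented for illustration:]

	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	/* This level's chip simply forwards mask/unmask/eoi to the parent. */
	static struct irq_chip demo_chip = {
		.name		= "demo",
		.irq_mask	= irq_chip_mask_parent,
		.irq_unmask	= irq_chip_unmask_parent,
		.irq_eoi	= irq_chip_eoi_parent,
	};

	static int demo_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *arg)
	{
		unsigned int i;
		int ret;

		/* First let the parent level allocate its resources ... */
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;

		/* ... then install this level's chip on top of it. */
		for (i = 0; i < nr_irqs; i++)
			irq_domain_set_hwirq_and_chip(domain, virq + i, virq + i,
						      &demo_chip, NULL);
		return 0;
	}

	static const struct irq_domain_ops demo_domain_ops = {
		.alloc	= demo_domain_alloc,
		.free	= irq_domain_free_irqs_common,
	};

	/* Stack the demo domain on @parent; size 0 means no linear revmap. */
	static struct irq_domain *demo_create_domain(struct device_node *node,
						     struct irq_domain *parent)
	{
		return irq_domain_add_hierarchy(parent, 0, 0, node,
						&demo_domain_ops, NULL);
	}
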
* 'irq-irqdomain-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits)
genirq: Move irq_chip_write_msi_msg() helper to core
PCI/MSI: Allow an msi_controller to be associated to an irq domain
PCI/MSI: Provide mechanism to alloc/free MSI/MSIX interrupt from irqdomain
PCI/MSI: Enhance core to support hierarchy irqdomain
PCI/MSI: Move cached entry functions to irq core
genirq: Provide default callbacks for msi_domain_ops
genirq: Introduce msi_domain_alloc/free_irqs()
asm-generic: Add msi.h
genirq: Add generic msi irq domain support
genirq: Introduce callback irq_chip.irq_write_msi_msg
genirq: Work around __irq_set_handler vs stacked domains ordering issues
irqdomain: Introduce helper function irq_domain_add_hierarchy()
irqdomain: Implement a method to automatically call parent domains alloc/free
genirq: Introduce helper irq_domain_set_info() to reduce duplicated code
genirq: Split out flow handler typedefs into seperate header file
genirq: Add IRQ_SET_MASK_OK_DONE to support stacked irqchip
genirq: Introduce irq_chip.irq_compose_msi_msg() to support stacked irqchip
genirq: Add more helper functions to support stacked irq_chip
genirq: Introduce helper functions to support stacked irq_chip
irqdomain: Do irq_find_mapping and set_type for hierarchy irqdomain in case OF
...
Diffstat (limited to 'drivers/pci/msi.c')
-rw-r--r-- | drivers/pci/msi.c | 394 |
1 file changed, 307 insertions(+), 87 deletions(-)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 084587d7cd13..fd60806d3fd0 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -19,19 +19,82 @@
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/irqdomain.h>
 
 #include "pci.h"
 
 static int pci_msi_enable = 1;
+int pci_msi_ignore_mask;
 
 #define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
 
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+static struct irq_domain *pci_msi_default_domain;
+static DEFINE_MUTEX(pci_msi_domain_lock);
+
+struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
+{
+	return pci_msi_default_domain;
+}
+
+static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
+{
+	struct irq_domain *domain = NULL;
+
+	if (dev->bus->msi)
+		domain = dev->bus->msi->domain;
+	if (!domain)
+		domain = arch_get_pci_msi_domain(dev);
+
+	return domain;
+}
+
+static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	struct irq_domain *domain;
+
+	domain = pci_msi_get_domain(dev);
+	if (domain)
+		return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
+
+	return arch_setup_msi_irqs(dev, nvec, type);
+}
+
+static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
+{
+	struct irq_domain *domain;
+
+	domain = pci_msi_get_domain(dev);
+	if (domain)
+		pci_msi_domain_free_irqs(domain, dev);
+	else
+		arch_teardown_msi_irqs(dev);
+}
+#else
+#define pci_msi_setup_msi_irqs		arch_setup_msi_irqs
+#define pci_msi_teardown_msi_irqs	arch_teardown_msi_irqs
+#endif
 
 /* Arch hooks */
 
+struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
+{
+	return NULL;
+}
+
+static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
+{
+	struct msi_controller *msi_ctrl = dev->bus->msi;
+
+	if (msi_ctrl)
+		return msi_ctrl;
+
+	return pcibios_msi_controller(dev);
+}
+
 int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
-	struct msi_chip *chip = dev->bus->msi;
+	struct msi_controller *chip = pci_msi_controller(dev);
 	int err;
 
 	if (!chip || !chip->setup_irq)
@@ -48,7 +111,7 @@ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 
 void __weak arch_teardown_msi_irq(unsigned int irq)
 {
-	struct msi_chip *chip = irq_get_chip_data(irq);
+	struct msi_controller *chip = irq_get_chip_data(irq);
 
 	if (!chip || !chip->teardown_irq)
 		return;
@@ -85,19 +148,13 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  */
 void default_teardown_msi_irqs(struct pci_dev *dev)
 {
+	int i;
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
-		int i, nvec;
-		if (entry->irq == 0)
-			continue;
-		if (entry->nvec_used)
-			nvec = entry->nvec_used;
-		else
-			nvec = 1 << entry->msi_attrib.multiple;
-		for (i = 0; i < nvec; i++)
-			arch_teardown_msi_irq(entry->irq + i);
-	}
+	list_for_each_entry(entry, &dev->msi_list, list)
+		if (entry->irq)
+			for (i = 0; i < entry->nvec_used; i++)
+				arch_teardown_msi_irq(entry->irq + i);
 }
 
 void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -120,7 +177,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
 	}
 
 	if (entry)
-		__write_msi_msg(entry, &entry->msg);
+		__pci_write_msi_msg(entry, &entry->msg);
 }
 
 void __weak arch_restore_msi_irqs(struct pci_dev *dev)
@@ -163,11 +220,11 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
-u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 
-	if (!desc->msi_attrib.maskbit)
+	if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
 		return 0;
 
 	mask_bits &= ~mask;
@@ -177,14 +234,9 @@ u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 	return mask_bits;
 }
 
-__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
-{
-	return default_msi_mask_irq(desc, mask, flag);
-}
-
 static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
-	desc->masked = arch_msi_mask_irq(desc, mask, flag);
+	desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
 }
 
 /*
@@ -194,11 +246,15 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 * file. This saves a few milliseconds when initialising devices with lots
 * of MSI-X interrupts.
 */
-u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
 						PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+	if (pci_msi_ignore_mask)
+		return 0;
+
 	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
 	if (flag)
 		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
@@ -207,14 +263,9 @@ u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
 	return mask_bits;
 }
 
-__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
-{
-	return default_msix_mask_irq(desc, flag);
-}
-
 static void msix_mask_irq(struct msi_desc *desc, u32 flag)
 {
-	desc->masked = arch_msix_mask_irq(desc, flag);
+	desc->masked = __pci_msix_desc_mask_irq(desc, flag);
 }
 
 static void msi_set_mask_bit(struct irq_data *data, u32 flag)
@@ -230,12 +281,20 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag)
 	}
 }
 
-void mask_msi_irq(struct irq_data *data)
+/**
+ * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
+ * @data:	pointer to irqdata associated to that interrupt
+ */
+void pci_msi_mask_irq(struct irq_data *data)
 {
 	msi_set_mask_bit(data, 1);
 }
 
-void unmask_msi_irq(struct irq_data *data)
+/**
+ * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
+ * @data:	pointer to irqdata associated to that interrupt
+ */
+void pci_msi_unmask_irq(struct irq_data *data)
 {
 	msi_set_mask_bit(data, 0);
 }
@@ -244,12 +303,11 @@ void default_restore_msi_irqs(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	list_for_each_entry(entry, &dev->msi_list, list)
 		default_restore_msi_irq(dev, entry->irq);
-	}
 }
 
-void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
 	BUG_ON(entry->dev->current_state != PCI_D0);
@@ -279,32 +337,7 @@
 	}
 }
 
-void read_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-	struct msi_desc *entry = irq_get_msi_desc(irq);
-
-	__read_msi_msg(entry, msg);
-}
-
-void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
-{
-	/* Assert that the cache is valid, assuming that
-	 * valid messages are not all-zeroes.
-	 */
-	BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
-		 entry->msg.data));
-
-	*msg = entry->msg;
-}
-
-void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-	struct msi_desc *entry = irq_get_msi_desc(irq);
-
-	__get_cached_msi_msg(entry, msg);
-}
-EXPORT_SYMBOL_GPL(get_cached_msi_msg);
-
-void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
 	if (entry->dev->current_state != PCI_D0) {
 		/* Don't touch the hardware now */
@@ -341,34 +374,27 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 	entry->msg = *msg;
 }
 
-void write_msi_msg(unsigned int irq, struct msi_msg *msg)
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
 	struct msi_desc *entry = irq_get_msi_desc(irq);
 
-	__write_msi_msg(entry, msg);
+	__pci_write_msi_msg(entry, msg);
 }
-EXPORT_SYMBOL_GPL(write_msi_msg);
+EXPORT_SYMBOL_GPL(pci_write_msi_msg);
 
 static void free_msi_irqs(struct pci_dev *dev)
 {
 	struct msi_desc *entry, *tmp;
 	struct attribute **msi_attrs;
 	struct device_attribute *dev_attr;
-	int count = 0;
+	int i, count = 0;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
-		int i, nvec;
-		if (!entry->irq)
-			continue;
-		if (entry->nvec_used)
-			nvec = entry->nvec_used;
-		else
-			nvec = 1 << entry->msi_attrib.multiple;
-		for (i = 0; i < nvec; i++)
-			BUG_ON(irq_has_action(entry->irq + i));
-	}
+	list_for_each_entry(entry, &dev->msi_list, list)
+		if (entry->irq)
+			for (i = 0; i < entry->nvec_used; i++)
+				BUG_ON(irq_has_action(entry->irq + i));
 
-	arch_teardown_msi_irqs(dev);
+	pci_msi_teardown_msi_irqs(dev);
 
 	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
@@ -451,9 +477,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
 	arch_restore_msi_irqs(dev);
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	list_for_each_entry(entry, &dev->msi_list, list)
 		msix_mask_irq(entry, entry->masked);
-	}
 
 	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
@@ -497,9 +522,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
 	int count = 0;
 
 	/* Determine how many msi entries we have */
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	list_for_each_entry(entry, &pdev->msi_list, list)
 		++num_msi;
-	}
 	if (!num_msi)
 		return 0;
 
@@ -559,7 +583,7 @@ error_attrs:
 	return ret;
 }
 
-static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
+static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
 {
 	u16 control;
 	struct msi_desc *entry;
@@ -577,6 +601,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
 	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
 	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
+	entry->msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));
+	entry->nvec_used		= nvec;
 
 	if (control & PCI_MSI_FLAGS_64BIT)
 		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -623,7 +649,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 
 	msi_set_enable(dev, 0);	/* Disable MSI during set up */
 
-	entry = msi_setup_entry(dev);
+	entry = msi_setup_entry(dev, nvec);
 	if (!entry)
 		return -ENOMEM;
 
@@ -634,7 +660,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	list_add_tail(&entry->list, &dev->msi_list);
 
 	/* Configure MSI capability structure */
-	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
+	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret) {
 		msi_mask_irq(entry, mask, ~mask);
 		free_msi_irqs(dev);
@@ -701,6 +727,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 		entry->msi_attrib.entry_nr	= entries[i].entry;
 		entry->msi_attrib.default_irq	= dev->irq;
 		entry->mask_base		= base;
+		entry->nvec_used		= 1;
 
 		list_add_tail(&entry->list, &dev->msi_list);
 	}
@@ -719,7 +746,6 @@ static void msix_program_entries(struct pci_dev *dev,
 						PCI_MSIX_ENTRY_VECTOR_CTRL;
 
 		entries[i].vector = entry->irq;
-		irq_set_msi_desc(entry->irq, entry);
 		entry->masked = readl(entry->mask_base + offset);
 		msix_mask_irq(entry, 1);
 		i++;
@@ -756,7 +782,7 @@ static int msix_capability_init(struct pci_dev *dev,
 	if (ret)
 		return ret;
 
-	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
 	if (ret)
 		goto out_avail;
@@ -895,7 +921,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	/* Return the device with MSI unmasked as initial states */
 	mask = msi_mask(desc->msi_attrib.multi_cap);
 	/* Keep cached state to be restored */
-	arch_msi_mask_irq(desc, mask, ~mask);
+	__pci_msi_desc_mask_irq(desc, mask, ~mask);
 
 	/* Restore dev->irq to its default pin-assertion irq */
 	dev->irq = desc->msi_attrib.default_irq;
@@ -993,7 +1019,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
 	/* Return the device with MSI-X masked as initial states */
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		/* Keep cached states to be restored */
-		arch_msix_mask_irq(entry, 1);
+		__pci_msix_desc_mask_irq(entry, 1);
 	}
 
 	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
@@ -1138,3 +1164,197 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 	return nvec;
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
+
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+/**
+ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
+ * @irq_data:	Pointer to interrupt data of the MSI interrupt
+ * @msg:	Pointer to the message
+ */
+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+	struct msi_desc *desc = irq_data->msi_desc;
+
+	/*
+	 * For MSI-X desc->irq is always equal to irq_data->irq. For
+	 * MSI only the first interrupt of MULTI MSI passes the test.
+	 */
+	if (desc->irq == irq_data->irq)
+		__pci_write_msi_msg(desc, msg);
+}
+
+/**
+ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
+ * @dev:	Pointer to the PCI device
+ * @desc:	Pointer to the msi descriptor
+ *
+ * The ID number is only used within the irqdomain.
+ */
+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
+					  struct msi_desc *desc)
+{
+	return (irq_hw_number_t)desc->msi_attrib.entry_nr |
+		PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
+		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+}
+
+static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
+{
+	return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
+}
+
+/**
+ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
+ * @domain:	The interrupt domain to check
+ * @info:	The domain info for verification
+ * @dev:	The device to check
+ *
+ * Returns:
+ *  0 if the functionality is supported
+ *  1 if Multi MSI is requested, but the domain does not support it
+ *  -ENOTSUPP otherwise
+ */
+int pci_msi_domain_check_cap(struct irq_domain *domain,
+			     struct msi_domain_info *info, struct device *dev)
+{
+	struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
+
+	/* Special handling to support pci_enable_msi_range() */
+	if (pci_msi_desc_is_multi_msi(desc) &&
+	    !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
+		return 1;
+	else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
+		return -ENOTSUPP;
+
+	return 0;
+}
+
+static int pci_msi_domain_handle_error(struct irq_domain *domain,
+				       struct msi_desc *desc, int error)
+{
+	/* Special handling to support pci_enable_msi_range() */
+	if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
+		return 1;
+
+	return error;
+}
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
+				    struct msi_desc *desc)
+{
+	arg->desc = desc;
+	arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
+					       desc);
+}
+#else
+#define pci_msi_domain_set_desc		NULL
+#endif
+
+static struct msi_domain_ops pci_msi_domain_ops_default = {
+	.set_desc	= pci_msi_domain_set_desc,
+	.msi_check	= pci_msi_domain_check_cap,
+	.handle_error	= pci_msi_domain_handle_error,
+};
+
+static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
+{
+	struct msi_domain_ops *ops = info->ops;
+
+	if (ops == NULL) {
+		info->ops = &pci_msi_domain_ops_default;
+	} else {
+		if (ops->set_desc == NULL)
+			ops->set_desc = pci_msi_domain_set_desc;
+		if (ops->msi_check == NULL)
+			ops->msi_check = pci_msi_domain_check_cap;
+		if (ops->handle_error == NULL)
+			ops->handle_error = pci_msi_domain_handle_error;
+	}
+}
+
+static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
+{
+	struct irq_chip *chip = info->chip;
+
+	BUG_ON(!chip);
+	if (!chip->irq_write_msi_msg)
+		chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+}
+
+/**
+ * pci_msi_create_irq_domain - Creat a MSI interrupt domain
+ * @node:	Optional device-tree node of the interrupt controller
+ * @info:	MSI domain info
+ * @parent:	Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a MSI interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+					     struct msi_domain_info *info,
+					     struct irq_domain *parent)
+{
+	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+		pci_msi_domain_update_dom_ops(info);
+	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+		pci_msi_domain_update_chip_ops(info);
+
+	return msi_create_irq_domain(node, info, parent);
+}
+
+/**
+ * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
+ * @domain:	The interrupt domain to allocate from
+ * @dev:	The device for which to allocate
+ * @nvec:	The number of interrupts to allocate
+ * @type:	Unused to allow simpler migration from the arch_XXX interfaces
+ *
+ * Returns:
+ * A virtual interrupt number or an error code in case of failure
+ */
+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
+			      int nvec, int type)
+{
+	return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
+}
+
+/**
+ * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
+ * @domain:	The interrupt domain
+ * @dev:	The device for which to free interrupts
+ */
+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
+{
+	msi_domain_free_irqs(domain, &dev->dev);
+}
+
+/**
+ * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
+ * @node:	Optional device-tree node of the interrupt controller
+ * @info:	MSI domain info
+ * @parent:	Parent irq domain
+ *
+ * Returns: A domain pointer or NULL in case of failure. If successful
+ * the default PCI/MSI irqdomain pointer is updated.
+ */
+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+		struct msi_domain_info *info, struct irq_domain *parent)
+{
+	struct irq_domain *domain;
+
+	mutex_lock(&pci_msi_domain_lock);
+	if (pci_msi_default_domain) {
+		pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
+		domain = NULL;
+	} else {
+		domain = pci_msi_create_irq_domain(node, info, parent);
+		pci_msi_default_domain = domain;
+	}
+	mutex_unlock(&pci_msi_domain_lock);
+
+	return domain;
+}
+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
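
[Editor's note: for orientation, a hedged sketch of how an irqchip driver might sit on top of the PCI/MSI domain entry points added by the diff above. Only pci_msi_mask_irq(), pci_msi_unmask_irq(), pci_msi_create_irq_domain() and the MSI_FLAG_* constants come from this merge; all demo_* names are invented:]

	/*
	 * Hypothetical consumer of the new PCI/MSI domain API. With
	 * MSI_FLAG_USE_DEF_DOM_OPS / MSI_FLAG_USE_DEF_CHIP_OPS set,
	 * pci_msi_create_irq_domain() fills in set_desc, msi_check,
	 * handle_error and irq_write_msi_msg, so a simple driver only
	 * needs to provide mask/unmask.
	 */
	static struct irq_chip demo_msi_chip = {
		.name		= "demo-MSI",
		.irq_unmask	= pci_msi_unmask_irq,
		.irq_mask	= pci_msi_mask_irq,
		/* .irq_write_msi_msg is filled in by the default chip ops */
	};

	static struct msi_domain_info demo_msi_domain_info = {
		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
		.chip	= &demo_msi_chip,
	};

	/* Stack the PCI/MSI domain on @parent (e.g. a remapping or vector domain). */
	static struct irq_domain *demo_init_msi(struct device_node *node,
						struct irq_domain *parent)
	{
		return pci_msi_create_irq_domain(node, &demo_msi_domain_info,
						 parent);
	}
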