author     Michael S. Tsirkin <mst@redhat.com>      2013-05-29 11:52:21 +0930
committer  Rusty Russell <rusty@rustcorp.com.au>    2015-01-21 16:28:49 +1030
commit     8cfc99b58366ea9f391fe0da7d16940ca6a1d9c0
tree       8e9941de0e82da7d4f49856c6134d50173a05bab
parent     eb29d8d2aad70636ea23810b4868693673d630d5
s390: add pci_iomap_range
Virtio drivers should map the part of the range they need, not
necessarily all of it.
To this end, support mapping ranges within a BAR on s390.
Since multiple ranges can now be mapped within a BAR, we keep track of
the number of mappings created, and only clear out the mapping for a BAR
when this number reaches 0.
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: linux-pci@vger.kernel.org
Tested-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
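
For context, a hedged sketch of the driver-side usage this enables follows. It is not part of the patch: the probe function name, BAR number, offset and length are made-up values, chosen only to show that pci_iomap_range() maps a sub-range of a BAR where pci_iomap() would map it from offset 0.

#include <linux/pci.h>
#include <linux/io.h>

/* Hypothetical probe: map only a 0x100-byte register window that starts
 * 0x2000 bytes into BAR 0, instead of mapping the whole BAR.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/* offset = 0x2000, max = 0x100: only this sub-range is mapped */
	regs = pci_iomap_range(pdev, 0, 0x2000, 0x100);
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	/* ... ioread32(regs + reg) / iowrite32(val, regs + reg) ... */

	pci_iounmap(pdev, regs);
	pci_disable_device(pdev);
	return 0;
}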
 arch/s390/include/asm/pci_io.h |  1
 arch/s390/pci/pci.c            | 34
 2 files changed, 28 insertions(+), 7 deletions(-)
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index f664e96f48c7..1a9a98de5bde 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -16,6 +16,7 @@
 struct zpci_iomap_entry {
 	u32		fh;
 	u8		bar;
+	u16		count;
 };
 
 extern struct zpci_iomap_entry *zpci_iomap_start;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 3290f11ae1d9..753a56731951 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 }
 
 /* Create a virtual mapping cookie for a PCI BAR */
-void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
+void __iomem *pci_iomap_range(struct pci_dev *pdev,
+			      int bar,
+			      unsigned long offset,
+			      unsigned long max)
 {
 	struct zpci_dev *zdev = get_zdev(pdev);
 	u64 addr;
@@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
 	idx = zdev->bars[bar].map_idx;
 	spin_lock(&zpci_iomap_lock);
-	zpci_iomap_start[idx].fh = zdev->fh;
-	zpci_iomap_start[idx].bar = bar;
+	if (zpci_iomap_start[idx].count++) {
+		BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
+		       zpci_iomap_start[idx].bar != bar);
+	} else {
+		zpci_iomap_start[idx].fh = zdev->fh;
+		zpci_iomap_start[idx].bar = bar;
+	}
+	/* Detect overrun */
+	BUG_ON(!zpci_iomap_start[idx].count);
 	spin_unlock(&zpci_iomap_lock);
 
 	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
-	return (void __iomem *) addr;
+	return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap);
+EXPORT_SYMBOL_GPL(pci_iomap_range);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
 
 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 {
 	unsigned int idx;
@@ -285,8 +301,12 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
 
 	spin_lock(&zpci_iomap_lock);
-	zpci_iomap_start[idx].fh = 0;
-	zpci_iomap_start[idx].bar = 0;
+	/* Detect underrun */
+	BUG_ON(!zpci_iomap_start[idx].count);
+	if (!--zpci_iomap_start[idx].count) {
+		zpci_iomap_start[idx].fh = 0;
+		zpci_iomap_start[idx].bar = 0;
+	}
 	spin_unlock(&zpci_iomap_lock);
 }
 EXPORT_SYMBOL_GPL(pci_iounmap);
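
To make the behaviour of the new count field concrete, here is a hypothetical call sequence, not code from this patch (pdev, the offsets and the lengths are placeholders): two mappings of the same BAR share one zpci_iomap_entry slot, and the slot is only cleared when the last mapping is undone.

#include <linux/pci.h>
#include <linux/io.h>

/* Hypothetical sequence: two sub-range mappings of BAR 0 on the same device. */
static void example_refcount(struct pci_dev *pdev)
{
	void __iomem *a, *b;

	a = pci_iomap_range(pdev, 0, 0x0000, 0x80); /* count 0 -> 1: fh/bar recorded   */
	b = pci_iomap_range(pdev, 0, 0x1000, 0x80); /* count 1 -> 2: fh/bar must match */

	pci_iounmap(pdev, a); /* count 2 -> 1: entry kept, b stays usable        */
	pci_iounmap(pdev, b); /* count 1 -> 0: fh and bar cleared for this slot  */
}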