author     Linus Torvalds <torvalds@linux-foundation.org>   2012-12-20 10:07:25 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-20 10:07:25 -0800
commit     787314c35fbb97e02823a1b8eb8cfa58f366cd49
tree       3fe5a484c1846c80361217a726997484533e8344   /drivers/iommu
parent     6491d4d02893d9787ba67279595990217177b351
parent     9c6ecf6a3ade2dc4b03a239af68058b22897af41
Merge tag 'iommu-updates-v3.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
"A few new features this merge-window. The most important one is
probably, that dma-debug now warns if a dma-handle is not checked with
dma_mapping_error by the device driver. This requires minor changes
to some architectures which make use of dma-debug. Most of these
changes have the respective Acks by the Arch-Maintainers.
Besides that, there are updates to the AMD IOMMU driver to refactor
the IOMMU-Groups support and to make sure it does not trigger a
hardware erratum.
The OMAP changes (for which I pulled in a branch from Tony Lindgren's
tree) have a conflict in linux-next with the arm-soc tree. The
conflict is in the file arch/arm/mach-omap2/clock44xx_data.c which is
deleted in the arm-soc tree. It is safe to delete the file to resolve
the conflict; similar changes were made in the arm-soc tree as part of
the common clock framework migration. A missing hunk from the patch
in the IOMMU tree will be submitted as a separate patch once the
merge window closes."
* tag 'iommu-updates-v3.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (29 commits)
ARM: dma-mapping: support debug_dma_mapping_error
ARM: OMAP4: hwmod data: ipu and dsp to use parent clocks instead of leaf clocks
iommu/omap: Adapt to runtime pm
iommu/omap: Migrate to hwmod framework
iommu/omap: Keep mmu enabled when requested
iommu/omap: Remove redundant clock handling on ISR
iommu/amd: Remove obsolete comment
iommu/amd: Don't use 512GB pages
iommu/tegra: smmu: Move bus_set_iommu after probe for multi arch
iommu/tegra: gart: Move bus_set_iommu after probe for multi arch
iommu/tegra: smmu: Remove unnecessary PTC/TLB flush all
tile: dma_debug: add debug_dma_mapping_error support
sh: dma_debug: add debug_dma_mapping_error support
powerpc: dma_debug: add debug_dma_mapping_error support
mips: dma_debug: add debug_dma_mapping_error support
microblaze: dma-mapping: support debug_dma_mapping_error
ia64: dma_debug: add debug_dma_mapping_error support
c6x: dma_debug: add debug_dma_mapping_error support
ARM64: dma_debug: add debug_dma_mapping_error support
intel-iommu: Prevent devices with RMRRs from being placed into SI Domain
...
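For reference, the driver-side pattern that the new dma-debug check looks for is the standard DMA-API one: every handle returned by dma_map_single()/dma_map_page() must be run through dma_mapping_error() before it is used. A minimal sketch (the helper name and buffer handling are made up for illustration, not taken from this merge):

#include <linux/dma-mapping.h>

/* Hypothetical helper: maps a buffer and checks the handle the way
 * dma-debug now expects.  Skipping the dma_mapping_error() call below
 * is exactly what triggers the new warning. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}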
Diffstat (limited to 'drivers/iommu')
-rw-r--r--   drivers/iommu/amd_iommu.c        | 196
-rw-r--r--   drivers/iommu/amd_iommu_types.h  |   1
-rw-r--r--   drivers/iommu/intel-iommu.c      |  31
-rw-r--r--   drivers/iommu/omap-iommu.c       |  68
-rw-r--r--   drivers/iommu/omap-iommu.h       |   3
-rw-r--r--   drivers/iommu/omap-iommu2.c      |  36
-rw-r--r--   drivers/iommu/tegra-gart.c       |   2
-rw-r--r--   drivers/iommu/tegra-smmu.c       |   6
8 files changed, 215 insertions, 128 deletions
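In the AMD IOMMU change below, supported page sizes are advertised as a bitmap: a set bit n means a page size of 2^n bytes is supported, so ~0xFFFUL advertises every size of 4 KiB and up, and clearing 2ULL << 38 (bit 39, i.e. 512 GiB) hides the erratum-affected size. A rough standalone sketch of that encoding (illustration only, not kernel code; ULL is used so the sketch also builds on 32-bit hosts):

#include <stdio.h>

/* Mirror of the old and new masks from the hunk below. */
#define OLD_PGSIZES	(~0xFFFULL)			/* every size >= 4 KiB */
#define NEW_PGSIZES	((~0xFFFULL) & ~(2ULL << 38))	/* ... minus 512 GiB   */

int main(void)
{
	unsigned long long bit_512g = 1ULL << 39;	/* 2^39 bytes = 512 GiB */

	printf("512GB pages before: %s\n", (OLD_PGSIZES & bit_512g) ? "advertised" : "masked");
	printf("512GB pages after:  %s\n", (NEW_PGSIZES & bit_512g) ? "advertised" : "masked");
	return 0;
}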
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 55074cba20eb..c1c74e030a58 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -57,17 +57,9 @@
  * physically contiguous memory regions it is mapping into page sizes
  * that we support.
  *
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is an order of a 4KiB page and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are an order of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
+ * 512GB Pages are not supported due to a hardware bug
  */
-#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
@@ -140,6 +132,9 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
 	list_del(&dev_data->dev_data_list);
 	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 
+	if (dev_data->group)
+		iommu_group_put(dev_data->group);
+
 	kfree(dev_data);
 }
 
@@ -274,41 +269,23 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 	*from = to;
 }
 
-#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
-static int iommu_init_device(struct device *dev)
+static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
 {
-	struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
-	struct iommu_dev_data *dev_data;
-	struct iommu_group *group;
-	u16 alias;
-	int ret;
-
-	if (dev->archdata.iommu)
-		return 0;
-
-	dev_data = find_dev_data(get_device_id(dev));
-	if (!dev_data)
-		return -ENOMEM;
-
-	alias = amd_iommu_alias_table[dev_data->devid];
-	if (alias != dev_data->devid) {
-		struct iommu_dev_data *alias_data;
+	while (!bus->self) {
+		if (!pci_is_root_bus(bus))
+			bus = bus->parent;
+		else
+			return ERR_PTR(-ENODEV);
+	}
 
-		alias_data = find_dev_data(alias);
-		if (alias_data == NULL) {
-			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
-				dev_name(dev));
-			free_dev_data(dev_data);
-			return -ENOTSUPP;
-		}
-		dev_data->alias_data = alias_data;
+	return bus;
+}
 
-		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
-	}
+#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 
-	if (dma_pdev == NULL)
-		dma_pdev = pci_dev_get(pdev);
+static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
+{
+	struct pci_dev *dma_pdev = pdev;
 
 	/* Account for quirked devices */
 	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
@@ -330,14 +307,9 @@ static int iommu_init_device(struct device *dev)
 	 * Finding the next device may require skipping virtual buses.
 	 */
 	while (!pci_is_root_bus(dma_pdev->bus)) {
-		struct pci_bus *bus = dma_pdev->bus;
-
-		while (!bus->self) {
-			if (!pci_is_root_bus(bus))
-				bus = bus->parent;
-			else
-				goto root_bus;
-		}
+		struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
+		if (IS_ERR(bus))
+			break;
 
 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 			break;
@@ -345,19 +317,137 @@ static int iommu_init_device(struct device *dev)
 		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
 	}
 
-root_bus:
-	group = iommu_group_get(&dma_pdev->dev);
-	pci_dev_put(dma_pdev);
+	return dma_pdev;
+}
+
+static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
+{
+	struct iommu_group *group = iommu_group_get(&pdev->dev);
+	int ret;
+
 	if (!group) {
 		group = iommu_group_alloc();
 		if (IS_ERR(group))
 			return PTR_ERR(group);
+
+		WARN_ON(&pdev->dev != dev);
 	}
 
 	ret = iommu_group_add_device(group, dev);
-	iommu_group_put(group);
+	return ret;
+}
+
+static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
+				    struct device *dev)
+{
+	if (!dev_data->group) {
+		struct iommu_group *group = iommu_group_alloc();
+		if (IS_ERR(group))
+			return PTR_ERR(group);
+
+		dev_data->group = group;
+	}
+
+	return iommu_group_add_device(dev_data->group, dev);
+}
+
+static int init_iommu_group(struct device *dev)
+{
+	struct iommu_dev_data *dev_data;
+	struct iommu_group *group;
+	struct pci_dev *dma_pdev;
+	int ret;
+
+	group = iommu_group_get(dev);
+	if (group) {
+		iommu_group_put(group);
+		return 0;
+	}
+
+	dev_data = find_dev_data(get_device_id(dev));
+	if (!dev_data)
+		return -ENOMEM;
+
+	if (dev_data->alias_data) {
+		u16 alias;
+		struct pci_bus *bus;
+
+		if (dev_data->alias_data->group)
+			goto use_group;
+
+		/*
+		 * If the alias device exists, it's effectively just a first
+		 * level quirk for finding the DMA source.
+		 */
+		alias = amd_iommu_alias_table[dev_data->devid];
+		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+		if (dma_pdev) {
+			dma_pdev = get_isolation_root(dma_pdev);
+			goto use_pdev;
+		}
+
+		/*
+		 * If the alias is virtual, try to find a parent device
+		 * and test whether the IOMMU group is actualy rooted above
+		 * the alias. Be careful to also test the parent device if
+		 * we think the alias is the root of the group.
+		 */
+		bus = pci_find_bus(0, alias >> 8);
+		if (!bus)
+			goto use_group;
+
+		bus = find_hosted_bus(bus);
+		if (IS_ERR(bus) || !bus->self)
+			goto use_group;
+
+		dma_pdev = get_isolation_root(pci_dev_get(bus->self));
+		if (dma_pdev != bus->self || (dma_pdev->multifunction &&
+		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
+			goto use_pdev;
+
+		pci_dev_put(dma_pdev);
+		goto use_group;
+	}
+
+	dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
+use_pdev:
+	ret = use_pdev_iommu_group(dma_pdev, dev);
+	pci_dev_put(dma_pdev);
+	return ret;
+use_group:
+	return use_dev_data_iommu_group(dev_data->alias_data, dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
+	u16 alias;
+	int ret;
+
+	if (dev->archdata.iommu)
+		return 0;
+
+	dev_data = find_dev_data(get_device_id(dev));
+	if (!dev_data)
+		return -ENOMEM;
+
+	alias = amd_iommu_alias_table[dev_data->devid];
+	if (alias != dev_data->devid) {
+		struct iommu_dev_data *alias_data;
+
+		alias_data = find_dev_data(alias);
+		if (alias_data == NULL) {
+			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+				dev_name(dev));
+			free_dev_data(dev_data);
+			return -ENOTSUPP;
+		}
+		dev_data->alias_data = alias_data;
+	}
 
+	ret = init_iommu_group(dev);
 	if (ret)
 		return ret;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9aa3d079ff0..e38ab438bb34 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -426,6 +426,7 @@ struct iommu_dev_data {
 	struct iommu_dev_data *alias_data;/* The alias dev_data */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;			  /* Domain attach reference count */
+	struct iommu_group *group;	  /* IOMMU group for virtual aliases */
 	u16 devid;			  /* PCI Device ID */
 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
 	bool passthrough;		  /* Default for device is pt_domain */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 9476c1b96090..c2c07a4a7f21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2327,8 +2327,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static bool device_has_rmrr(struct pci_dev *dev)
+{
+	struct dmar_rmrr_unit *rmrr;
+	int i;
+
+	for_each_rmrr_units(rmrr) {
+		for (i = 0; i < rmrr->devices_cnt; i++) {
+			/*
+			 * Return TRUE if this RMRR contains the device that
+			 * is passed in.
+			 */
+			if (rmrr->devices[i] == dev)
+				return true;
+		}
+	}
+	return false;
+}
+
 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 {
+
+	/*
+	 * We want to prevent any device associated with an RMRR from
+	 * getting placed into the SI Domain. This is done because
+	 * problems exist when devices are moved in and out of domains
+	 * and their respective RMRR info is lost. We exempt USB devices
+	 * from this process due to their usage of RMRRs that are known
+	 * to not be needed after BIOS hand-off to OS.
+	 */
+	if (device_has_rmrr(pdev) &&
+	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
+		return 0;
+
 	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
 		return 1;
 
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index badc17c2bcb4..18108c1405e2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -16,13 +16,13 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/iommu.h>
 #include <linux/omap-iommu.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
+#include <linux/pm_runtime.h>
 
 #include <asm/cacheflush.h>
 
@@ -143,31 +143,44 @@ EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
 static int iommu_enable(struct omap_iommu *obj)
 {
 	int err;
+	struct platform_device *pdev = to_platform_device(obj->dev);
+	struct iommu_platform_data *pdata = pdev->dev.platform_data;
 
-	if (!obj)
+	if (!obj || !pdata)
 		return -EINVAL;
 
 	if (!arch_iommu)
 		return -ENODEV;
 
-	clk_enable(obj->clk);
+	if (pdata->deassert_reset) {
+		err = pdata->deassert_reset(pdev, pdata->reset_name);
+		if (err) {
+			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
+			return err;
+		}
+	}
+
+	pm_runtime_get_sync(obj->dev);
 
 	err = arch_iommu->enable(obj);
 
-	clk_disable(obj->clk);
 	return err;
 }
 
 static void iommu_disable(struct omap_iommu *obj)
 {
-	if (!obj)
-		return;
+	struct platform_device *pdev = to_platform_device(obj->dev);
+	struct iommu_platform_data *pdata = pdev->dev.platform_data;
 
-	clk_enable(obj->clk);
+	if (!obj || !pdata)
+		return;
 
 	arch_iommu->disable(obj);
 
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
+
+	if (pdata->assert_reset)
+		pdata->assert_reset(pdev, pdata->reset_name);
 }
 
 /*
@@ -290,7 +303,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 	if (!obj || !obj->nr_tlb_entries || !e)
 		return -EINVAL;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 
 	iotlb_lock_get(obj, &l);
 	if (l.base == obj->nr_tlb_entries) {
@@ -320,7 +333,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 
 	cr = iotlb_alloc_cr(obj, e);
 	if (IS_ERR(cr)) {
-		clk_disable(obj->clk);
+		pm_runtime_put_sync(obj->dev);
 		return PTR_ERR(cr);
 	}
 
@@ -334,7 +347,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 		l.vict = l.base;
 	iotlb_lock_set(obj, &l);
 out:
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 	return err;
 }
 
@@ -364,7 +377,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
 	int i;
 	struct cr_regs cr;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 
 	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 		u32 start;
@@ -383,7 +396,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
 			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
 		}
 	}
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 
 	if (i == obj->nr_tlb_entries)
 		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -397,7 +410,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
 {
 	struct iotlb_lock l;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 
 	l.base = 0;
 	l.vict = 0;
@@ -405,7 +418,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
 
 	iommu_write_reg(obj, 1, MMU_GFLUSH);
 
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 }
 
 #if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
@@ -415,11 +428,11 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
 	if (!obj || !buf)
 		return -EINVAL;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 
 	bytes = arch_iommu->dump_ctx(obj, buf, bytes);
 
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 
 	return bytes;
 }
@@ -433,7 +446,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 	struct cr_regs tmp;
 	struct cr_regs *p = crs;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 	iotlb_lock_get(obj, &saved);
 
 	for_each_iotlb_cr(obj, num, i, tmp) {
@@ -443,7 +456,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 	}
 	iotlb_lock_set(obj, &saved);
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 
 	return p - crs;
 }
@@ -807,9 +820,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 	if (!obj->refcount)
 		return IRQ_NONE;
 
-	clk_enable(obj->clk);
 	errs = iommu_report_fault(obj, &da);
-	clk_disable(obj->clk);
 	if (errs == 0)
 		return IRQ_HANDLED;
@@ -931,17 +942,10 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct iommu_platform_data *pdata = pdev->dev.platform_data;
 
-	if (pdev->num_resources != 2)
-		return -EINVAL;
-
 	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
 	if (!obj)
 		return -ENOMEM;
 
-	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
-	if (IS_ERR(obj->clk))
-		goto err_clk;
-
 	obj->nr_tlb_entries = pdata->nr_tlb_entries;
 	obj->name = pdata->name;
 	obj->dev = &pdev->dev;
@@ -984,6 +988,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 		goto err_irq;
 	platform_set_drvdata(pdev, obj);
 
+	pm_runtime_irq_safe(obj->dev);
+	pm_runtime_enable(obj->dev);
+
 	dev_info(&pdev->dev, "%s registered\n", obj->name);
 	return 0;
 
@@ -992,8 +999,6 @@ err_irq:
 err_ioremap:
 	release_mem_region(res->start, resource_size(res));
 err_mem:
-	clk_put(obj->clk);
-err_clk:
 	kfree(obj);
 	return err;
 }
@@ -1014,7 +1019,8 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
 	release_mem_region(res->start, resource_size(res));
 	iounmap(obj->regbase);
 
-	clk_put(obj->clk);
+	pm_runtime_disable(obj->dev);
+
 	dev_info(&pdev->dev, "%s removed\n", obj->name);
 	kfree(obj);
 	return 0;
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 2b5f3c04d167..120084206602 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -29,7 +29,6 @@ struct iotlb_entry {
 struct omap_iommu {
 	const char	*name;
 	struct module	*owner;
-	struct clk	*clk;
 	void __iomem	*regbase;
 	struct device	*dev;
 	void		*isr_priv;
@@ -116,8 +115,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
  * MMU Register offsets
  */
 #define MMU_REVISION		0x00
-#define MMU_SYSCONFIG		0x10
-#define MMU_SYSSTATUS		0x14
 #define MMU_IRQSTATUS		0x18
 #define MMU_IRQENABLE		0x1c
 #define MMU_WALKING_ST		0x40
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
index c02020292377..d745094a69dd 100644
--- a/drivers/iommu/omap-iommu2.c
+++ b/drivers/iommu/omap-iommu2.c
@@ -28,19 +28,6 @@
  */
 #define IOMMU_ARCH_VERSION	0x00000011
 
-/* SYSCONF */
-#define MMU_SYS_IDLE_SHIFT	3
-#define MMU_SYS_IDLE_FORCE	(0 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_NONE	(1 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_SMART	(2 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_MASK	(3 << MMU_SYS_IDLE_SHIFT)
-
-#define MMU_SYS_SOFTRESET	(1 << 1)
-#define MMU_SYS_AUTOIDLE	1
-
-/* SYSSTATUS */
-#define MMU_SYS_RESETDONE	1
-
 /* IRQSTATUS & IRQENABLE */
 #define MMU_IRQ_MULTIHITFAULT	(1 << 4)
 #define MMU_IRQ_TABLEWALKFAULT	(1 << 3)
@@ -97,7 +84,6 @@ static void __iommu_set_twl(struct omap_iommu *obj, bool on)
 static int omap2_iommu_enable(struct omap_iommu *obj)
 {
 	u32 l, pa;
-	unsigned long timeout;
 
 	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
 		return -EINVAL;
@@ -106,29 +92,10 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
 	if (!IS_ALIGNED(pa, SZ_16K))
 		return -EINVAL;
 
-	iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);
-
-	timeout = jiffies + msecs_to_jiffies(20);
-	do {
-		l = iommu_read_reg(obj, MMU_SYSSTATUS);
-		if (l & MMU_SYS_RESETDONE)
-			break;
-	} while (!time_after(jiffies, timeout));
-
-	if (!(l & MMU_SYS_RESETDONE)) {
-		dev_err(obj->dev, "can't take mmu out of reset\n");
-		return -ENODEV;
-	}
-
 	l = iommu_read_reg(obj, MMU_REVISION);
 	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
 		 (l >> 4) & 0xf, l & 0xf);
 
-	l = iommu_read_reg(obj, MMU_SYSCONFIG);
-	l &= ~MMU_SYS_IDLE_MASK;
-	l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
-	iommu_write_reg(obj, l, MMU_SYSCONFIG);
-
 	iommu_write_reg(obj, pa, MMU_TTB);
 
 	__iommu_set_twl(obj, true);
@@ -142,7 +109,6 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
 
 	l &= ~MMU_CNTL_MASK;
 	iommu_write_reg(obj, l, MMU_CNTL);
-	iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);
 
 	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
 }
@@ -271,8 +237,6 @@ omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
 	char *p = buf;
 
 	pr_reg(REVISION);
-	pr_reg(SYSCONFIG);
-	pr_reg(SYSSTATUS);
 	pr_reg(IRQSTATUS);
 	pr_reg(IRQENABLE);
 	pr_reg(WALKING_ST);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c16e8fc8a4bd..4c9db62814ff 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -398,6 +398,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	do_gart_setup(gart, NULL);
 
 	gart_handle = gart;
+	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
 	return 0;
 
 fail:
@@ -450,7 +451,6 @@ static struct platform_driver tegra_gart_driver = {
 
 static int __devinit tegra_gart_init(void)
 {
-	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
 	return platform_driver_register(&tegra_gart_driver);
 }
 
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4252d743963d..25c1210c0832 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -694,10 +694,8 @@ static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
 	*pte = _PTE_VACANT(iova);
 	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
 	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
-	if (!--(*count)) {
+	if (!--(*count))
 		free_ptbl(as, iova);
-		smmu_flush_regs(as->smmu, 0);
-	}
 }
 
 static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
@@ -1232,6 +1230,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
 
 	smmu_debugfs_create(smmu);
 	smmu_handle = smmu;
+	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
 	return 0;
 }
 
@@ -1276,7 +1275,6 @@ static struct platform_driver tegra_smmu_driver = {
 
 static int __devinit tegra_smmu_init(void)
 {
-	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
 	return platform_driver_register(&tegra_smmu_driver);
 }
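The OMAP rework above swaps open-coded clk_enable()/clk_disable() pairs for runtime PM. A minimal sketch of that general pattern with a made-up platform driver (illustration only, not the actual omap-iommu code):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Allow get/put from atomic context, then let runtime PM manage the clocks. */
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static void foo_touch_hw(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* was: clk_enable(obj->clk)  */

	/* ... program registers while the module is powered ... */

	pm_runtime_put_sync(dev);	/* was: clk_disable(obj->clk) */
}

static int foo_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}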