Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd/amd_iommu.h             5
-rw-r--r--  drivers/iommu/amd/init.c                 24
-rw-r--r--  drivers/iommu/amd/iommu.c                27
-rw-r--r--  drivers/iommu/generic_pt/iommu_pt.h       3
-rw-r--r--  drivers/iommu/intel/irq_remapping.c       8
-rw-r--r--  drivers/iommu/iommufd/io_pagetable.c      6
-rw-r--r--  drivers/iommu/iommufd/selftest.c         14
7 files changed, 67 insertions(+), 20 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 25044d28f28a..b742ef1adb35 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -173,6 +173,11 @@ static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
bool translation_pre_enabled(struct amd_iommu *iommu);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
+int amd_iommu_pdom_id_alloc(void);
+int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp);
+void amd_iommu_pdom_id_free(int id);
+void amd_iommu_pdom_id_destroy(void);
+
#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
#else
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 4b2953418977..384c90b4f90a 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1136,8 +1136,11 @@ static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
static bool __reuse_device_table(struct amd_iommu *iommu)
{
struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- u32 lo, hi, old_devtb_size;
+ struct dev_table_entry *old_dev_tbl_entry;
+ u32 lo, hi, old_devtb_size, devid;
phys_addr_t old_devtb_phys;
+ u16 dom_id;
+ bool dte_v;
u64 entry;
/* Each IOMMU use separate device table with the same size */
@@ -1173,6 +1176,22 @@ static bool __reuse_device_table(struct amd_iommu *iommu)
return false;
}
+ for (devid = 0; devid <= pci_seg->last_bdf; devid++) {
+ old_dev_tbl_entry = &pci_seg->old_dev_tbl_cpy[devid];
+ dte_v = FIELD_GET(DTE_FLAG_V, old_dev_tbl_entry->data[0]);
+ dom_id = FIELD_GET(DEV_DOMID_MASK, old_dev_tbl_entry->data[1]);
+
+ if (!dte_v || !dom_id)
+ continue;
+ /*
+ * ID reservation can fail with -ENOSPC when there
+ * are multiple devices present in the same domain,
+ * hence check only for -ENOMEM.
+ */
+ if (amd_iommu_pdom_id_reserve(dom_id, GFP_KERNEL) == -ENOMEM)
+ return false;
+ }
+
return true;
}
@@ -3127,8 +3146,7 @@ static bool __init check_ioapic_information(void)
static void __init free_dma_resources(void)
{
- ida_destroy(&pdom_ids);
-
+ amd_iommu_pdom_id_destroy();
free_unity_maps();
}
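
The comment added to __reuse_device_table() deserves a closer look: several devices in the old device table can point at the same domain ID, so the second and later reservations of that ID are expected to fail. A minimal sketch of that behaviour, assuming only the kernel IDA API (the example_* names below are hypothetical and not part of the patch):

/* requires <linux/idr.h> */
static DEFINE_IDA(example_ids);

static void example_duplicate_reserve(void)
{
        /* first device in the domain: reservation succeeds, returns 42 */
        int first = ida_alloc_range(&example_ids, 42, 42, GFP_KERNEL);
        /* second device in the same domain: slot taken, returns -ENOSPC */
        int second = ida_alloc_range(&example_ids, 42, 42, GFP_KERNEL);

        WARN_ON(first != 42 || second != -ENOSPC);
        ida_free(&example_ids, first);
}

Only -ENOMEM aborts device table reuse, because it signals a genuine allocation failure rather than an already-reserved ID.
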
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 9f1d56a5e145..5d45795c367a 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1811,17 +1811,26 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
* contain.
*
****************************************************************************/
-
-static int pdom_id_alloc(void)
+int amd_iommu_pdom_id_alloc(void)
{
return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
}
-static void pdom_id_free(int id)
+int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp)
+{
+ return ida_alloc_range(&pdom_ids, id, id, gfp);
+}
+
+void amd_iommu_pdom_id_free(int id)
{
ida_free(&pdom_ids, id);
}
+void amd_iommu_pdom_id_destroy(void)
+{
+ ida_destroy(&pdom_ids);
+}
+
static void free_gcr3_tbl_level1(u64 *tbl)
{
u64 *ptr;
@@ -1864,7 +1873,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
gcr3_info->glx = 0;
/* Free per device domain ID */
- pdom_id_free(gcr3_info->domid);
+ amd_iommu_pdom_id_free(gcr3_info->domid);
iommu_free_pages(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
@@ -1900,14 +1909,14 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
return -EBUSY;
/* Allocate per device domain ID */
- domid = pdom_id_alloc();
+ domid = amd_iommu_pdom_id_alloc();
if (domid <= 0)
return -ENOSPC;
gcr3_info->domid = domid;
gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
if (gcr3_info->gcr3_tbl == NULL) {
- pdom_id_free(domid);
+ amd_iommu_pdom_id_free(domid);
return -ENOMEM;
}
@@ -2503,7 +2512,7 @@ struct protection_domain *protection_domain_alloc(void)
if (!domain)
return NULL;
- domid = pdom_id_alloc();
+ domid = amd_iommu_pdom_id_alloc();
if (domid <= 0) {
kfree(domain);
return NULL;
@@ -2802,7 +2811,7 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
WARN_ON(!list_empty(&domain->dev_list));
pt_iommu_deinit(&domain->iommu);
- pdom_id_free(domain->id);
+ amd_iommu_pdom_id_free(domain->id);
kfree(domain);
}
@@ -2853,7 +2862,7 @@ void amd_iommu_init_identity_domain(void)
domain->ops = &identity_domain_ops;
domain->owner = &amd_iommu_ops;
- identity_domain.id = pdom_id_alloc();
+ identity_domain.id = amd_iommu_pdom_id_alloc();
protection_domain_init(&identity_domain);
}
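
With the pdom ID helpers renamed and exported, the allocate / reserve / free / destroy lifecycle is now usable outside iommu.c. A sketch of the intended pairing, assuming callers mirror the patch (the function below and the domain ID 42 are illustrative only, not part of the patch):

static int example_pdom_id_lifecycle(void)
{
        int id;

        /* early (kdump) path: keep a domain ID found in the old device table */
        if (amd_iommu_pdom_id_reserve(42, GFP_KERNEL) == -ENOMEM)
                return -ENOMEM;

        /* runtime path: allocate a fresh ID for a new protection domain */
        id = amd_iommu_pdom_id_alloc();
        if (id <= 0)
                return -ENOSPC;

        amd_iommu_pdom_id_free(id);

        /* driver teardown only: release the whole IDA */
        amd_iommu_pdom_id_destroy();
        return 0;
}
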
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index 97aeda1ad01c..3327116a441c 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -372,6 +372,9 @@ static inline struct pt_table_p *_table_alloc(struct pt_common *common,
table_mem = iommu_alloc_pages_node_sz(iommu_table->nid, gfp,
log2_to_int(lg2sz));
+ if (!table_mem)
+ return ERR_PTR(-ENOMEM);
+
if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
mode == ALLOC_NORMAL) {
int ret = iommu_pages_start_incoherent(
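
The added NULL check matters because callers of _table_alloc() use the error-pointer convention: they test the result with IS_ERR() and propagate PTR_ERR(), so a bare NULL from the page allocator would slip through unnoticed. A standalone illustration of the convention (struct example_obj and the example_* functions are hypothetical):

/* requires <linux/err.h> and <linux/slab.h> */
struct example_obj {
        int dummy;
};

static struct example_obj *example_alloc(gfp_t gfp)
{
        struct example_obj *p = kzalloc(sizeof(*p), gfp);

        if (!p)
                return ERR_PTR(-ENOMEM);        /* never hand back a bare NULL */
        return p;
}

static int example_use(void)
{
        struct example_obj *p = example_alloc(GFP_KERNEL);

        if (IS_ERR(p))
                return PTR_ERR(p);              /* -ENOMEM propagates cleanly */

        kfree(p);
        return 0;
}
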
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 4f9b01dc91e8..8bcbfe3d9c72 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1303,17 +1303,17 @@ static struct irq_chip intel_ir_chip = {
* irq_enter();
* handle_edge_irq()
* irq_chip_ack_parent()
- * irq_move_irq(); // No EOI
+ * intel_ack_posted_msi_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* handle_edge_irq()
* irq_chip_ack_parent()
- * irq_move_irq(); // No EOI
+ * intel_ack_posted_msi_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* handle_edge_irq()
* irq_chip_ack_parent()
- * irq_move_irq(); // No EOI
+ * intel_ack_posted_msi_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* apic_eoi()
@@ -1322,7 +1322,7 @@ static struct irq_chip intel_ir_chip = {
*/
static struct irq_chip intel_ir_chip_post_msi = {
.name = "INTEL-IR-POST",
- .irq_ack = irq_move_irq,
+ .irq_ack = intel_ack_posted_msi_irq,
.irq_set_affinity = intel_ir_set_affinity,
.irq_compose_msi_msg = intel_ir_compose_msi_msg,
.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 54cf4d856179..436992331111 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -495,7 +495,11 @@ int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
return -EOVERFLOW;
start_byte = start - ALIGN_DOWN(start, PAGE_SIZE);
- dmabuf = dma_buf_get(fd);
+ if (IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
+ dmabuf = dma_buf_get(fd);
+ else
+ dmabuf = ERR_PTR(-ENXIO);
+
if (!IS_ERR(dmabuf)) {
pages = iopt_alloc_dmabuf_pages(ictx, dmabuf, start_byte, start,
length,
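
The new CONFIG_DMA_SHARED_BUFFER guard relies on the standard IS_ENABLED() pattern: the condition is a compile-time constant, so when the option is disabled the compiler drops the dma-buf branch and the code falls back to an error pointer. The same pattern in isolation (example_get_dmabuf() is a hypothetical name):

/* requires <linux/dma-buf.h> and <linux/err.h> */
static struct dma_buf *example_get_dmabuf(int fd)
{
        /* folds to "if (0)" when CONFIG_DMA_SHARED_BUFFER=n */
        if (IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
                return dma_buf_get(fd);

        return ERR_PTR(-ENXIO);
}
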
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index c4322fd26f93..550ff36dec3a 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -1184,14 +1184,20 @@ static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
unsigned int mockpt_id,
unsigned long start, size_t length)
{
+ unsigned long last;
struct iommufd_ioas *ioas;
int rc;
+ if (!length)
+ return -EINVAL;
+ if (check_add_overflow(start, length - 1, &last))
+ return -EOVERFLOW;
+
ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
down_write(&ioas->iopt.iova_rwsem);
- rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
+ rc = iopt_reserve_iova(&ioas->iopt, start, last, NULL);
up_write(&ioas->iopt.iova_rwsem);
iommufd_put_object(ucmd->ictx, &ioas->obj);
return rc;
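
The selftest change computes the inclusive end of the range with check_add_overflow() instead of the raw start + length - 1, so a range that would wrap the address space is rejected with -EOVERFLOW rather than silently truncated, and a zero-length range is rejected up front. The same check in a standalone form (example_range_last() is a hypothetical helper):

/* requires <linux/overflow.h> */
static int example_range_last(unsigned long start, size_t length,
                              unsigned long *last)
{
        if (!length)
                return -EINVAL;         /* an empty range has no last byte */
        if (check_add_overflow(start, length - 1, last))
                return -EOVERFLOW;      /* e.g. start = ULONG_MAX, length = 2 */
        return 0;
}
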
@@ -1215,8 +1221,10 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
page_size = 1 << __ffs(mock->domain.pgsize_bitmap);
if (iova % page_size || length % page_size ||
(uintptr_t)uptr % page_size ||
- check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
- return -EINVAL;
+ check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end)) {
+ rc = -EINVAL;
+ goto out_put;
+ }
for (; length; length -= page_size) {
struct page *pages[1];