From cdd3a2499d30695730b22dc025c00b9b28884c6b Mon Sep 17 00:00:00 2001
From: Sai Praneeth Prakhya
Date: Fri, 24 May 2019 16:40:16 -0700
Subject: iommu/vt-d: Introduce macros useful for dumping DMAR table

A scalable mode DMAR table walk would involve looking at bits in each
stage of the walk, such as:

  1. Is PASID enabled in the context entry?
  2. What's the size of the PASID directory?
  3. Is the PASID directory entry present?
  4. Is the PASID table entry present?
  5. Number of PASID table entries?

Hence, add these macros that will later be used during this walk.
Apart from adding new macros, move existing macros (like
pasid_pde_is_present(), get_pasid_table_from_pde() and
pasid_supported()) to the appropriate header files so that they can
be reused.

Cc: Joerg Roedel
Cc: Ashok Raj
Cc: Lu Baolu
Cc: Sohil Mehta
Cc: David Woodhouse
Cc: Jacob Pan
Cc: Andy Shevchenko
Reviewed-by: Lu Baolu
Reviewed-by: Andy Shevchenko
Signed-off-by: Sai Praneeth Prakhya
Signed-off-by: Joerg Roedel
---
 include/linux/intel-iommu.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 6925a18a5ca3..4140726867a9 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -447,6 +447,12 @@ enum {
 #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)

+extern int intel_iommu_sm;
+
+#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
+#define pasid_supported(iommu) (sm_supported(iommu) && \
+ ecap_pasid((iommu)->ecap))
+
 struct pasid_entry;
 struct pasid_state_entry;
 struct page_req_dsc;
-- cgit v1.2.3


From 7423e01741dd6a5f1255f589145313f0fb1c8cbe Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Sat, 25 May 2019 13:41:22 +0800
Subject: iommu: Add API to request DMA domain for device

Normally, while probing a device, a default domain is allocated and
attached to the device. The type of the default domain is statically
defined, which results in situations where the allocated default
domain isn't suitable for the device due to some limitations. We
already have the API iommu_request_dm_for_dev() to replace a DMA
domain with an identity one. This adds
iommu_request_dma_domain_for_dev() to request a DMA domain if an
allocated identity domain isn't suitable for the device in question.
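
For orientation, a caller of the new interface could look roughly like
the sketch below. The driver name and probe flow are made up for
illustration and are not part of this patch; only
iommu_request_dma_domain_for_dev() itself comes from the series.

    #include <linux/device.h>
    #include <linux/iommu.h>

    /* Ask the IOMMU core to replace an unsuitable identity default domain. */
    static int foo_configure_iommu(struct device *dev)
    {
            int ret;

            ret = iommu_request_dma_domain_for_dev(dev);
            if (ret)
                    dev_warn(dev, "could not get a DMA domain: %d\n", ret);

            return ret;
    }
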
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 include/linux/iommu.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a815cf6f6f47..91af22a344e2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -362,6 +362,7 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
+extern int iommu_request_dma_domain_for_dev(struct device *dev);
 extern struct iommu_resv_region *
 iommu_alloc_resv_region(phys_addr_t start, size_t length,
 			int prot, enum iommu_resv_type type);
@@ -626,6 +627,11 @@ static inline int iommu_request_dm_for_dev(struct device *dev)
 	return -ENODEV;
 }

+static inline int iommu_request_dma_domain_for_dev(struct device *dev)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_attach_group(struct iommu_domain *domain,
 				     struct iommu_group *group)
 {
-- cgit v1.2.3


From 185da893fab1caa458c47f032a6f53717dbae2eb Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 20 May 2019 09:29:26 +0200
Subject: iommu/dma: Cleanup dma-iommu.h

No need for a __KERNEL__ guard outside uapi, and add a missing comment
describing the #else cpp statement. Last but not least, include
<linux/errno.h> instead of the asm version, which is frowned upon.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
Signed-off-by: Joerg Roedel
---
 include/linux/dma-iommu.h | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 476e0c54de2d..dfb83f9c24dc 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -16,9 +16,8 @@
 #ifndef __DMA_IOMMU_H
 #define __DMA_IOMMU_H

-#ifdef __KERNEL__
+#include <linux/errno.h>
 #include <linux/types.h>
-#include <asm/errno.h>

 #ifdef CONFIG_IOMMU_DMA
 #include
@@ -86,7 +85,7 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,

 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

-#else
+#else /* CONFIG_IOMMU_DMA */

 struct iommu_domain;
 struct msi_desc;
@@ -128,5 +127,4 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
 }

 #endif /* CONFIG_IOMMU_DMA */
-#endif /* __KERNEL__ */
 #endif /* __DMA_IOMMU_H */
-- cgit v1.2.3


From af751d4308a7c80434b5f40fd44288d33dc1962f Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 20 May 2019 09:29:27 +0200
Subject: iommu/dma: Remove the flush_page callback

We now have an arch_dma_prep_coherent architecture hook that is used
for the generic DMA remap allocator, and we should use the same
interface for the dma-iommu code.
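
To illustrate the direction of this change: rather than every caller
passing a flush_page() callback into iommu_dma_alloc(), the allocator
can perform the cache maintenance itself through the generic hook. The
helper below is only a sketch of that idea (the function name is
invented), not the actual dma-iommu code.

    #include <linux/dma-noncoherent.h>
    #include <linux/mm_types.h>

    /* Flush freshly allocated pages before a non-coherent device uses them. */
    static void foo_prep_pages(struct page **pages, int count, size_t page_size)
    {
            int i;

            for (i = 0; i < count; i++)
                    arch_dma_prep_coherent(pages[i], page_size);
    }
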
Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
Acked-by: Catalin Marinas
Signed-off-by: Joerg Roedel
---
 include/linux/dma-iommu.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index dfb83f9c24dc..e1ef265b578b 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -44,8 +44,7 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
  * the arch code to take care of attributes and cache maintenance
  */
 struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
-		unsigned long attrs, int prot, dma_addr_t *handle,
-		void (*flush_page)(struct device *, const void *, phys_addr_t));
+		unsigned long attrs, int prot, dma_addr_t *handle);
 void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
 		dma_addr_t *handle);

-- cgit v1.2.3


From 06d60728ff5c01795ac0bad66a5c42e3e78dcb6b Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 20 May 2019 09:29:29 +0200
Subject: iommu/dma: move the arm64 wrappers to common code

There is nothing really arm64 specific in the iommu_dma_ops
implementation, so move it to dma-iommu.c and keep a lot of symbols
self-contained. Note the implementation does depend on the
DMA_DIRECT_REMAP infrastructure for now, so we'll have to make the
DMA_IOMMU support depend on it, but this will be relaxed soon.

Signed-off-by: Christoph Hellwig
Acked-by: Robin Murphy
Signed-off-by: Joerg Roedel
---
 include/linux/dma-iommu.h | 42 +++---------------------------------------
 1 file changed, 3 insertions(+), 39 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index e1ef265b578b..b3cc3fb84079 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -24,49 +24,13 @@
 #include
 #include

-int iommu_dma_init(void);
-
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);

 /* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
-		u64 size, struct device *dev);
-
-/* General helpers for DMA-API <-> IOMMU-API interaction */
-int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
-		unsigned long attrs);
-
-/*
- * These implement the bulk of the relevant DMA mapping callbacks, but require
- * the arch code to take care of attributes and cache maintenance
- */
-struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
-		unsigned long attrs, int prot, dma_addr_t *handle);
-void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
-		dma_addr_t *handle);
-
-int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
-
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, int prot);
-int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, int prot);
-
-/*
- * Arch code with no special attribute handling may use these
- * directly as DMA mapping callbacks for simplicity
- */
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, unsigned long attrs);
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);

 /* The DMA API isn't _quite_ the whole story, though... */
 /*
@@ -91,9 +55,9 @@ struct msi_desc;
 struct msi_msg;
 struct device;

-static inline int iommu_dma_init(void)
+static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
+		u64 size)
 {
-	return 0;
 }

 static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
-- cgit v1.2.3


From a9f4d93dbeb6f5ccb50c6362ba944afe34cb8f12 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 20 May 2019 09:29:46 +0200
Subject: iommu/dma: Switch copyright boilerplace to SPDX

Signed-off-by: Christoph Hellwig
Acked-by: Robin Murphy
Signed-off-by: Joerg Roedel
---
 include/linux/dma-iommu.h | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index b3cc3fb84079..05556f4d9cce 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2014-2015 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __DMA_IOMMU_H
 #define __DMA_IOMMU_H
-- cgit v1.2.3


From 4ec066c7b1476e0ca66a7acdb575627a5d1a1ee6 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Sat, 25 May 2019 13:41:33 +0800
Subject: iommu/vt-d: Cleanup get_valid_domain_for_dev()

Previously, get_valid_domain_for_dev() was used to retrieve the DMA
domain which had been attached to the device, or to allocate one if no
domain had been attached yet. As we have delegated the DMA domain
management to the upper layer, this function is now used purely to
allocate a private DMA domain if the default domain doesn't work for
this device. Clean up the code for readability.

Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 include/linux/intel-iommu.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 4140726867a9..5b961c8ca64c 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -660,7 +660,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

 extern int dmar_ir_support(void);

-struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
 void *alloc_pgtable_page(int node);
 void free_pgtable_page(void *vaddr);
 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
-- cgit v1.2.3


From f34e1176df34b87e88eb65cd730255c913f92f8c Mon Sep 17 00:00:00 2001
From: Weitao Hou
Date: Mon, 20 May 2019 13:09:48 +0800
Subject: iommu/vt-d: Fix typo in SVM code comment

Fix 'acccess' to 'access'.
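
Stepping back to the dma-iommu consolidation a few patches above: after
that change, arch code only has to call the single
iommu_setup_dma_ops() entry point. The wrapper below is a simplified
sketch of that wiring; the function name and the use_iommu decision are
invented here, and this is not the actual arm64 implementation.

    #include <linux/dma-iommu.h>
    #include <linux/types.h>

    /* Called by the arch once a device's DMA configuration is known. */
    static void foo_arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                       u64 size, bool use_iommu)
    {
            if (use_iommu)
                    iommu_setup_dma_ops(dev, dma_base, size);
            /* otherwise the device keeps the default direct-mapping DMA ops */
    }
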
Signed-off-by: Weitao Hou
Signed-off-by: Joerg Roedel
---
 include/linux/intel-svm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index e3f76315ca4d..8dfead70699c 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -57,7 +57,7 @@ struct svm_dev_ops {

 /**
  * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev:	Device to be granted acccess
+ * @dev:	Device to be granted access
  * @pasid:	Address for allocated PASID
  * @flags:	Flags. Later for requesting supervisor mode, etc.
  * @ops:	Callbacks to device driver
-- cgit v1.2.3


From ec6bc2e9e81b8805390851d7c7c907b0ed08b646 Mon Sep 17 00:00:00 2001
From: Jacob Pan
Date: Mon, 3 Jun 2019 15:57:46 +0100
Subject: driver core: Add per device iommu param

DMA faults can be detected by the IOMMU at the device level. Adding a
pointer to struct device allows the IOMMU subsystem to report relevant
faults back to the device driver for further handling.
For directly assigned devices (or user space drivers), the guest OS
holds the responsibility to handle and respond to per-device IOMMU
faults. Therefore we need a fault reporting mechanism that propagates
faults beyond the IOMMU subsystem.

There are two other IOMMU data pointers under struct device today;
here we introduce iommu_param as a parent pointer such that all device
IOMMU data can be consolidated there. The idea was suggested here by
Greg KH and Joerg. The name iommu_param is chosen since iommu_data has
already been used.

Suggested-by: Greg Kroah-Hartman
Reviewed-by: Greg Kroah-Hartman
Signed-off-by: Jacob Pan
Link: https://lkml.org/lkml/2017/10/6/81
Signed-off-by: Joerg Roedel
---
 include/linux/device.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/device.h b/include/linux/device.h
index e85264fb6616..f0a975abd6e9 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -42,6 +42,7 @@ struct iommu_ops;
 struct iommu_group;
 struct iommu_fwspec;
 struct dev_pin_info;
+struct iommu_param;

 struct bus_attribute {
 	struct attribute attr;
@@ -959,6 +960,7 @@ struct dev_links_info {
  * 		device (i.e. the bus driver that discovered the device).
  * @iommu_group: IOMMU group the device belongs to.
  * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
+ * @iommu_param: Per device generic IOMMU runtime data
  *
  * @offline_disabled: If set, the device is permanently online.
  * @offline:	Set after successful invocation of bus type's .offline().
@@ -1052,6 +1054,7 @@ struct device {
 	void	(*release)(struct device *dev);
 	struct iommu_group	*iommu_group;
 	struct iommu_fwspec	*iommu_fwspec;
+	struct iommu_param	*iommu_param;

 	bool			offline_disabled:1;
 	bool			offline:1;
-- cgit v1.2.3


From 4e32348ba5269aac1165f496b78189201568dd8c Mon Sep 17 00:00:00 2001
From: Jacob Pan
Date: Mon, 3 Jun 2019 15:57:47 +0100
Subject: iommu: Introduce device fault data

Device faults detected by the IOMMU can be reported outside the IOMMU
subsystem for further processing. This patch introduces a generic
device fault data structure.

The fault can be either an unrecoverable fault or a page request, also
referred to as a recoverable fault. We only care about non-internal
faults that are likely to be reported to an external subsystem.
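
The expected consumer of this fault data is a per-device handler with
the iommu_dev_fault_handler_t shape added in the hunk below. A
hypothetical handler might look like this; the IOMMU_FAULT_* type
values are assumed from the uapi header added by this patch, which is
not part of the include/linux diff shown here.

    #include <linux/device.h>
    #include <linux/iommu.h>

    /* Device-driver callback invoked for faults the IOMMU reports on this device. */
    static int foo_iommu_fault_handler(struct iommu_fault *fault, void *data)
    {
            struct device *dev = data;

            if (fault->type == IOMMU_FAULT_PAGE_REQ)
                    dev_dbg(dev, "recoverable page request fault\n");
            else
                    dev_err(dev, "unrecoverable DMA fault\n");

            return 0;
    }
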
Signed-off-by: Jacob Pan
Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Liu, Yi L
Signed-off-by: Ashok Raj
Signed-off-by: Eric Auger
Signed-off-by: Joerg Roedel
---
 include/linux/iommu.h | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a815cf6f6f47..2b05056d5fa7 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <uapi/linux/iommu.h>

 #define IOMMU_READ	(1 << 0)
 #define IOMMU_WRITE	(1 << 1)
@@ -49,6 +50,7 @@ struct device;
 struct iommu_domain;
 struct notifier_block;
 struct iommu_sva;
+struct iommu_fault_event;

 /* iommu fault flags */
 #define IOMMU_FAULT_READ	0x0
@@ -58,6 +60,7 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
 			struct device *, unsigned long, int, void *);
 typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
 				       void *);
+typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

 struct iommu_domain_geometry {
 	dma_addr_t aperture_start; /* First address that can be mapped */
@@ -301,6 +304,41 @@ struct iommu_device {
 	struct device *dev;
 };

+/**
+ * struct iommu_fault_event - Generic fault event
+ *
+ * Can represent recoverable faults such as a page requests or
+ * unrecoverable faults such as DMA or IRQ remapping faults.
+ *
+ * @fault: fault descriptor
+ */
+struct iommu_fault_event {
+	struct iommu_fault fault;
+};
+
+/**
+ * struct iommu_fault_param - per-device IOMMU fault data
+ * @handler: Callback function to handle IOMMU faults at device level
+ * @data: handler private data
+ */
+struct iommu_fault_param {
+	iommu_dev_fault_handler_t handler;
+	void *data;
+};
+
+/**
+ * struct iommu_param - collection of per-device IOMMU data
+ *
+ * @fault_param: IOMMU detected device fault reporting data
+ *
+ * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
+ *	struct iommu_group	*iommu_group;
+ *	struct iommu_fwspec	*iommu_fwspec;
+ */
+struct iommu_param {
+	struct iommu_fault_param *fault_param;
+};
+
 int  iommu_device_register(struct iommu_device *iommu);
 void iommu_device_unregister(struct iommu_device *iommu);
 int  iommu_device_sysfs_add(struct iommu_device *iommu,
@@ -504,6 +542,7 @@ struct iommu_ops {};
 struct iommu_group {};
 struct iommu_fwspec {};
 struct iommu_device {};
+struct iommu_fault_param {};

 static inline bool iommu_present(struct bus_type *bus)
 {
-- cgit v1.2.3


From 0c830e6b32826311fc2b9ea1f4679be0f4ef0933 Mon Sep 17 00:00:00 2001
From: Jacob Pan
Date: Mon, 3 Jun 2019 15:57:48 +0100
Subject: iommu: Introduce device fault report API

Traditionally, device specific faults are detected and handled within
their own device drivers. When an IOMMU is enabled, faults on DMA
related transactions are detected by the IOMMU. There is no generic
reporting mechanism to report faults back to the in-kernel device
driver or the guest OS in case of assigned devices.

This patch introduces a registration API for device specific fault
handlers. This differs from the existing iommu_set_fault_handler/
report_iommu_fault infrastructures in several ways:
- it allows reporting more sophisticated fault events (both
  unrecoverable faults and page request faults) due to the nature of
  the iommu_fault struct
- it is device specific and not domain specific.

The current iommu_report_device_fault() implementation only handles
the "shoot and forget" unrecoverable fault case. Handling of page
request faults or stalled faults will come later.
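
A sketch of how a device driver would hook into the registration API
declared below, reusing the handler shape from the previous patch; the
probe/remove functions are hypothetical.

    #include <linux/device.h>
    #include <linux/iommu.h>

    static int foo_iommu_fault_handler(struct iommu_fault *fault, void *data);

    static int foo_probe(struct device *dev)
    {
            /* Route faults detected by the IOMMU for this device to our handler. */
            return iommu_register_device_fault_handler(dev, foo_iommu_fault_handler,
                                                       dev);
    }

    static void foo_remove(struct device *dev)
    {
            iommu_unregister_device_fault_handler(dev);
    }
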
Signed-off-by: Jacob Pan
Signed-off-by: Ashok Raj
Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Eric Auger
Signed-off-by: Joerg Roedel
---
 include/linux/iommu.h | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2b05056d5fa7..3e783f5bf472 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -336,6 +336,7 @@ struct iommu_fault_param {
  *	struct iommu_fwspec	*iommu_fwspec;
  */
 struct iommu_param {
+	struct mutex lock;
 	struct iommu_fault_param *fault_param;
 };

@@ -428,6 +429,15 @@ extern int iommu_group_register_notifier(struct iommu_group *group,
 					 struct notifier_block *nb);
 extern int iommu_group_unregister_notifier(struct iommu_group *group,
 					   struct notifier_block *nb);
+extern int iommu_register_device_fault_handler(struct device *dev,
+					iommu_dev_fault_handler_t handler,
+					void *data);
+
+extern int iommu_unregister_device_fault_handler(struct device *dev);
+
+extern int iommu_report_device_fault(struct device *dev,
+				     struct iommu_fault_event *evt);
+
 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
@@ -736,6 +746,25 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group,
 	return 0;
 }

+static inline
+int iommu_register_device_fault_handler(struct device *dev,
+					iommu_dev_fault_handler_t handler,
+					void *data)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_unregister_device_fault_handler(struct device *dev)
+{
+	return 0;
+}
+
+static inline
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_group_id(struct iommu_group *group)
 {
 	return -ENODEV;
-- cgit v1.2.3


From bf3255b3cfe2d06280340dbac3f44b65d3ee6da3 Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Mon, 3 Jun 2019 15:57:49 +0100
Subject: iommu: Add recoverable fault reporting

Some IOMMU hardware features, for example PCI PRI and Arm SMMU Stall,
enable recoverable I/O page faults. Allow IOMMU drivers to report PRI
Page Requests and Stall events through the new fault reporting API.
The consumer of the fault can be either an I/O page fault handler in
the host, or a guest OS.

Once handled, the fault must be completed by sending a page response
back to the IOMMU. Add an iommu_page_response() function to complete a
page fault.

There are two ways to extend the userspace API:
* Add a field to iommu_page_response and a flag to
  iommu_page_response::flags describing the validity of this field.
* Introduce a new iommu_page_response_X structure with a different
  version number. The kernel must then support both versions.
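
For a recoverable fault, the registered handler (or a work item it
schedules) eventually completes the request through
iommu_page_response(). The sketch below assumes the
iommu_page_response field names and IOMMU_PAGE_RESP_* constants from
the uapi header introduced alongside this series; they are shown here
only as an assumption, since that header is not part of this
include/linux diff.

    #include <linux/iommu.h>
    #include <linux/types.h>

    /* Complete a previously reported page request on @dev. */
    static int foo_complete_page_request(struct device *dev, u32 pasid,
                                         u32 grpid, bool success)
    {
            struct iommu_page_response resp = {
                    .version = IOMMU_PAGE_RESP_VERSION_1,
                    .flags   = IOMMU_PAGE_RESP_PASID_VALID,
                    .pasid   = pasid,
                    .grpid   = grpid,
                    .code    = success ? IOMMU_PAGE_RESP_SUCCESS
                                       : IOMMU_PAGE_RESP_INVALID,
            };

            return iommu_page_response(dev, &resp);
    }
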
Signed-off-by: Jacob Pan
Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Joerg Roedel
---
 include/linux/iommu.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3e783f5bf472..76c8cda61dfd 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -227,6 +227,7 @@ struct iommu_sva_ops {
  * @sva_bind: Bind process address space to device
  * @sva_unbind: Unbind process address space from device
  * @sva_get_pasid: Get PASID associated to a SVA handle
+ * @page_response: handle page request response
  * @pgsize_bitmap: bitmap of all possible supported page sizes
  */
 struct iommu_ops {
@@ -287,6 +288,10 @@ struct iommu_ops {
 	void (*sva_unbind)(struct iommu_sva *handle);
 	int (*sva_get_pasid)(struct iommu_sva *handle);

+	int (*page_response)(struct device *dev,
+			     struct iommu_fault_event *evt,
+			     struct iommu_page_response *msg);
+
 	unsigned long pgsize_bitmap;
 };

@@ -311,19 +316,25 @@ struct iommu_device {
  * unrecoverable faults such as DMA or IRQ remapping faults.
  *
  * @fault: fault descriptor
+ * @list: pending fault event list, used for tracking responses
  */
 struct iommu_fault_event {
 	struct iommu_fault fault;
+	struct list_head list;
 };

 /**
  * struct iommu_fault_param - per-device IOMMU fault data
  * @handler: Callback function to handle IOMMU faults at device level
  * @data: handler private data
+ * @faults: holds the pending faults which needs response
+ * @lock: protect pending faults list
  */
 struct iommu_fault_param {
 	iommu_dev_fault_handler_t handler;
 	void *data;
+	struct list_head faults;
+	struct mutex lock;
 };

 /**
@@ -437,6 +448,8 @@ extern int iommu_unregister_device_fault_handler(struct device *dev);

 extern int iommu_report_device_fault(struct device *dev,
 				     struct iommu_fault_event *evt);
+extern int iommu_page_response(struct device *dev,
+			       struct iommu_page_response *msg);

 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
@@ -765,6 +778,12 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
 	return -ENODEV;
 }

+static inline int iommu_page_response(struct device *dev,
+				      struct iommu_page_response *msg)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_group_id(struct iommu_group *group)
 {
 	return -ENODEV;
-- cgit v1.2.3


From adfd373820906d376c8b643f1a279ac809605b6b Mon Sep 17 00:00:00 2001
From: Eric Auger
Date: Mon, 3 Jun 2019 08:53:35 +0200
Subject: iommu: Introduce IOMMU_RESV_DIRECT_RELAXABLE reserved memory regions

Introduce a new type for reserved regions. This corresponds to directly
mapped regions which are known to be relaxable in some specific
conditions, such as the device assignment use case. Well known examples
are those used by USB controllers providing PS/2 keyboard emulation for
pre-boot BIOS and early BOOT, or RMRRs associated to IGD working in
legacy mode.

Since commit c875d2c1b808 ("iommu/vt-d: Exclude devices using RMRRs
from IOMMU API domains") and commit 18436afdc11a ("iommu/vt-d: Allow
RMRR on graphics devices too"), those regions are currently considered
"safe" with respect to the device assignment use case, which requires a
non-direct mapping at the IOMMU physical level (RAM GPA -> HPA mapping).

Those RMRRs currently exist and sometimes the device is attempting to
access them, but this has not been considered an issue until now.
However, at the moment, iommu_get_group_resv_regions() is not able to
make any distinction between directly mapped regions: those which must
be absolutely enforced and those, like the ones above, which are known
to be relaxable. This is a blocker for reporting severe conflicts
between non-relaxable RMRRs (like MSI doorbells) and the guest GPA
space.

With this new reserved region type we will be able to use
iommu_get_group_resv_regions() to enumerate the IOVA space that is
usable through the IOMMU API without introducing regressions with
respect to existing device assignment use cases (USB and IGD).

Signed-off-by: Eric Auger
Signed-off-by: Joerg Roedel
---
 include/linux/iommu.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 91af22a344e2..ab7a1c85af75 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -135,6 +135,12 @@ enum iommu_attr {
 enum iommu_resv_type {
 	/* Memory regions which must be mapped 1:1 at all times */
 	IOMMU_RESV_DIRECT,
+	/*
+	 * Memory regions which are advertised to be 1:1 but are
+	 * commonly considered relaxable in some conditions,
+	 * for instance in device assignment use case (USB, Graphics)
+	 */
+	IOMMU_RESV_DIRECT_RELAXABLE,
 	/* Arbitrary "never map this or give it to a device" address ranges */
 	IOMMU_RESV_RESERVED,
 	/* Hardware MSI region (untranslated) */
-- cgit v1.2.3


From 90ec7a76cc4ba65bfedeb8621cba09cd5a317d8f Mon Sep 17 00:00:00 2001
From: Vivek Gautam
Date: Thu, 16 May 2019 15:00:20 +0530
Subject: iommu/io-pgtable-arm: Add support to use system cache

A few Qualcomm platforms, such as sdm845, have an additional outer
cache called the system cache, aka last level cache (LLC), that allows
non-coherent devices to upgrade to using caching. This cache sits right
before the DDR and is tightly coupled with the memory controller. The
clients using this cache request their slices from it, make them
active, and can then start using it.

There is a fundamental assumption that non-coherent devices can't
access caches. This change adds an exception where they *can* use some
level of cache despite still being non-coherent overall. Coherent
devices that use cacheable memory, and the CPU, make use of this system
cache by default.

Looking at memory types, we have the following:
a) Normal uncached:   MAIR 0x44, inner non-cacheable,
                      outer non-cacheable;
b) Normal cached:     MAIR 0xff, inner read write-back non-transient,
                      outer read write-back non-transient;
                      attribute setting for coherent I/O devices.
and, for non-coherent I/O devices that can allocate in the system
cache, another type gets added:
c) Normal sys-cached: MAIR 0xf4, inner non-cacheable,
                      outer read write-back non-transient

Coherent I/O devices use the system cache by marking the memory as
normal cached. Non-coherent I/O devices should mark the memory as
normal sys-cached in page tables to use the system cache.

Acked-by: Robin Murphy
Signed-off-by: Vivek Gautam
Signed-off-by: Will Deacon
---
 include/linux/iommu.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a815cf6f6f47..8ee3fbaf5855 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -41,6 +41,12 @@
  * if the IOMMU page table format is equivalent.
  */
 #define IOMMU_PRIV	(1 << 5)
+/*
+ * Non-coherent masters on few Qualcomm SoCs can use this page protection flag
+ * to set correct cacheability attributes to use an outer level of cache -
+ * last level cache, aka system cache.
+ */
+#define IOMMU_QCOM_SYS_CACHE	(1 << 6)

 struct iommu_ops;
 struct iommu_group;
-- cgit v1.2.3


From 4f41845b340783eaec9cc2840fe3cb9a00574054 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 25 Jun 2019 12:51:25 +0100
Subject: iommu/io-pgtable: Replace IO_PGTABLE_QUIRK_NO_DMA with specific flag

IO_PGTABLE_QUIRK_NO_DMA is a bit of a misnomer, since it's really just
an indication of whether or not the page-table walker for the IOMMU is
coherent with the CPU caches. Since cache coherency is more than just a
quirk, replace the flag with its own field in the io_pgtable_cfg
structure.

Cc: Bjorn Andersson
Signed-off-by: Will Deacon
---
 include/linux/io-pgtable.h | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 76969a564831..b5a450a3bb47 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -44,6 +44,8 @@ struct iommu_gather_ops {
  *              tables.
  * @ias:        Input address (iova) size, in bits.
  * @oas:        Output address (paddr) size, in bits.
+ * @coherent_walk  A flag to indicate whether or not page table walks made
+ *                 by the IOMMU are coherent with the CPU caches.
  * @tlb:        TLB management callbacks for this set of tables.
  * @iommu_dev:  The device representing the DMA configuration for the
  *              page table walker.
@@ -68,11 +70,6 @@ struct io_pgtable_cfg {
 	 * when the SoC is in "4GB mode" and they can only access the high
 	 * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
 	 *
-	 * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
-	 * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
-	 * software-emulated IOMMU), such that pagetable updates need not
-	 * be treated as explicit DMA data.
-	 *
 	 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
 	 * on unmap, for DMA domains using the flush queue mechanism for
 	 * delayed invalidation.
 	 */
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
 	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
 	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
-	#define IO_PGTABLE_QUIRK_NO_DMA		BIT(4)
-	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(5)
+	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
 	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
 	unsigned int			oas;
+	bool				coherent_walk;
 	const struct iommu_gather_ops	*tlb;
 	struct device			*iommu_dev;
-- cgit v1.2.3
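
Taking the last two patches together, an IOMMU driver now describes
walker coherency through the new io_pgtable_cfg field instead of a
quirk bit, and a non-coherent Qualcomm master can request
system-cacheable mappings with the new prot flag. The following is a
condensed sketch; the driver function, address values and page-table
format choice are illustrative, not taken from a real driver.

    #include <linux/errno.h>
    #include <linux/io-pgtable.h>
    #include <linux/iommu.h>
    #include <linux/sizes.h>

    static int foo_setup_pgtable(struct device *smmu_dev, bool walker_is_coherent,
                                 const struct iommu_gather_ops *tlb_ops, void *cookie)
    {
            struct io_pgtable_ops *ops;
            struct io_pgtable_cfg cfg = {
                    .pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G,
                    .ias           = 48,
                    .oas           = 48,
                    .coherent_walk = walker_is_coherent, /* replaces IO_PGTABLE_QUIRK_NO_DMA */
                    .tlb           = tlb_ops,
                    .iommu_dev     = smmu_dev,
            };

            ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
            if (!ops)
                    return -ENOMEM;

            /* A non-coherent master opting into the system cache (LLC). */
            return ops->map(ops, 0x1000, 0x80001000, SZ_4K,
                            IOMMU_READ | IOMMU_WRITE | IOMMU_QCOM_SYS_CACHE);
    }

The cfg.coherent_walk decision here carries the same information that
drivers previously expressed by setting or clearing
IO_PGTABLE_QUIRK_NO_DMA.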