From 94aedac49d92b22995d7b9092c6551b8b9924320 Mon Sep 17 00:00:00 2001 From: Dafna Hirschfeld Date: Thu, 4 Nov 2021 09:16:20 +0200 Subject: iommu: Log iova range in map/unmap trace events In case of an iommu page fault, the faulting iova is logged in trace_io_page_fault. It is therefore convenient to log the iova range in mapping/unmapping trace events so that it is easier to see if the faulting iova was recently in any of those ranges. Signed-off-by: Dafna Hirschfeld Link: https://lore.kernel.org/r/20211104071620.27290-1-dafna.hirschfeld@collabora.com Signed-off-by: Joerg Roedel --- include/trace/events/iommu.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h index 72b4582322ff..29096fe12623 100644 --- a/include/trace/events/iommu.h +++ b/include/trace/events/iommu.h @@ -101,8 +101,9 @@ TRACE_EVENT(map, __entry->size = size; ), - TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu", - __entry->iova, __entry->paddr, __entry->size + TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu", + __entry->iova, __entry->iova + __entry->size, __entry->paddr, + __entry->size ) ); @@ -124,8 +125,9 @@ TRACE_EVENT(unmap, __entry->unmapped_size = unmapped_size; ), - TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu", - __entry->iova, __entry->size, __entry->unmapped_size + TP_printk("IOMMU: iova=0x%016llx - 0x%016llx size=%zu unmapped_size=%zu", + __entry->iova, __entry->iova + __entry->size, + __entry->size, __entry->unmapped_size ) ); -- cgit v1.2.3 From 063ebb19d962b45a1b505748d464bd12b5074797 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Wed, 1 Dec 2021 17:33:21 +0000 Subject: iommu/virtio: Add definitions for VIRTIO_IOMMU_F_BYPASS_CONFIG Add definitions for the VIRTIO_IOMMU_F_BYPASS_CONFIG, which supersedes VIRTIO_IOMMU_F_BYPASS. Reviewed-by: Kevin Tian Signed-off-by: Jean-Philippe Brucker Reviewed-by: Eric Auger Link: https://lore.kernel.org/r/20211201173323.1045819-2-jean-philippe@linaro.org Signed-off-by: Joerg Roedel --- include/uapi/linux/virtio_iommu.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h index 237e36a280cb..1ff357f0d72e 100644 --- a/include/uapi/linux/virtio_iommu.h +++ b/include/uapi/linux/virtio_iommu.h @@ -16,6 +16,7 @@ #define VIRTIO_IOMMU_F_BYPASS 3 #define VIRTIO_IOMMU_F_PROBE 4 #define VIRTIO_IOMMU_F_MMIO 5 +#define VIRTIO_IOMMU_F_BYPASS_CONFIG 6 struct virtio_iommu_range_64 { __le64 start; @@ -36,6 +37,8 @@ struct virtio_iommu_config { struct virtio_iommu_range_32 domain_range; /* Probe buffer size */ __le32 probe_size; + __u8 bypass; + __u8 reserved[3]; }; /* Request types */ @@ -66,11 +69,14 @@ struct virtio_iommu_req_tail { __u8 reserved[3]; }; +#define VIRTIO_IOMMU_ATTACH_F_BYPASS (1 << 0) + struct virtio_iommu_req_attach { struct virtio_iommu_req_head head; __le32 domain; __le32 endpoint; - __u8 reserved[8]; + __le32 flags; + __u8 reserved[4]; struct virtio_iommu_req_tail tail; }; -- cgit v1.2.3 From 9dfa5b6f5efb85efe69fd3b7b0b912004d9547f1 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Thu, 16 Dec 2021 09:17:03 +0800 Subject: iommu/vt-d: Remove unused macros These macros has no reference in the tree anymore. Cleanup them. 
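Stepping back to the VIRTIO_IOMMU_F_BYPASS_CONFIG patch earlier in this series: it only adds the UAPI definitions, so the following is a minimal, hypothetical C sketch of how a driver might consume them. The helper name viommu_fill_attach_req() is invented for illustration, the pre-existing VIRTIO_IOMMU_T_ATTACH request type from the same header is assumed, and this is not a copy of the real virtio-iommu driver code.

/*
 * Illustrative sketch only (not from the patches above): build an ATTACH
 * request that asks the device to put the endpoint's domain in bypass mode
 * via the new VIRTIO_IOMMU_ATTACH_F_BYPASS flag. Only meaningful when the
 * device offered VIRTIO_IOMMU_F_BYPASS_CONFIG; transport and error handling
 * are omitted.
 */
#include <asm/byteorder.h>
#include <linux/string.h>
#include <linux/types.h>
#include <uapi/linux/virtio_iommu.h>

static void viommu_fill_attach_req(struct virtio_iommu_req_attach *req,
				   u32 domain_id, u32 endpoint_id,
				   bool bypass)
{
	memset(req, 0, sizeof(*req));
	req->head.type = VIRTIO_IOMMU_T_ATTACH;	/* pre-existing request type */
	req->domain    = cpu_to_le32(domain_id);
	req->endpoint  = cpu_to_le32(endpoint_id);
	if (bypass)
		req->flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
}

In a real driver the filled request would then be submitted on the request virtqueue and the status in the response tail checked, as with any other virtio-iommu request.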
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211216011703.763331-1-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- include/linux/intel-svm.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 57cceecbe37f..1b73bab7eeff 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -8,12 +8,6 @@ #ifndef __INTEL_SVM_H__ #define __INTEL_SVM_H__ -/* Values for rxwp in fault_cb callback */ -#define SVM_REQ_READ (1<<3) -#define SVM_REQ_WRITE (1<<2) -#define SVM_REQ_EXEC (1<<1) -#define SVM_REQ_PRIV (1<<0) - /* Page Request Queue depth */ #define PRQ_ORDER 2 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) -- cgit v1.2.3 From d5c383f2c98ac58c210b266cdaf7b86bc32d1ad1 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 17 Dec 2021 15:30:56 +0000 Subject: iommu/iova: Squash entry_dtor abstraction All flush queues are driven by iommu-dma now, so there is no need to abstract entry_dtor or its data any more. Squash the now-canonical implementation directly into the IOVA code to get it out of the way. Reviewed-by: John Garry Reviewed-by: Christoph Hellwig Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/2260f8de00ab5e0f9d2a1cf8978e6ae7cd4f182c.1639753638.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- include/linux/iova.h | 26 +++----------------------- 1 file changed, 3 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/iova.h b/include/linux/iova.h index 71d8a2de6635..e746d8e41449 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -40,9 +40,6 @@ struct iova_domain; /* Call-Back from IOVA code into IOMMU drivers */ typedef void (* iova_flush_cb)(struct iova_domain *domain); -/* Destructor for per-entry data */ -typedef void (* iova_entry_dtor)(unsigned long data); - /* Number of entries per Flush Queue */ #define IOVA_FQ_SIZE 256 @@ -53,7 +50,7 @@ typedef void (* iova_entry_dtor)(unsigned long data); struct iova_fq_entry { unsigned long iova_pfn; unsigned long pages; - unsigned long data; + struct page *freelist; u64 counter; /* Flush counter when this entrie was added */ }; @@ -88,9 +85,6 @@ struct iova_domain { iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU TLBs */ - iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for - iova entry */ - struct timer_list fq_timer; /* Timer to regularily empty the flush-queues */ atomic_t fq_timer_on; /* 1 when timer is active, 0 @@ -146,15 +140,14 @@ void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size); void queue_iova(struct iova_domain *iovad, unsigned long pfn, unsigned long pages, - unsigned long data); + struct page *freelist); unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool flush_rcache); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); -int init_iova_flush_queue(struct iova_domain *iovad, - iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); +int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); #else @@ -189,12 +182,6 @@ static inline void free_iova_fast(struct iova_domain *iovad, { } -static inline void queue_iova(struct iova_domain 
*iovad, - unsigned long pfn, unsigned long pages, - unsigned long data) -{ -} - static inline unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, @@ -216,13 +203,6 @@ static inline void init_iova_domain(struct iova_domain *iovad, { } -static inline int init_iova_flush_queue(struct iova_domain *iovad, - iova_flush_cb flush_cb, - iova_entry_dtor entry_dtor) -{ - return -ENODEV; -} - static inline struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) { -- cgit v1.2.3 From 649ad9835a3783bcb6c69368fa939e0010abb2c6 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 17 Dec 2021 15:30:57 +0000 Subject: iommu/iova: Squash flush_cb abstraction Once again, with iommu-dma now being the only flush queue user, we no longer need the extra level of indirection through flush_cb. Squash that and let the flush queue code call the domain method directly. This does mean temporarily having to carry an additional copy of the IOMMU domain pointer around instead, but only until a later patch untangles it again. Reviewed-by: John Garry Reviewed-by: Christoph Hellwig Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/e3f9b4acdd6640012ef4fbc819ac868d727b64a9.1639753638.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- include/linux/iova.h | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/iova.h b/include/linux/iova.h index e746d8e41449..99be4fcea4f3 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -14,6 +14,7 @@ #include #include #include +#include /* iova structure */ struct iova { @@ -35,11 +36,6 @@ struct iova_rcache { struct iova_cpu_rcache __percpu *cpu_rcaches; }; -struct iova_domain; - -/* Call-Back from IOVA code into IOMMU drivers */ -typedef void (* iova_flush_cb)(struct iova_domain *domain); - /* Number of entries per Flush Queue */ #define IOVA_FQ_SIZE 256 @@ -82,8 +78,7 @@ struct iova_domain { struct iova anchor; /* rbtree lookup anchor */ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ - iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU - TLBs */ + struct iommu_domain *fq_domain; struct timer_list fq_timer; /* Timer to regularily empty the flush-queues */ @@ -147,7 +142,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); -int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb); +int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); #else -- cgit v1.2.3 From 87f60cc65d24939353b40aa1d9297fea080cdf8d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 17 Dec 2021 15:31:00 +0000 Subject: iommu/vt-d: Use put_pages_list page->freelist is for the use of slab. We already have the ability to free a list of pages in the core mm, but it requires the use of a list_head and for the pages to be chained together through page->lru. Switch the Intel IOMMU and IOVA code over to using free_pages_list(). 
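As a rough illustration of the pattern the message above describes (a sketch under assumptions, not the actual Intel IOMMU or IOVA code; the core-mm helper is put_pages_list(), per the patch subject): pages to be freed are chained through page->lru onto a caller-provided list_head and released with a single call.

/*
 * Minimal sketch: collect pages on a list via page->lru, then let the core
 * mm drop a reference on each of them in one call. The function name is
 * invented for illustration.
 */
#include <linux/list.h>
#include <linux/mm.h>

static void example_free_table_pages(struct page **pages, unsigned int count)
{
	LIST_HEAD(freelist);
	unsigned int i;

	for (i = 0; i < count; i++)
		list_add_tail(&pages[i]->lru, &freelist);

	/* Pages whose reference count drops to zero here are freed in bulk */
	put_pages_list(&freelist);
}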
Signed-off-by: Matthew Wilcox (Oracle) [rm: split from original patch, cosmetic tweaks, fix fq entries] Signed-off-by: Robin Murphy Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/2115b560d9a0ce7cd4b948bd51a2b7bde8fdfd59.1639753638.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 3 ++- include/linux/iova.h | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index d2f3435e7d17..de0c57a567c8 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -186,7 +186,7 @@ struct iommu_iotlb_gather { unsigned long start; unsigned long end; size_t pgsize; - struct page *freelist; + struct list_head freelist; bool queued; }; @@ -399,6 +399,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) { *gather = (struct iommu_iotlb_gather) { .start = ULONG_MAX, + .freelist = LIST_HEAD_INIT(gather->freelist), }; } diff --git a/include/linux/iova.h b/include/linux/iova.h index 99be4fcea4f3..072a09c06e8a 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -46,7 +46,7 @@ struct iova_rcache { struct iova_fq_entry { unsigned long iova_pfn; unsigned long pages; - struct page *freelist; + struct list_head freelist; u64 counter; /* Flush counter when this entrie was added */ }; @@ -135,7 +135,7 @@ void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size); void queue_iova(struct iova_domain *iovad, unsigned long pfn, unsigned long pages, - struct page *freelist); + struct list_head *freelist); unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool flush_rcache); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, -- cgit v1.2.3 From a17e3026bc4da9135ca9a42ec0b1fa67f95172e3 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 17 Dec 2021 15:31:03 +0000 Subject: iommu: Move flush queue data into iommu_dma_cookie Complete the move into iommu-dma by refactoring the flush queues themselves to belong to the DMA cookie rather than the IOVA domain. The refactoring may as well extend to some minor cosmetic aspects too, to help us stay one step ahead of the style police. 
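The previous patch turned the iommu_iotlb_gather freelist into a list_head (initialised with LIST_HEAD_INIT in iommu_iotlb_gather_init()). A minimal sketch of the deferred-free pattern this enables is below; the function names are invented and locking and error handling are omitted, so this is an assumption-laden illustration rather than the real driver or iommu-dma code.

/*
 * Sketch of the deferred-free pattern with the list_head based gather
 * freelist. gather->freelist is assumed to have been set up by
 * iommu_iotlb_gather_init().
 */
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/mm.h>

/* IOMMU driver side: stash a no-longer-needed page-table page during unmap */
static void example_stash_table_page(struct iommu_iotlb_gather *gather,
				     struct page *pt_page)
{
	list_add_tail(&pt_page->lru, &gather->freelist);
}

/* DMA layer side: once the IOTLB flush has completed, release the pages */
static void example_free_after_flush(struct iommu_iotlb_gather *gather)
{
	put_pages_list(&gather->freelist);
}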
Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/24304722005bc6f144e2a1fdd865d1465722fc2e.1639753638.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- include/linux/iova.h | 44 +------------------------------------------- 1 file changed, 1 insertion(+), 43 deletions(-) (limited to 'include') diff --git a/include/linux/iova.h b/include/linux/iova.h index 072a09c06e8a..0abd48c5e622 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -12,9 +12,6 @@ #include #include #include -#include -#include -#include /* iova structure */ struct iova { @@ -36,27 +33,6 @@ struct iova_rcache { struct iova_cpu_rcache __percpu *cpu_rcaches; }; -/* Number of entries per Flush Queue */ -#define IOVA_FQ_SIZE 256 - -/* Timeout (in ms) after which entries are flushed from the Flush-Queue */ -#define IOVA_FQ_TIMEOUT 10 - -/* Flush Queue entry for defered flushing */ -struct iova_fq_entry { - unsigned long iova_pfn; - unsigned long pages; - struct list_head freelist; - u64 counter; /* Flush counter when this entrie was added */ -}; - -/* Per-CPU Flush Queue structure */ -struct iova_fq { - struct iova_fq_entry entries[IOVA_FQ_SIZE]; - unsigned head, tail; - spinlock_t lock; -}; - /* holds all the iova translations for a domain */ struct iova_domain { spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ @@ -67,23 +43,9 @@ struct iova_domain { unsigned long start_pfn; /* Lower limit for this domain */ unsigned long dma_32bit_pfn; unsigned long max32_alloc_size; /* Size of last failed allocation */ - struct iova_fq __percpu *fq; /* Flush Queue */ - - atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that - have been started */ - - atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that - have been finished */ - struct iova anchor; /* rbtree lookup anchor */ - struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ - - struct iommu_domain *fq_domain; - struct timer_list fq_timer; /* Timer to regularily empty the - flush-queues */ - atomic_t fq_timer_on; /* 1 when timer is active, 0 - when not */ + struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ struct hlist_node cpuhp_dead; }; @@ -133,16 +95,12 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, bool size_aligned); void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size); -void queue_iova(struct iova_domain *iovad, - unsigned long pfn, unsigned long pages, - struct list_head *freelist); unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool flush_rcache); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); -int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); #else -- cgit v1.2.3 From aade40b62745cf0b4e8a17d43652c5faff354e6b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 20 Dec 2021 13:34:48 +0100 Subject: iommu/iova: Temporarily include dma-mapping.h from iova.h Some users of iova.h still expect that dma-mapping.h is also included. Re-add the include until these users are updated to fix compile failures in the iommu tree. 
Acked-by: Robin Murphy
Link: https://lore.kernel.org/r/20211220123448.19996-1-joro@8bytes.org
Signed-off-by: Joerg Roedel
---
 include/linux/iova.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/linux/iova.h b/include/linux/iova.h
index 0abd48c5e622..cea79cb9f26c 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include <linux/dma-mapping.h>

 /* iova structure */
 struct iova {
-- cgit v1.2.3