| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-11-10 11:09:07 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-11-10 11:09:07 -0800 |
| commit | 391ce5b9c46ebf23cd049bb552a899dfc0cfb838 (patch) | |
| tree | 14ad276b3b32fad2f9ed94ab402950f2b0457136 /kernel/dma/direct.c | |
| parent | ead3b62a34d829d2df38187b93a18c22b289bd9c (diff) | |
| parent | 53c87e846e335e3c18044c397cc35178163d7827 (diff) | |
Merge tag 'dma-mapping-6.7-2023-11-10' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
- don't let pages that were decrypted for DMA in encrypted memory setups
  linger around on failure (Petr Tesarik)
- fix an out-of-bounds access in the new dynamic swiotlb code (Petr
  Tesarik)
- fix dma_addressing_limited() for systems whose physical memory layout
  is not fully covered by the dma_range_map (Jia He); see the sketch
  after the shortlog below
* tag 'dma-mapping-6.7-2023-11-10' of git://git.infradead.org/users/hch/dma-mapping:
swiotlb: fix out-of-bounds TLB allocations with CONFIG_SWIOTLB_DYNAMIC
dma-mapping: fix dma_addressing_limited() if dma_range_map can't cover all system RAM
dma-mapping: move dma_addressing_limited() out of line
swiotlb: do not free decrypted pages if dynamic
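For context on the dma_addressing_limited() change: moving the helper out of line lets it consult the new dma_direct_all_ram_mapped() check added in the diff below, instead of relying on the mask comparison alone. Here is a minimal sketch of how the resulting logic fits together, reconstructed from the shortlog and the diff; the actual out-of-line body lives in kernel/dma/mapping.c and may differ in detail:

```c
/*
 * Sketch only: reconstructed from the shortlog and the direct.c diff
 * below, not copied verbatim from kernel/dma/mapping.c.
 */
bool dma_addressing_limited(struct device *dev)
{
	/*
	 * The device is limited if its effective mask (the smaller of
	 * its DMA mask and any bus limit) cannot reach all memory the
	 * platform requires.
	 */
	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
	    dma_get_required_mask(dev))
		return true;

	/*
	 * Even with a wide enough mask, a dma_range_map that leaves
	 * holes in system RAM still limits the device when dma-direct
	 * is in use; that is what the new direct.c helper detects.
	 */
	if (unlikely(get_dma_ops(dev)))
		return false;
	return !dma_direct_all_ram_mapped(dev);
}
```

The mask comparison alone misses systems where the dma_range_map covers only part of RAM, which is why the out-of-line version also walks the map.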
Diffstat (limited to 'kernel/dma/direct.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | kernel/dma/direct.c | 40 |

1 file changed, 40 insertions(+), 0 deletions(-)
```diff
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index ed3056eb20b8..73c95815789a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -587,6 +587,46 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return mask >= phys_to_dma_unencrypted(dev, min_mask);
 }
 
+/*
+ * To check whether all ram resource ranges are covered by dma range map
+ * Returns 0 when further check is needed
+ * Returns 1 if there is some RAM range can't be covered by dma_range_map
+ */
+static int check_ram_in_range_map(unsigned long start_pfn,
+				  unsigned long nr_pages, void *data)
+{
+	unsigned long end_pfn = start_pfn + nr_pages;
+	const struct bus_dma_region *bdr = NULL;
+	const struct bus_dma_region *m;
+	struct device *dev = data;
+
+	while (start_pfn < end_pfn) {
+		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+
+			if (start_pfn >= cpu_start_pfn &&
+			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
+				bdr = m;
+				break;
+			}
+		}
+		if (!bdr)
+			return 1;
+
+		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
+	}
+
+	return 0;
+}
+
+bool dma_direct_all_ram_mapped(struct device *dev)
+{
+	if (!dev->dma_range_map)
+		return true;
+	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
+				      check_ram_in_range_map);
+}
+
 size_t dma_direct_max_mapping_size(struct device *dev)
 {
 	/* If SWIOTLB is active, use its maximum mapping size */
```
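To make the coverage walk above easier to experiment with outside the kernel, here is a hypothetical, self-contained userspace model of the same technique. PFN_SHIFT, struct range_map, ram_covered(), and the sample tables in main() are all made up for illustration; only the walking logic mirrors check_ram_in_range_map() (with the matched-region pointer reset on each pass, so a hole after a matched region is still caught):

```c
#include <stdbool.h>
#include <stdio.h>

#define PFN_SHIFT 12			/* assume 4 KiB pages */
#define PFN_DOWN(x) ((x) >> PFN_SHIFT)

struct range_map {			/* stands in for struct bus_dma_region */
	unsigned long long cpu_start;
	unsigned long long size;	/* size == 0 terminates the table */
};

/* Return true if [start_pfn, start_pfn + nr_pages) is fully covered. */
static bool ram_covered(const struct range_map *map,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	while (start_pfn < end_pfn) {
		const struct range_map *hit = NULL;
		const struct range_map *m;

		/* Find a region containing the current PFN. */
		for (m = map; PFN_DOWN(m->size); m++) {
			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

			if (start_pfn >= cpu_start_pfn &&
			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
				hit = m;
				break;
			}
		}
		if (!hit)
			return false;	/* a hole: this PFN is unreachable */

		/* Skip to the first PFN past the region we just matched. */
		start_pfn = PFN_DOWN(hit->cpu_start) + PFN_DOWN(hit->size);
	}
	return true;
}

int main(void)
{
	/* One DMA window covering CPU addresses [0, 2 GiB). */
	const struct range_map map[] = {
		{ .cpu_start = 0x0, .size = 0x80000000ULL },
		{ 0 },
	};

	/* RAM below 1 GiB is covered; RAM starting at 4 GiB is not. */
	printf("low RAM covered:  %d\n",
	       ram_covered(map, PFN_DOWN(0x0ULL),
			   PFN_DOWN(0x40000000ULL)));
	printf("high RAM covered: %d\n",
	       ram_covered(map, PFN_DOWN(0x100000000ULL),
			   PFN_DOWN(0x40000000ULL)));
	return 0;
}
```

The second call models the "weird layout" case from the pull message: a device whose range map stops at 2 GiB while the system has RAM above 4 GiB, so the walk hits a hole and reports the map as incomplete, which in the kernel makes dma_addressing_limited() return true and steers drivers toward bounce buffering.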
