From 1ecd01d77c9bf5517617862d87b817804b67c771 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Tue, 10 May 2022 13:17:32 -0400
Subject: dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATOMIC

[ Upstream commit 84bc4f1dbbbb5f8aa68706a96711dccb28b518e5 ]

We observed the error "cacheline tracking ENOMEM, dma-debug disabled"
during a light system load (copying some files).

The reason for this error is that the dma_active_cacheline radix tree
uses GFP_NOWAIT allocation - so it can't access the emergency memory
reserves and it fails as soon as anybody reaches the watermark.

This patch changes GFP_NOWAIT to GFP_ATOMIC, so that it can access the
emergency memory reserves.

Signed-off-by: Mikulas Patocka
Signed-off-by: Christoph Hellwig
Signed-off-by: Sasha Levin
---
 kernel/dma/debug.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index f8ae54679865..ee7da1f2462f 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -448,7 +448,7 @@ void debug_dma_dump_mappings(struct device *dev)
  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
  * entries into the tree.
  */
-static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
+static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
 static DEFINE_SPINLOCK(radix_lock);
 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
-- 
cgit v1.2.3


From d7be05aff27278c89c5c2d5518e235a315ce447a Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Wed, 1 Jun 2022 07:51:16 -0700
Subject: dma-debug: make things less spammy under memory pressure

[ Upstream commit e19f8fa6ce1ca9b8b934ba7d2e8f34c95abc6e60 ]

Limit the error msg to avoid flooding the console. If you have a lot
of threads hitting this at once, they could have already gotten past
the dma_debug_disabled() check before they get to the point of
allocation failure, resulting in quite a lot of this error message
spamming the log. Use pr_err_once() to limit that.

Signed-off-by: Rob Clark
Signed-off-by: Christoph Hellwig
Signed-off-by: Sasha Levin
---
 kernel/dma/debug.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index ee7da1f2462f..ae9fc1ee6d20 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 
 	rc = active_cacheline_insert(entry);
 	if (rc == -ENOMEM) {
-		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
+		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
 	}
 
-- 
cgit v1.2.3
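[ Editor's note: both dma-debug fixes above target the same path.
active_cacheline_insert() runs with radix_lock held and interrupts
disabled, so the radix tree's internal node allocations cannot sleep;
the gfp_mask baked into the tree is the only lever for how hard they
may try. GFP_ATOMIC is GFP_NOWAIT plus __GFP_HIGH, which permits
dipping into the emergency reserves. A minimal sketch of that pattern,
not taken from the patches - the example_* names are hypothetical: ]

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(example_tree, GFP_ATOMIC);	/* was GFP_NOWAIT before the fix */
static DEFINE_SPINLOCK(example_lock);

static int example_insert(unsigned long cln, void *entry)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&example_lock, flags);
	/*
	 * Sleeping is forbidden here, so node allocation inherits the
	 * tree's gfp_mask: with GFP_ATOMIC (GFP_NOWAIT | __GFP_HIGH)
	 * it may draw on the emergency reserves rather than failing as
	 * soon as the low watermark is reached.
	 */
	rc = radix_tree_insert(&example_tree, cln, entry);
	spin_unlock_irqrestore(&example_lock, flags);

	return rc;	/* -ENOMEM only once the reserves too are exhausted */
}

[ Even with GFP_ATOMIC the insert can still fail under extreme
pressure, which is why the second patch additionally downgrades the
resulting message to pr_err_once(). ]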
From 73bc8a5e8e3a902d8cc9b2f42505f647ca48fac2 Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Fri, 20 May 2022 18:10:13 +0100
Subject: dma-direct: don't over-decrypt memory

commit 4a37f3dd9a83186cb88d44808ab35b78375082c9 upstream.

The original x86 sev_alloc() only called set_memory_decrypted() on
memory returned by alloc_pages_node(), so the page order calculation
fell out of that logic. However, the common dma-direct code has several
potential allocators, not all of which are guaranteed to round up the
underlying allocation to a power-of-two size, so carrying over that
calculation for the encryption/decryption size was a mistake. Fix it
by rounding to a *number* of pages, rather than an order.

Until recently there was an even worse interaction with DMA_DIRECT_REMAP
where we could have ended up decrypting part of the next adjacent
vmalloc area, only averted by no architecture actually supporting both
configs at once. Don't ask how I found that one out...

Fixes: c10f07aa27da ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: Robin Murphy
Signed-off-by: Christoph Hellwig
Acked-by: David Rientjes
[ backport the functional change without all the prior refactoring ]
Signed-off-by: Robin Murphy
Signed-off-by: Greg Kroah-Hartman
---
 kernel/dma/direct.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 06c111544f61..2922250f93b4 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -188,7 +188,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		goto out_free_pages;
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
+					   PFN_UP(size));
 		if (err)
 			goto out_free_pages;
 	}
@@ -210,7 +210,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
+					   PFN_UP(size));
 		if (err)
 			goto out_free_pages;
 	}
@@ -231,7 +231,7 @@ done:
 out_encrypt_pages:
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
+					   PFN_UP(size));
 		/* If memory cannot be re-encrypted, it must be leaked */
 		if (err)
 			return NULL;
@@ -244,8 +244,6 @@ out_free_pages:
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	unsigned int page_order = get_order(size);
-
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -266,7 +264,7 @@ void dma_direct_free(struct device *dev, size_t size,
 		return;
 
 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+		set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -302,8 +300,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-					 1 << get_order(size)))
+		if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
 			goto out_free_pages;
 	}
 	memset(ret, 0, size);
@@ -318,7 +315,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		struct page *page, dma_addr_t dma_addr,
 		enum dma_data_direction dir)
 {
-	unsigned int page_order = get_order(size);
 	void *vaddr = page_address(page);
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
@@ -327,7 +323,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		return;
 
 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+		set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 
 	dma_free_contiguous(dev, page, size);
 }
-- 
cgit v1.2.3
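[ Editor's note: the fix matters because get_order() rounds up to the
next power of two, while PFN_UP() rounds up only to whole pages. A
standalone userspace sketch of the arithmetic - hypothetical, with a
minimal stand-in for the kernel's get_order() and PFN_UP(): ]

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* minimal reimplementation of the kernel's get_order() */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE;	/* a 20 KiB buffer */

	/* order-based sizing covers 8 pages (next power of two)... */
	printf("1 << get_order(size) = %lu pages\n", 1UL << get_order(size));
	/* ...but only 5 pages were actually allocated */
	printf("PFN_UP(size)         = %lu pages\n", PFN_UP(size));
	return 0;
}

[ For a vmalloc-backed allocation of exactly 5 pages, calling
set_memory_decrypted() with the order-based count would touch 3 pages
past the end of the buffer - in the DMA_DIRECT_REMAP case described
above, potentially part of the next adjacent vmalloc area. ]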