From 5857f70c8a62377c2304d8ad27e579881728fc5a Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Mon, 4 Mar 2013 14:32:54 -0800
Subject: idr: fix new kernel-doc warnings

Fix new kernel-doc warnings in idr:

  Warning(include/linux/idr.h:113): No description found for parameter 'idr'
  Warning(include/linux/idr.h:113): Excess function parameter 'idp' description in 'idr_find'
  Warning(lib/idr.c:232): Excess function parameter 'id' description in 'sub_alloc'

Signed-off-by: Randy Dunlap
Acked-by: Tejun Heo
Signed-off-by: Linus Torvalds
---
 lib/idr.c | 1 -
 1 file changed, 1 deletion(-)
(limited to 'lib')

diff --git a/lib/idr.c b/lib/idr.c
index 00739aaf95a2..4f82a284c6a2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -214,7 +214,6 @@ EXPORT_SYMBOL(idr_pre_get);
  * sub_alloc - try to allocate an id without growing the tree depth
  * @idp: idr handle
  * @starting_id: id to start search at
- * @id: pointer to the allocated handle
  * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
  * @gfp_mask: allocation mask for idr_layer_alloc()
  * @layer_idr: optional idr passed to idr_layer_alloc()
-- cgit v1.2.3

From c8615d3716fe327c2540cf514a34b227dc9b39e8 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 13 Mar 2013 14:59:42 -0700
Subject: idr: deprecate idr_pre_get() and idr_get_new[_above]()

Now that all in-kernel users are converted to use the new alloc
interface, mark the old interface deprecated.  We should be able to
remove these in a few releases.

Signed-off-by: Tejun Heo
Cc: Rusty Russell
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/idr.c | 41 +++++------------------------------------
 1 file changed, 5 insertions(+), 36 deletions(-)
(limited to 'lib')

diff --git a/lib/idr.c b/lib/idr.c
index 4f82a284c6a2..c6fb8295507b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -184,20 +184,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp: idr handle
- * @gfp_mask: memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible. This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,7 +195,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
@@ -375,25 +362,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function.  It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int rv;
@@ -406,7 +375,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	*id = rv;
 	return 0;
 }
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -907,7 +876,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
-- cgit v1.2.3
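For out-of-tree callers still on the old interface, the migration has a
well-defined shape.  Below is a minimal sketch based on the idr_preload()
kernel-doc of this era; my_idr, my_lock and ptr are placeholder names and
error handling is abbreviated:

	/* Old, now-deprecated pattern: preallocate, then retry on -EAGAIN. */
	int id, ret;
again:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&my_lock);
	ret = idr_get_new_above(&my_idr, ptr, 1, &id);
	spin_unlock(&my_lock);
	if (ret == -EAGAIN)
		goto again;

	/* New pattern: preload outside the lock, then allocate atomically.
	 * idr_alloc() returns the new id, or a negative errno. */
	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();
	if (id < 0)
		return id;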
From 97da55fcec6e3898f50010a6847dfa64f7c085e6 Mon Sep 17 00:00:00 2001
From: Paul Bolle
Date: Wed, 13 Mar 2013 14:59:44 -0700
Subject: decompressors: fix typo "POWERPC"

Commit 5dc49c75a26b ("decompressors: make the default XZ_DEC_* config
match the selected architecture") added

    default y if POWERPC

to lib/xz/Kconfig.  But there is no Kconfig symbol POWERPC.  The most
general Kconfig symbol for the powerpc architecture is PPC.  So let's
use that.

Signed-off-by: Paul Bolle
Cc: Florian Fainelli
Cc: Lasse Collin
Cc: Benjamin Herrenschmidt
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/xz/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'lib')

diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 82a04d7ba99e..08837db52d94 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -15,7 +15,7 @@ config XZ_DEC_X86
 
 config XZ_DEC_POWERPC
 	bool "PowerPC BCJ filter decoder"
-	default y if POWERPC
+	default y if PPC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64
-- cgit v1.2.3

From 59bfbcf01967d4d3370a2b8294673dd709e732cc Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 13 Mar 2013 14:59:49 -0700
Subject: idr: idr_alloc() shouldn't trigger lowmem warning when preloaded

GFP_NOIO is often used for idr_alloc() inside a preloaded section, as
the allocation mask doesn't really matter there.  If the idr tree needs
to be expanded, idr_alloc() first tries to allocate using the specified
allocation mask, and if that fails it falls back to the preloaded
buffer.  This order prevents non-preloading idr_alloc() users from
taking advantage of preloading ones by using the preload buffer without
filling it, which would shift the burden of allocation to the preload
users.

Unfortunately, this allowed-and-expected-to-fail kmem_cache allocation
ends up generating a spurious slab lowmem warning before the request is
satisfied from the preload buffer.

This patch makes idr_layer_alloc() add __GFP_NOWARN to the first
kmem_cache attempt and try kmem_cache again w/o __GFP_NOWARN after
allocation from the preload buffer fails, so that the lowmem warning is
generated as long as it isn't suppressed by the original @gfp_mask.

Signed-off-by: Tejun Heo
Reported-by: David Teigland
Tested-by: David Teigland
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/idr.c | 38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)
(limited to 'lib')

diff --git a/lib/idr.c b/lib/idr.c
index c6fb8295507b..322e2816f2fb 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache.  We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones.  As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context.  See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
-- cgit v1.2.3
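The try-quietly-then-fall-back-then-warn sequence introduced here is a
reusable idiom.  A condensed sketch of the resulting idr_layer_alloc()
logic, where fetch_from_preload_buffer() is a hypothetical stand-in for
the per-cpu preload lookup that the real code open-codes:

	struct idr_layer *new;

	/* First attempt is allowed to fail when a preload buffer exists,
	 * so keep the page allocator quiet about it. */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (!new && !in_interrupt())
		new = fetch_from_preload_buffer();	/* hypothetical helper */
	if (!new)
		/* Final attempt: warn (or not) exactly as @gfp_mask dictates. */
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
	return new;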
From dc72c32e1fd872a9a4fdfe645283c9dcd68e556d Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Fri, 22 Mar 2013 15:04:39 -0700
Subject: printk: Provide a wake_up_klogd() off-case

wake_up_klogd() is useless when CONFIG_PRINTK=n because neither
printk() nor printk_sched() are in use, and there are actually no
waiters on the log_wait waitqueue.  It should be a stub in this case
for users like bust_spinlocks().

Otherwise this results in the following link error when CONFIG_PRINTK=n
and CONFIG_IRQ_WORK=n:

	kernel/built-in.o: In function `wake_up_klogd':
	(.text.wake_up_klogd+0xb4): undefined reference to `irq_work_queue'

To fix this, provide an off-case for wake_up_klogd() when
CONFIG_PRINTK=n.

There is much more from console_unlock() and other console-related code
in printk.c that should be moved under CONFIG_PRINTK.  But for now,
focus on a minimal fix as we passed the merge window already.

[akpm@linux-foundation.org: include printk.h in bust_spinlocks.c]
Signed-off-by: Frederic Weisbecker
Reported-by: James Hogan
Cc: James Hogan
Cc: Steven Rostedt
Cc: Peter Zijlstra
Cc: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/bust_spinlocks.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
(limited to 'lib')

diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d1..f8e0e5367398 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 #include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 			wake_up_klogd();
 	}
 }
-
-
-- cgit v1.2.3
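The lib/-limited view above only shows the new include; the stub itself
lives in include/linux/printk.h.  The usual shape of such an off-case is
(a sketch of the pattern, not the verbatim hunk from this commit):

	#ifdef CONFIG_PRINTK
	extern void wake_up_klogd(void);
	#else
	static inline void wake_up_klogd(void)
	{
	}
	#endif

With the static inline stub, callers such as bust_spinlocks() compile
unchanged, and the call simply evaporates when printk support is
configured out.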
From 8d640a51ec9e9cdefa680b67ad55f933eefc5923 Mon Sep 17 00:00:00 2001
From: Alexander Duyck
Date: Fri, 22 Mar 2013 15:04:48 -0700
Subject: dma-debug: fix locking bug in check_unmap()

In check_unmap() it is possible to get into a deadlocked state if
dma_mapping_error is called.  The problem is that the bucket is locked
in check_unmap, and locked again by debug_dma_mapping_error which is
called by dma_mapping_error.  To resolve that we must release the lock
on the bucket before making the call to dma_mapping_error.

[akpm@linux-foundation.org: restore 80-col trickery to be consistent with the rest of the file]
Signed-off-by: Alexander Duyck
Cc: Joerg Roedel
Reviewed-by: Shuah Khan
Tested-by: Shuah Khan
Cc: Jakub Kicinski
Cc: Konrad Rzeszutek Wilk
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/dma-debug.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
(limited to 'lib')

diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d0..d3e06a5e981e 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
 	entry = bucket_find_exact(bucket, ref);
 
 	if (!entry) {
+		/* must drop lock before calling dma_mapping_error */
+		put_hash_bucket(bucket, &flags);
+
 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
 			err_printk(ref->dev, NULL,
-				   "DMA-API: device driver tries "
-				   "to free an invalid DMA memory address\n");
-			return;
+				   "DMA-API: device driver tries to free an "
+				   "invalid DMA memory address\n");
+		} else {
+			err_printk(ref->dev, NULL,
+				   "DMA-API: device driver tries to free DMA "
+				   "memory it has not allocated [device "
+				   "address=0x%016llx] [size=%llu bytes]\n",
+				   ref->dev_addr, ref->size);
 		}
-		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-			   "to free DMA memory it has not allocated "
-			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   ref->dev_addr, ref->size);
-		goto out;
+		return;
 	}
 
 	if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
 	hash_bucket_del(entry);
 	dma_entry_free(entry);
 
-out:
 	put_hash_bucket(bucket, &flags);
 }
-- cgit v1.2.3
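The deadlocking call chain, reconstructed from the description above
(before the fix, with CONFIG_DMA_API_DEBUG enabled):

	check_unmap(ref)
	  get_hash_bucket(ref, &flags)		/* bucket lock acquired */
	  dma_mapping_error(ref->dev, ref->dev_addr)
	    debug_dma_mapping_error(dev, dma_addr)
	      get_hash_bucket(&ref, &flags)	/* same bucket lock again -> deadlock */

Dropping the bucket lock before calling dma_mapping_error(), as the patch
does, breaks the self-deadlock; both error messages are then printed
without the lock held.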
From 96e7d7a1e0fc7780b4c1981c787e42473aa91a95 Mon Sep 17 00:00:00 2001
From: Alexander Duyck
Date: Fri, 22 Mar 2013 15:04:49 -0700
Subject: dma-debug: update DMA debug API to better handle multiple mappings of a buffer

There were reports of the igb driver unmapping buffers without calling
dma_mapping_error.  On closer inspection, issues were found in the DMA
debug API and how it handled multiple mappings of the same buffer.

The issue I found is the fact that debug_dma_mapping_error() would only
set the map_err_type to MAP_ERR_CHECKED in the case that there was only
one match for device and device address.  However, in the non-IOMMU
case multiple mappings with the same address can exist, and as a result
the field was not being set once a second mapping was instantiated.  I
have resolved this by changing the search so that it will instead set
MAP_ERR_CHECKED on the first buffer that matches the device and DMA
address and is currently in the state MAP_ERR_NOT_CHECKED.

A secondary side effect of this patch is that in the case of multiple
buffers using the same address only the last mapping will have a valid
map_err_type.  The previous mappings will all end up with map_err_type
set to MAP_ERR_CHECKED because of the dma_mapping_error() call in
debug_dma_map_page().  However, this behavior may be preferable as it
means you will likely only see one real error per multi-mapped buffer,
versus the current behavior of multiple false errors per multi-mapped
buffer.

Signed-off-by: Alexander Duyck
Cc: Joerg Roedel
Reviewed-by: Shuah Khan
Tested-by: Shuah Khan
Cc: Jakub Kicinski
Cc: Konrad Rzeszutek Wilk
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/dma-debug.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)
(limited to 'lib')

diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d3e06a5e981e..d87a17a819d0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1085,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	ref.dev = dev;
 	ref.dev_addr = dma_addr;
 	bucket = get_hash_bucket(&ref, &flags);
-	entry = bucket_find_exact(bucket, &ref);
 
-	if (!entry)
-		goto out;
+	list_for_each_entry(entry, &bucket->list, list) {
+		if (!exact_match(&ref, entry))
+			continue;
+
+		/*
+		 * The same physical address can be mapped multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which updates the first entry
+		 * from the hash which fits the reference value and is
+		 * not currently listed as being checked.
+		 */
+		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+			entry->map_err_type = MAP_ERR_CHECKED;
+			break;
+		}
+	}
 
-	entry->map_err_type = MAP_ERR_CHECKED;
-out:
 	put_hash_bucket(bucket, &flags);
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);
-- cgit v1.2.3
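A sketch of the scenario being fixed, on a non-IOMMU system where two
mappings of one buffer yield the same device address (dev, buf and len
are placeholders):

	/* Both mappings hash to the same dma-debug bucket with identical
	 * keys, so two entries now coexist for this address. */
	dma_addr_t a = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	dma_addr_t b = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* With this patch, each call below flips one MAP_ERR_NOT_CHECKED
	 * entry to MAP_ERR_CHECKED instead of always updating the first
	 * exact match, so neither later unmap is flagged as unchecked. */
	if (dma_mapping_error(dev, a) || dma_mapping_error(dev, b))
		return -ENOMEM;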
From a49b7e82cab0f9b41f483359be83f44fbb6b4979 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sat, 13 Apr 2013 15:15:30 -0700
Subject: kobject: fix kset_find_obj() race with concurrent last kobject_put()

Anatol Pomozov identified a race condition that hits module unloading
and re-loading.  To quote Anatol:

  "This is a race condition that exists between kset_find_obj() and
   kobject_put().  kset_find_obj() might return a kobject that has a
   refcount equal to 0 if this kobject is being freed by kobject_put()
   in another thread.

   Here is the timeline for the crash, in case kset_find_obj() searches
   for an object that nobody holds while another thread is doing
   kobject_put() on the same kobject:

     THREAD A (calls kset_find_obj())    THREAD B (calls kobject_put())
     spin_lock()
                                         atomic_dec_return(kobj->kref), counter gets zero here
                                         ... starts kobject cleanup ....
                                         spin_lock() // WAIT thread A in kobj_kset_leave()
     iterate over kset->list
     atomic_inc(kobj->kref) (counter becomes 1)
     spin_unlock()
                                         spin_lock() // taken
                                         // it does not know that thread A
                                         // increased the counter, so it
                                         // removes obj from the list
                                         spin_unlock()
                                         vfree(module) // frees module object with containing kobj

     // kobj points to freed memory area!!
     kobject_put(kobj) // OOPS!!!!

   The race above happens because module.c tries to use kset_find_obj()
   when somebody unloads a module.  The module.c code was introduced in
   commit 6494a93d55fa"

Anatol supplied a patch specific for module.c that worked around the
problem by simply not using kset_find_obj() at all, but rather than
make a local band-aid, this just fixes kset_find_obj() to be
thread-safe using the proper model of refusing to get a new reference
if the refcount has already dropped to zero.

See examples of this proper refcount handling not only in the kref
documentation, but in various other equivalent uses of this pattern by
grepping for atomic_inc_not_zero().

[ Side note: the module race does indicate that module loading and
  unloading is not properly serialized wrt sysfs information using the
  module mutex.  That may require further thought, but this is the
  correct fix at the kobject layer regardless. ]

Reported-analyzed-and-tested-by: Anatol Pomozov
Cc: Greg Kroah-Hartman
Cc: Al Viro
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds
---
 lib/kobject.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
(limited to 'lib')

diff --git a/lib/kobject.c b/lib/kobject.c
index e07ee1fcd6f1..a65486613d79 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
 	return kobj;
 }
 
+static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
+{
+	if (!kref_get_unless_zero(&kobj->kref))
+		kobj = NULL;
+	return kobj;
+}
+
 /*
  * kobject_cleanup - free kobject resources.
  * @kobj: object to cleanup
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
 
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
-			ret = kobject_get(k);
+			ret = kobject_get_unless_zero(k);
 			break;
 		}
 	}
-- cgit v1.2.3
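The rule being enforced is the standard lookup-side discipline for
refcounted objects: a lookup may only take a reference while the count is
still nonzero, never resurrect a zero.  A generic sketch of the pattern
(my_obj, match() and the list are illustrative names, not kernel code):

	struct my_obj *obj, *found = NULL;

	spin_lock(&list_lock);
	list_for_each_entry(obj, &obj_list, entry) {
		if (match(obj, key)) {
			/* Refuse the 0 -> 1 transition: a zero refcount
			 * means the object is already being torn down. */
			if (kref_get_unless_zero(&obj->kref))
				found = obj;
			break;
		}
	}
	spin_unlock(&list_lock);
	return found;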
From 0635eb8a54cf0fea64b174bb68bc36b9c3d622db Mon Sep 17 00:00:00 2001
From: Matthew Garrett
Date: Mon, 15 Apr 2013 13:09:45 -0700
Subject: Move utf16 functions to kernel core and rename

We want to be able to use the utf16 functions that are currently
present in the EFI variables code in platform-specific code as well.
Move them to the kernel core, and in the process rename them to
accurately describe what they do - they don't handle UTF-16, only
UCS-2.

Signed-off-by: Matthew Garrett
Signed-off-by: Matt Fleming
---
 lib/Kconfig       |  3 +++
 lib/Makefile      |  2 ++
 lib/ucs2_string.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+)
 create mode 100644 lib/ucs2_string.c
(limited to 'lib')

diff --git a/lib/Kconfig b/lib/Kconfig
index 3958dc4389f9..fe01d418b09a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -404,4 +404,7 @@ config OID_REGISTRY
 	help
 	  Enable fast lookup object identifier registry.
 
+config UCS2_STRING
+	tristate
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index d7946ff75b2e..6e2cc561f761 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -174,3 +174,5 @@ quiet_cmd_build_OID_registry = GEN     $@
       cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
 
 clean-files	+= oid_registry_data.c
+
+obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
new file mode 100644
index 000000000000..6f500ef2301d
--- /dev/null
+++ b/lib/ucs2_string.c
@@ -0,0 +1,51 @@
+#include <linux/ucs2_string.h>
+#include <linux/module.h>
+
+/* Return the number of unicode characters in data */
+unsigned long
+ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
+{
+	unsigned long length = 0;
+
+	while (*s++ != 0 && length < maxlength)
+		length++;
+	return length;
+}
+EXPORT_SYMBOL(ucs2_strnlen);
+
+unsigned long
+ucs2_strlen(const ucs2_char_t *s)
+{
+	return ucs2_strnlen(s, ~0UL);
+}
+EXPORT_SYMBOL(ucs2_strlen);
+
+/*
+ * Return the length of this string in bytes.
+ * Note: this is NOT the same as the number of unicode characters
+ */
+unsigned long
+ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
+{
+	return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t);
+}
+EXPORT_SYMBOL(ucs2_strsize);
+
+int
+ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+{
+	while (1) {
+		if (len == 0)
+			return 0;
+		if (*a < *b)
+			return -1;
+		if (*a > *b)
+			return 1;
+		if (*a == 0) /* implies *b == 0 */
+			return 0;
+		a++;
+		b++;
+		len--;
+	}
+}
+EXPORT_SYMBOL(ucs2_strncmp);
-- cgit v1.2.3
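To make the character/byte distinction in these helpers concrete, a small
usage sketch (ucs2_char_t is a 16-bit type, so each character occupies
two bytes):

	static const ucs2_char_t name[] = { 'B', 'o', 'o', 't', 0 };

	ucs2_strlen(name);		  /* 4: characters, not bytes */
	ucs2_strnlen(name, 2);		  /* 2: capped by maxlength */
	ucs2_strsize(name, sizeof(name)); /* 8: 4 chars * 2 bytes, terminator excluded */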
From c729de8fcea37a1c444e81857eace12494c804a9 Mon Sep 17 00:00:00 2001
From: Yinghai Lu
Date: Mon, 15 Apr 2013 22:23:45 -0700
Subject: x86, kdump: Set crashkernel_low automatically

Chao said that kdump did work well on his system on 3.8 without an
extra parameter, even though iommu does not work with kdump.  Now he
has to append crashkernel_low=Y in the first kernel to make kdump work.

We have now modified crashkernel=X to allocate memory beyond 4G (if
available) and do not allocate the low range for the crash kernel if
the user does not specify that with crashkernel_low=Y.  This causes a
regression if iommu is not enabled: without iommu, swiotlb needs to be
set up in the first 4G and there is no low memory available to the
second kernel.

Set crashkernel_low automatically if the user does not specify it.

For systems that do support IOMMU with kdump properly, the user can
specify crashkernel_low=0 to save that 72M of low RAM.

-v3: add swiotlb_size() according to Konrad.
-v4: add comments what 8M is for according to hpa.
     also update more crashkernel_low= in kernel-parameters.txt
-v5: update changelog according to Vivek.
-v6: change description about swiotlb referring according to HATAYAMA.

Reported-by: WANG Chao
Tested-by: WANG Chao
Signed-off-by: Yinghai Lu
Link: http://lkml.kernel.org/r/1366089828-19692-2-git-send-email-yinghai@kernel.org
Acked-by: Vivek Goyal
Signed-off-by: H. Peter Anvin
---
 lib/swiotlb.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)
(limited to 'lib')

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bfe02b8fc55b..d23762e6652c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str)
 	if (!strcmp(str, "force"))
 		swiotlb_force = 1;
 
-	return 1;
+	return 0;
 }
-__setup("swiotlb=", setup_io_tlb_npages);
+early_param("swiotlb", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
@@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void)
 {
 	return io_tlb_nslabs;
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+unsigned long swiotlb_size_or_default(void)
+{
+	unsigned long size;
+
+	size = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	return size ? size : (IO_TLB_DEFAULT_SIZE);
+}
+
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
@@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 
 void  __init
 swiotlb_init(int verbose)
 {
-	/* default to 64MB */
-	size_t default_size = 64UL<<20;
+	size_t default_size = IO_TLB_DEFAULT_SIZE;
 	unsigned char *vstart;
 	unsigned long bytes;
-- cgit v1.2.3
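The arch side of the series (not part of this lib/-limited view) can then
size the automatic low reservation from the swiotlb requirement.  Roughly,
and matching the 64M + 8M = 72M arithmetic mentioned above (a sketch of
the idea, not the x86 hunk):

	/* swiotlb bounce buffers (64M by default) plus ~8M of slack for
	 * the second kernel's other low allocations gives ~72M of low RAM. */
	unsigned long low_size = swiotlb_size_or_default() + (8UL << 20);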