Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            3
-rw-r--r--  lib/Makefile           2
-rw-r--r--  lib/argv_split.c      87
-rw-r--r--  lib/bust_spinlocks.c   3
-rw-r--r--  lib/div64.c           19
-rw-r--r--  lib/dma-debug.c       45
-rw-r--r--  lib/dynamic_debug.c    1
-rw-r--r--  lib/fault-inject.c     2
-rw-r--r--  lib/genalloc.c        81
-rw-r--r--  lib/idr.c            123
-rw-r--r--  lib/int_sqrt.c        32
-rw-r--r--  lib/kobject.c          9
-rw-r--r--  lib/list_sort.c        2
-rw-r--r--  lib/show_mem.c         3
-rw-r--r--  lib/swiotlb.c         19
-rw-r--r--  lib/ucs2_string.c     51
-rw-r--r--  lib/uuid.c             8
-rw-r--r--  lib/xz/Kconfig         2
18 files changed, 331 insertions, 161 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 3958dc4389f9..fe01d418b09a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -404,4 +404,7 @@ config OID_REGISTRY
 	help
 	  Enable fast lookup object identifier registry.
 
+config UCS2_STRING
+	tristate
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index d7946ff75b2e..6e2cc561f761 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -174,3 +174,5 @@ quiet_cmd_build_OID_registry = GEN     $@
       cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
 
 clean-files	+= oid_registry_data.c
+
+obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 1e9a6cbc3689..e927ed0e18a8 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -8,23 +8,17 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 
-static const char *skip_arg(const char *cp)
-{
-	while (*cp && !isspace(*cp))
-		cp++;
-
-	return cp;
-}
-
 static int count_argc(const char *str)
 {
 	int count = 0;
+	bool was_space;
 
-	while (*str) {
-		str = skip_spaces(str);
-		if (*str) {
+	for (was_space = true; *str; str++) {
+		if (isspace(*str)) {
+			was_space = true;
+		} else if (was_space) {
+			was_space = false;
 			count++;
-			str = skip_arg(str);
 		}
 	}
 
@@ -39,10 +33,8 @@ static int count_argc(const char *str)
  */
 void argv_free(char **argv)
 {
-	char **p;
-	for (p = argv; *p; p++)
-		kfree(*p);
-
+	argv--;
+	kfree(argv[0]);
 	kfree(argv);
 }
 EXPORT_SYMBOL(argv_free);
@@ -59,43 +51,44 @@ EXPORT_SYMBOL(argv_free);
  * considered to be a single argument separator. The returned array
  * is always NULL-terminated. Returns NULL on memory allocation
  * failure.
+ *
+ * The source string at `str' may be undergoing concurrent alteration via
+ * userspace sysctl activity (at least).  The argv_split() implementation
+ * attempts to handle this gracefully by taking a local copy to work on.
  */
 char **argv_split(gfp_t gfp, const char *str, int *argcp)
 {
-	int argc = count_argc(str);
-	char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp);
-	char **argvp;
-
-	if (argv == NULL)
-		goto out;
-
-	if (argcp)
-		*argcp = argc;
-
-	argvp = argv;
-
-	while (*str) {
-		str = skip_spaces(str);
-
-		if (*str) {
-			const char *p = str;
-			char *t;
-
-			str = skip_arg(str);
+	char *argv_str;
+	bool was_space;
+	char **argv, **argv_ret;
+	int argc;
+
+	argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp);
+	if (!argv_str)
+		return NULL;
+
+	argc = count_argc(argv_str);
+	argv = kmalloc(sizeof(*argv) * (argc + 2), gfp);
+	if (!argv) {
+		kfree(argv_str);
+		return NULL;
+	}
 
-			t = kstrndup(p, str-p, gfp);
-			if (t == NULL)
-				goto fail;
-			*argvp++ = t;
+	*argv = argv_str;
+	argv_ret = ++argv;
+	for (was_space = true; *argv_str; argv_str++) {
+		if (isspace(*argv_str)) {
+			was_space = true;
+			*argv_str = 0;
+		} else if (was_space) {
+			was_space = false;
+			*argv++ = argv_str;
 		}
 	}
-	*argvp = NULL;
-
-  out:
-	return argv;
+	*argv = NULL;
 
-  fail:
-	argv_free(argv);
-	return NULL;
+	if (argcp)
+		*argcp = argc;
+	return argv_ret;
 }
 EXPORT_SYMBOL(argv_split);
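A minimal usage sketch of the reworked argv_split() API (the caller run_helper() and its command line are hypothetical). The new memory layout matters: argv[-1] holds the single kstrndup() copy that every argument pointer points into, which is why the new argv_free() is just two kfree() calls.

/* Sketch only; assumes a kernel-module context. */
static int run_helper(const char *cmdline)
{
	int argc;
	char **argv;

	/*
	 * argv_split() now snapshots cmdline with kstrndup() before
	 * counting and splitting, so a string rewritten concurrently by
	 * userspace (e.g. via sysctl) cannot change between count_argc()
	 * and the split loop.
	 */
	argv = argv_split(GFP_KERNEL, cmdline, &argc);
	if (!argv)
		return -ENOMEM;

	pr_info("parsed %d arguments, argv[0]=%s\n", argc, argv[0]);

	/* argv[-1] is the backing copy; argv_free() does two kfree()s */
	argv_free(argv);
	return 0;
}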
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d1..f8e0e5367398 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 #include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 			wake_up_klogd();
 	}
 }
-
-
diff --git a/lib/div64.c b/lib/div64.c
index a163b6caef73..3af5728d95fd 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -79,9 +79,10 @@ EXPORT_SYMBOL(div_s64_rem);
 #endif
 
 /**
- * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder
  * @dividend:	64bit dividend
  * @divisor:	64bit divisor
+ * @remainder:  64bit remainder
  *
  * This implementation is a modified version of the algorithm proposed
  * by the book 'Hacker's Delight'.  The original source and full proof
@@ -89,27 +90,33 @@ EXPORT_SYMBOL(div_s64_rem);
  *
  * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
  */
-#ifndef div64_u64
-u64 div64_u64(u64 dividend, u64 divisor)
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
 {
 	u32 high = divisor >> 32;
 	u64 quot;
 
 	if (high == 0) {
-		quot = div_u64(dividend, divisor);
+		u32 rem32;
+		quot = div_u64_rem(dividend, divisor, &rem32);
+		*remainder = rem32;
 	} else {
 		int n = 1 + fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);
 
 		if (quot != 0)
 			quot--;
-		if ((dividend - quot * divisor) >= divisor)
+
+		*remainder = dividend - quot * divisor;
+		if (*remainder >= divisor) {
 			quot++;
+			*remainder -= divisor;
+		}
 	}
 
 	return quot;
 }
-EXPORT_SYMBOL(div64_u64);
+EXPORT_SYMBOL(div64_u64_rem);
 #endif
 
 /**
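A sketch of the new helper's contract, with the arithmetic worked out by hand (the wrapper function is illustrative only):

#include <linux/math64.h>

static void div64_u64_rem_example(void)
{
	u64 rem;
	u64 quot = div64_u64_rem(1000000000000ULL, 3000000007ULL, &rem);

	/* 1000000000000 = 333 * 3000000007 + 999997669 */
	pr_info("quot=%llu rem=%llu\n", quot, rem);	/* 333, 999997669 */
}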
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d0..d87a17a819d0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
 	entry = bucket_find_exact(bucket, ref);
 
 	if (!entry) {
+		/* must drop lock before calling dma_mapping_error */
+		put_hash_bucket(bucket, &flags);
+
 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
 			err_printk(ref->dev, NULL,
-				   "DMA-API: device driver tries "
-				   "to free an invalid DMA memory address\n");
-			return;
+				   "DMA-API: device driver tries to free an "
+				   "invalid DMA memory address\n");
+		} else {
+			err_printk(ref->dev, NULL,
+				   "DMA-API: device driver tries to free DMA "
+				   "memory it has not allocated [device "
+				   "address=0x%016llx] [size=%llu bytes]\n",
+				   ref->dev_addr, ref->size);
 		}
-		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-			   "to free DMA memory it has not allocated "
-			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   ref->dev_addr, ref->size);
-		goto out;
+		return;
 	}
 
 	if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
 	hash_bucket_del(entry);
 	dma_entry_free(entry);
 
-out:
 	put_hash_bucket(bucket, &flags);
 }
 
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	ref.dev = dev;
 	ref.dev_addr = dma_addr;
 	bucket = get_hash_bucket(&ref, &flags);
-	entry = bucket_find_exact(bucket, &ref);
 
-	if (!entry)
-		goto out;
+	list_for_each_entry(entry, &bucket->list, list) {
+		if (!exact_match(&ref, entry))
+			continue;
+
+		/*
+		 * The same physical address can be mapped multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which updates the first entry
+		 * from the hash which fits the reference value and is
+		 * not currently listed as being checked.
+		 */
+		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+			entry->map_err_type = MAP_ERR_CHECKED;
+			break;
+		}
+	}
 
-	entry->map_err_type = MAP_ERR_CHECKED;
-out:
 	put_hash_bucket(bucket, &flags);
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);
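To see why debug_dma_mapping_error() now scans for the first NOT_CHECKED entry instead of taking the first exact match, consider a driver that maps the same page twice (sketch below; the driver context is hypothetical). Without an IOMMU both mappings share one bus address, so with bucket_find_exact() the second dma_mapping_error() call could land on the entry already marked CHECKED, and the other mapping would later be flagged as never checked.

/* Illustrative only; dev and page come from the surrounding driver. */
static int map_same_page_twice(struct device *dev, struct page *page)
{
	dma_addr_t a, b;

	a = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, a))	/* marks one entry CHECKED */
		return -EIO;

	b = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, b)) {	/* marks the other one */
		dma_unmap_page(dev, a, PAGE_SIZE, DMA_TO_DEVICE);
		return -EIO;
	}

	/* ... use both mappings ... */

	dma_unmap_page(dev, b, PAGE_SIZE, DMA_TO_DEVICE);
	dma_unmap_page(dev, a, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}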
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 5276b99ca650..46032453abd5 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -281,7 +281,6 @@ static inline int parse_lineno(const char *str, unsigned int *val)
  * allow the user to express a query which matches a format
  * containing embedded spaces.
  */
-#define isodigit(c)	((c) >= '0' && (c) <= '7')
 static char *unescape(char *str)
 {
 	char *in = str;
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f7210ad6cffd..c5c7a762b850 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -122,7 +122,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
 			return false;
 	}
 
-	if (attr->probability <= random32() % 100)
+	if (attr->probability <= prandom_u32() % 100)
 		return false;
 
 	if (!fail_stacktrace(attr))
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 54920433705a..b35cfa9bc3d4 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -34,6 +34,8 @@
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/genalloc.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
 
 static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
 {
@@ -480,3 +482,82 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
 	return start_bit;
 }
 EXPORT_SYMBOL(gen_pool_best_fit);
+
+static void devm_gen_pool_release(struct device *dev, void *res)
+{
+	gen_pool_destroy(*(struct gen_pool **)res);
+}
+
+/**
+ * devm_gen_pool_create - managed gen_pool_create
+ * @dev: device that provides the gen_pool
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ *
+ * Create a new special memory pool that can be used to manage special purpose
+ * memory not managed by the regular kmalloc/kfree interface. The pool will be
+ * automatically destroyed by the device management code.
+ */
+struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
+		int nid)
+{
+	struct gen_pool **ptr, *pool;
+
+	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
+
+	pool = gen_pool_create(min_alloc_order, nid);
+	if (pool) {
+		*ptr = pool;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return pool;
+}
+
+/**
+ * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: Optional name for the gen_pool, usually NULL
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *dev_get_gen_pool(struct device *dev)
+{
+	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
+					NULL);
+
+	if (!p)
+		return NULL;
+	return *p;
+}
+EXPORT_SYMBOL_GPL(dev_get_gen_pool);
+
+#ifdef CONFIG_OF
+/**
+ * of_get_named_gen_pool - find a pool by phandle property
+ * @np: device node
+ * @propname: property name containing phandle(s)
+ * @index: index into the phandle array
+ *
+ * Returns the pool that contains the chunk starting at the physical
+ * address of the device tree node pointed at by the phandle property,
+ * or NULL if not found.
+ */
+struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+	const char *propname, int index)
+{
+	struct platform_device *pdev;
+	struct device_node *np_pool;
+
+	np_pool = of_parse_phandle(np, propname, index);
+	if (!np_pool)
+		return NULL;
+	pdev = of_find_device_by_node(np_pool);
+	if (!pdev)
+		return NULL;
+	return dev_get_gen_pool(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
+#endif /* CONFIG_OF */
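A sketch of how a platform driver might use the new devres hook. The driver, the pdev_ioremap_sram() helper, and the sizes are hypothetical; gen_pool_add() and gen_pool_alloc() are the pre-existing genalloc API.

static int sram_probe(struct platform_device *pdev)
{
	struct gen_pool *pool;
	unsigned long buf;
	/* hypothetical helper returning an already-mapped SRAM region */
	void __iomem *sram_base = pdev_ioremap_sram(pdev);

	/* min_alloc_order = 5: each bitmap bit covers 2^5 = 32 bytes */
	pool = devm_gen_pool_create(&pdev->dev, 5, -1);
	if (!pool)
		return -ENOMEM;

	if (gen_pool_add(pool, (unsigned long)sram_base, SZ_4K, -1))
		return -ENOMEM;

	buf = gen_pool_alloc(pool, 256);	/* 256-byte chunk, or 0 */
	if (!buf)
		return -ENOMEM;

	/* no error-path gen_pool_destroy(): devres tears the pool down */
	return 0;
}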
diff --git a/lib/idr.c b/lib/idr.c
index 73f4d53c02f3..cca4b9302a71 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache.  We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones.  As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context.  See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp:	idr handle
- * @gfp_mask:	memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible.  This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
  * @idp: idr handle
 * @starting_id: id to start search at
- * @id: pointer to the allocated handle
  * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
  * @gfp_mask: allocation mask for idr_layer_alloc()
  * @layer_idr: optional idr passed to idr_layer_alloc()
@@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function.  It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int rv;
@@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	*id = rv;
 	return 0;
 }
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -515,6 +495,33 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(idr_alloc);
 
+/**
+ * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Essentially the same as idr_alloc, but prefers to allocate progressively
+ * higher ids if it can. If the "cur" counter wraps, then it will start again
+ * at the "start" end of the range and allocate one that has already been used.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
+			gfp_t gfp_mask)
+{
+	int id;
+
+	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
+	if (id == -ENOSPC)
+		id = idr_alloc(idr, ptr, start, end, gfp_mask);
+
+	if (likely(id >= 0))
+		idr->cur = id + 1;
+	return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
+
 static void idr_remove_warning(int id)
 {
 	printk(KERN_WARNING
@@ -569,8 +576,7 @@ void idr_remove(struct idr *idp, int id)
 	struct idr_layer *p;
 	struct idr_layer *to_free;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
@@ -667,15 +673,7 @@ void *idr_find_slowpath(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	/*
-	 * If @id is negative, idr_find() used to ignore the sign bit and
-	 * performed lookup with the rest of bits, which is weird and can
-	 * lead to very obscure bugs.  We're now returning NULL for all
-	 * negative IDs but just in case somebody was depending on the sign
-	 * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
-	 * be detected and fixed.  WARN_ON_ONCE() can later be removed.
-	 */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return NULL;
 
 	p = rcu_dereference_raw(idp->top);
@@ -824,8 +822,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return ERR_PTR(-EINVAL);
 
 	p = idp->top;
@@ -918,7 +915,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
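idr_alloc_cyclic() composes with the existing idr_preload() pattern used by idr_alloc(); a sketch of a typical caller follows (the idr, lock, and id range are hypothetical):

static int assign_id(struct idr *my_idr, spinlock_t *my_lock, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(my_lock);
	/* ids 1..99; prefers ids above the last one handed out */
	id = idr_alloc_cyclic(my_idr, obj, 1, 100, GFP_NOWAIT);
	spin_unlock(my_lock);
	idr_preload_end();

	return id;	/* >= 1 on success, -ENOSPC/-ENOMEM on failure */
}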
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index fc2eeb7cb2ea..1ef4cc344977 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -1,3 +1,9 @@
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@hp.com>
+ *
+ *  Based on the shift-and-subtract algorithm for computing integer
+ *  square root from Guy L. Steele.
+ */
 
 #include <linux/kernel.h>
 #include <linux/export.h>
@@ -10,23 +16,23 @@
  */
 unsigned long int_sqrt(unsigned long x)
 {
-	unsigned long op, res, one;
+	unsigned long b, m, y = 0;
 
-	op = x;
-	res = 0;
+	if (x <= 1)
+		return x;
 
-	one = 1UL << (BITS_PER_LONG - 2);
-	while (one > op)
-		one >>= 2;
+	m = 1UL << (BITS_PER_LONG - 2);
+	while (m != 0) {
+		b = y + m;
+		y >>= 1;
 
-	while (one != 0) {
-		if (op >= res + one) {
-			op = op - (res + one);
-			res = res +  2 * one;
+		if (x >= b) {
+			x -= b;
+			y += m;
 		}
-		res /= 2;
-		one /= 4;
+		m >>= 2;
 	}
-	return res;
+
+	return y;
 }
 EXPORT_SYMBOL(int_sqrt);
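A userspace transcription of the new int_sqrt() (BITS_PER_LONG assumed to be 64) so the shift-and-subtract steps can be checked outside the kernel; the test values and harness are illustrative:

#include <stdio.h>

static unsigned long isqrt(unsigned long x)
{
	unsigned long b, m, y = 0;

	if (x <= 1)
		return x;

	m = 1UL << (64 - 2);	/* highest power-of-4 bit pair */
	while (m != 0) {
		b = y + m;
		y >>= 1;
		if (x >= b) {	/* subtract and set this result bit */
			x -= b;
			y += m;
		}
		m >>= 2;
	}
	return y;
}

int main(void)
{
	unsigned long v[] = { 0, 1, 3, 15, 100, 65535, 1UL << 62 };
	size_t i;

	for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("isqrt(%lu) = %lu\n", v[i], isqrt(v[i]));
	/* expected: 0 1 1 3 10 255 2147483648 */
	return 0;
}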
diff --git a/lib/kobject.c b/lib/kobject.c
index e07ee1fcd6f1..a65486613d79 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
 	return kobj;
 }
 
+static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
+{
+	if (!kref_get_unless_zero(&kobj->kref))
+		kobj = NULL;
+	return kobj;
+}
+
 /*
  * kobject_cleanup - free kobject resources.
  * @kobj: object to cleanup
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
 
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
-			ret = kobject_get(k);
+			ret = kobject_get_unless_zero(k);
 			break;
 		}
 	}
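Why kset_find_obj() must not take a plain reference: once the final kobject_put() drops the refcount to zero, the object can still be visible on kset->list until kobject_release() unlinks it, and kobject_get() would "resurrect" it (0 -> 1) while it is being freed. The fragment below restates the lookup pattern from the patch; the timeline in the comment is illustrative:

/*
 *   CPU0: last kobject_put()          CPU1: kset_find_obj()
 *     kref count 1 -> 0                 spin_lock(&kset->list_lock)
 *     kobject_release() begins          name matches; object still
 *     (object not yet unlinked)         visible on kset->list
 *
 * kref_get_unless_zero() refuses the 0 -> 1 transition in that window,
 * so the lookup reports the dying object as already gone:
 */
list_for_each_entry(k, &kset->list, entry) {
	if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
		ret = kobject_get_unless_zero(k);	/* NULL if dying */
		break;
	}
}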
diff --git a/lib/list_sort.c b/lib/list_sort.c
index d7325c6b103f..1183fa70a44d 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -229,7 +229,7 @@ static int __init list_sort_test(void)
 			goto exit;
 		}
 		/* force some equivalencies */
-		el->value = random32() % (TEST_LIST_LEN/3);
+		el->value = prandom_u32() % (TEST_LIST_LEN / 3);
 		el->serial = i;
 		el->poison1 = TEST_POISON1;
 		el->poison2 = TEST_POISON2;
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 4407f8c9b1f7..b7c72311ad0c 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -18,6 +18,9 @@ void show_mem(unsigned int filter)
 	printk("Mem-Info:\n");
 	show_free_areas(filter);
 
+	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+		return;
+
 	for_each_online_pgdat(pgdat) {
 		unsigned long i, flags;
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bfe02b8fc55b..d23762e6652c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str)
 	if (!strcmp(str, "force"))
 		swiotlb_force = 1;
 
-	return 1;
+	return 0;
 }
-__setup("swiotlb=", setup_io_tlb_npages);
+early_param("swiotlb", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
@@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void)
 	return io_tlb_nslabs;
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+unsigned long swiotlb_size_or_default(void)
+{
+	unsigned long size;
+
+	size = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	return size ? size : (IO_TLB_DEFAULT_SIZE);
+}
+
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
@@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 
 void  __init
 swiotlb_init(int verbose)
 {
-	/* default to 64MB */
-	size_t default_size = 64UL<<20;
+	size_t default_size = IO_TLB_DEFAULT_SIZE;
 	unsigned char *vstart;
 	unsigned long bytes;
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
new file mode 100644
index 000000000000..6f500ef2301d
--- /dev/null
+++ b/lib/ucs2_string.c
@@ -0,0 +1,51 @@
+#include <linux/ucs2_string.h>
+#include <linux/module.h>
+
+/* Return the number of unicode characters in data */
+unsigned long
+ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
+{
+	unsigned long length = 0;
+
+	while (*s++ != 0 && length < maxlength)
+		length++;
+	return length;
+}
+EXPORT_SYMBOL(ucs2_strnlen);
+
+unsigned long
+ucs2_strlen(const ucs2_char_t *s)
+{
+	return ucs2_strnlen(s, ~0UL);
+}
+EXPORT_SYMBOL(ucs2_strlen);
+
+/*
+ * Return the number of bytes is the length of this string
+ * Note: this is NOT the same as the number of unicode characters
+ */
+unsigned long
+ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
+{
+	return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t);
+}
+EXPORT_SYMBOL(ucs2_strsize);
+
+int
+ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+{
+	while (1) {
+		if (len == 0)
+			return 0;
+		if (*a < *b)
+			return -1;
+		if (*a > *b)
+			return 1;
+		if (*a == 0) /* implies *b == 0 */
+			return 0;
+		a++;
+		b++;
+		len--;
+	}
+}
+EXPORT_SYMBOL(ucs2_strncmp);
diff --git a/lib/uuid.c b/lib/uuid.c
index 52a6fe6387de..398821e4dce1 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -25,13 +25,7 @@
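A userspace illustration of the character/byte distinction the UCS-2 helpers encode; ucs2_char_t is transcribed here on the assumption that include/linux/ucs2_string.h defines it as a 16-bit type, and the EFI-style variable name is just sample data:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint16_t ucs2_char_t;	/* assumed to mirror linux/ucs2_string.h */

/* userspace copy of ucs2_strnlen() above, for illustration */
static unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
{
	unsigned long length = 0;

	while (*s++ != 0 && length < maxlength)
		length++;
	return length;
}

int main(void)
{
	const ucs2_char_t efi_name[] = { 'B', 'o', 'o', 't', 0 };

	/* 4 characters but 8 bytes; the byte count is what EFI variable
	 * services expect, hence the separate ucs2_strsize() helper. */
	printf("%lu chars, %zu bytes\n",
	       ucs2_strnlen(efi_name, 16),
	       ucs2_strnlen(efi_name, 16) * sizeof(ucs2_char_t));
	return 0;
}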
 
 static void __uuid_gen_common(__u8 b[16])
 {
-	int i;
-	u32 r;
-
-	for (i = 0; i < 4; i++) {
-		r = random32();
-		memcpy(b + i * 4, &r, 4);
-	}
+	prandom_bytes(b, 16);
 	/* reversion 0b10 */
 	b[8] = (b[8] & 0x3F) | 0x80;
 }
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 82a04d7ba99e..08837db52d94 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -15,7 +15,7 @@ config XZ_DEC_X86
 
 config XZ_DEC_POWERPC
 	bool "PowerPC BCJ filter decoder"
-	default y if POWERPC
+	default y if PPC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64