Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig                |   3
-rw-r--r-- | lib/Makefile               |   4
-rw-r--r-- | lib/debugobjects.c         |  54
-rw-r--r-- | lib/devres.c               |  55
-rw-r--r-- | lib/dma-debug.c            |   2
-rw-r--r-- | lib/dynamic_queue_limits.c | 133
-rw-r--r-- | lib/fault-inject.c         |   8
-rw-r--r-- | lib/kobject.c              |  37
-rw-r--r-- | lib/kobject_uevent.c       |   3
-rw-r--r-- | lib/kref.c                 |  97
-rw-r--r-- | lib/reciprocal_div.c       |   2
-rw-r--r-- | lib/vsprintf.c             |  19
12 files changed, 271 insertions(+), 146 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index d971366b8de2..f34be6417d71 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -244,6 +244,9 @@ config CPU_RMAP
 	bool
 	depends on SMP
 
+config DQL
+	bool
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
diff --git a/lib/Makefile b/lib/Makefile
index a4da283f5dc0..c0ffaaff6534 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -17,7 +17,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
-lib-y	+= kobject.o kref.o klist.o
+lib-y	+= kobject.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
@@ -115,6 +115,8 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
 
+obj-$(CONFIG_DQL) += dynamic_queue_limits.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a78b7c6e042c..77cb245f8e7b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -268,12 +268,16 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
  * Try to repair the damage, so we have a better chance to get useful
  * debug output.
  */
-static void
+static int
 debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
 		   void * addr, enum debug_obj_state state)
 {
+	int fixed = 0;
+
 	if (fixup)
-		debug_objects_fixups += fixup(addr, state);
+		fixed = fixup(addr, state);
+	debug_objects_fixups += fixed;
+	return fixed;
 }
 
 static void debug_object_is_on_stack(void *addr, int onstack)
@@ -386,6 +390,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	struct debug_obj o = { .object = addr,
+			       .state = ODEBUG_STATE_NOTAVAILABLE,
+			       .descr = descr };
 
 	if (!debug_objects_enabled)
 		return;
@@ -425,8 +432,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
 	 * let the type specific code decide whether this is
 	 * true or not.
 	 */
-	debug_object_fixup(descr->fixup_activate, addr,
-			   ODEBUG_STATE_NOTAVAILABLE);
+	if (debug_object_fixup(descr->fixup_activate, addr,
+			       ODEBUG_STATE_NOTAVAILABLE))
+		debug_print_object(&o, "activate");
 }
 
 /**
@@ -563,6 +571,44 @@ out_unlock:
 }
 
 /**
+ * debug_object_assert_init - debug checks when object should be init-ed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	raw_spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj) {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		/*
+		 * Maybe the object is static. Let the type specific
+		 * code decide what to do.
+		 */
+		if (debug_object_fixup(descr->fixup_assert_init, addr,
+				       ODEBUG_STATE_NOTAVAILABLE))
+			debug_print_object(&o, "assert_init");
+		return;
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
  * debug_object_active_state - debug checks object usage state machine
  * @addr: address of the object
  * @descr: pointer to an object specific debug description structure
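The hunks above make debug_object_fixup() report whether a fixup was applied and add debug_object_assert_init() for objects that are about to be used but may only be statically initialized. Purely as an illustration (not part of this patch), a subsystem could wire the new fixup_assert_init callback roughly like this; my_obj and its descriptor are hypothetical names:

	/* Hypothetical subsystem glue -- illustrative sketch only. */
	static struct debug_obj_descr my_obj_debug_descr;

	static int my_obj_fixup_assert_init(void *addr, enum debug_obj_state state)
	{
		if (state == ODEBUG_STATE_NOTAVAILABLE) {
			/* Treat the object as statically initialized and start tracking it. */
			debug_object_init(addr, &my_obj_debug_descr);
			return 1;	/* fixup applied; the caller prints the object */
		}
		return 0;
	}

	static struct debug_obj_descr my_obj_debug_descr = {
		.name			= "my_obj",
		.fixup_assert_init	= my_obj_fixup_assert_init,
	};

A subsystem wrapper would then call debug_object_assert_init(obj, &my_obj_debug_descr) before touching the object.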
diff --git a/lib/devres.c b/lib/devres.c
index 7c0e953a7486..4fbc09e6e9e6 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -85,6 +85,57 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
 }
 EXPORT_SYMBOL(devm_iounmap);
 
+/**
+ * devm_request_and_ioremap() - Check, request region, and ioremap resource
+ * @dev: Generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
+ * everything is undone on driver detach. Checks arguments, so you can feed
+ * it the result from e.g. platform_get_resource() directly. Returns the
+ * remapped pointer or NULL on error. Usage example:
+ *
+ *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *	base = devm_request_and_ioremap(&pdev->dev, res);
+ *	if (!base)
+ *		return -EADDRNOTAVAIL;
+ */
+void __iomem *devm_request_and_ioremap(struct device *dev,
+			struct resource *res)
+{
+	resource_size_t size;
+	const char *name;
+	void __iomem *dest_ptr;
+
+	BUG_ON(!dev);
+
+	if (!res || resource_type(res) != IORESOURCE_MEM) {
+		dev_err(dev, "invalid resource\n");
+		return NULL;
+	}
+
+	size = resource_size(res);
+	name = res->name ?: dev_name(dev);
+
+	if (!devm_request_mem_region(dev, res->start, size, name)) {
+		dev_err(dev, "can't request region for resource %pR\n", res);
+		return NULL;
+	}
+
+	if (res->flags & IORESOURCE_CACHEABLE)
+		dest_ptr = devm_ioremap(dev, res->start, size);
+	else
+		dest_ptr = devm_ioremap_nocache(dev, res->start, size);
+
+	if (!dest_ptr) {
+		dev_err(dev, "ioremap failed for resource %pR\n", res);
+		devm_release_mem_region(dev, res->start, size);
+	}
+
+	return dest_ptr;
+}
+EXPORT_SYMBOL(devm_request_and_ioremap);
+
 #ifdef CONFIG_HAS_IOPORT
 /*
  * Generic iomap devres
@@ -348,5 +399,5 @@ void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
 	}
 }
 EXPORT_SYMBOL(pcim_iounmap_regions);
-#endif
-#endif
+#endif /* CONFIG_PCI */
+#endif /* CONFIG_HAS_IOPORT */
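The kernel-doc above already sketches the intended call pattern; expanded slightly, a platform driver probe using the new helper might look like the following (driver and register names are made up for illustration, not taken from this patch):

	static int my_drv_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_request_and_ioremap(&pdev->dev, res);
		if (!base)
			return -EADDRNOTAVAIL;

		/* Both the region request and the mapping are undone on driver detach. */
		writel(0x1, base + MY_DRV_CTRL_REG);	/* MY_DRV_CTRL_REG is hypothetical */
		return 0;
	}

Because the helper validates its arguments, the platform_get_resource() result can be passed in without a separate NULL check.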
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 74c6c7fce749..fea790a2b176 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 
 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
 {
-	return ((a->dev_addr == a->dev_addr) &&
+	return ((a->dev_addr == b->dev_addr) &&
 		(a->dev == b->dev)) ? true : false;
 }
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
new file mode 100644
index 000000000000..3d1bdcdd7db4
--- /dev/null
+++ b/lib/dynamic_queue_limits.c
@@ -0,0 +1,133 @@
+/*
+ * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/dynamic_queue_limits.h>
+
+#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+
+/* Records completed count and recalculates the queue limit */
+void dql_completed(struct dql *dql, unsigned int count)
+{
+	unsigned int inprogress, prev_inprogress, limit;
+	unsigned int ovlimit, all_prev_completed, completed;
+
+	/* Can't complete more than what's in queue */
+	BUG_ON(count > dql->num_queued - dql->num_completed);
+
+	completed = dql->num_completed + count;
+	limit = dql->limit;
+	ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
+	inprogress = dql->num_queued - completed;
+	prev_inprogress = dql->prev_num_queued - dql->num_completed;
+	all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+
+	if ((ovlimit && !inprogress) ||
+	    (dql->prev_ovlimit && all_prev_completed)) {
+		/*
+		 * Queue considered starved if:
+		 *   - The queue was over-limit in the last interval,
+		 *     and there is no more data in the queue.
+		 *  OR
+		 *   - The queue was over-limit in the previous interval and
+		 *     when enqueuing it was possible that all queued data
+		 *     had been consumed. This covers the case when queue
+		 *     may have becomes starved between completion processing
+		 *     running and next time enqueue was scheduled.
+		 *
+		 *     When queue is starved increase the limit by the amount
+		 *     of bytes both sent and completed in the last interval,
+		 *     plus any previous over-limit.
+		 */
+		limit += POSDIFF(completed, dql->prev_num_queued) +
+		    dql->prev_ovlimit;
+		dql->slack_start_time = jiffies;
+		dql->lowest_slack = UINT_MAX;
+	} else if (inprogress && prev_inprogress && !all_prev_completed) {
+		/*
+		 * Queue was not starved, check if the limit can be decreased.
+		 * A decrease is only considered if the queue has been busy in
+		 * the whole interval (the check above).
+		 *
+		 * If there is slack, the amount of execess data queued above
+		 * the the amount needed to prevent starvation, the queue limit
+		 * can be decreased. To avoid hysteresis we consider the
+		 * minimum amount of slack found over several iterations of the
+		 * completion routine.
+		 */
+		unsigned int slack, slack_last_objs;
+
+		/*
+		 * Slack is the maximum of
+		 *   - The queue limit plus previous over-limit minus twice
+		 *     the number of objects completed. Note that two times
+		 *     number of completed bytes is a basis for an upper bound
+		 *     of the limit.
+		 *   - Portion of objects in the last queuing operation that
+		 *     was not part of non-zero previous over-limit. That is
+		 *     "round down" by non-overlimit portion of the last
+		 *     queueing operation.
+		 */
+		slack = POSDIFF(limit + dql->prev_ovlimit,
+		    2 * (completed - dql->num_completed));
+		slack_last_objs = dql->prev_ovlimit ?
+		    POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
+
+		slack = max(slack, slack_last_objs);
+
+		if (slack < dql->lowest_slack)
+			dql->lowest_slack = slack;
+
+		if (time_after(jiffies,
+			       dql->slack_start_time + dql->slack_hold_time)) {
+			limit = POSDIFF(limit, dql->lowest_slack);
+			dql->slack_start_time = jiffies;
+			dql->lowest_slack = UINT_MAX;
+		}
+	}
+
+	/* Enforce bounds on limit */
+	limit = clamp(limit, dql->min_limit, dql->max_limit);
+
+	if (limit != dql->limit) {
+		dql->limit = limit;
+		ovlimit = 0;
+	}
+
+	dql->adj_limit = limit + completed;
+	dql->prev_ovlimit = ovlimit;
+	dql->prev_last_obj_cnt = dql->last_obj_cnt;
+	dql->num_completed = completed;
+	dql->prev_num_queued = dql->num_queued;
+}
+EXPORT_SYMBOL(dql_completed);
+
+void dql_reset(struct dql *dql)
+{
+	/* Reset all dynamic values */
+	dql->limit = 0;
+	dql->num_queued = 0;
+	dql->num_completed = 0;
+	dql->last_obj_cnt = 0;
+	dql->prev_num_queued = 0;
+	dql->prev_last_obj_cnt = 0;
+	dql->prev_ovlimit = 0;
+	dql->lowest_slack = UINT_MAX;
+	dql->slack_start_time = jiffies;
+}
+EXPORT_SYMBOL(dql_reset);
+
+int dql_init(struct dql *dql, unsigned hold_time)
+{
+	dql->max_limit = DQL_MAX_LIMIT;
+	dql->min_limit = 0;
+	dql->slack_hold_time = hold_time;
+	dql_reset(dql);
+	return 0;
+}
+EXPORT_SYMBOL(dql_init);
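dql_completed() above is only the completion half of the interface; the enqueue-side helpers (dql_queued(), dql_avail()) are inline functions in include/linux/dynamic_queue_limits.h, which is outside this diff. Assuming that header, a network driver's transmit and completion paths might use the library roughly as follows; txq, stop_queue() and wake_queue() are hypothetical driver names:

	/* Ring setup: dql_init(&txq->dql, HZ) gives a one-second slack hold time. */

	/* Transmit path, after handing 'bytes' to the hardware ring: */
	dql_queued(&txq->dql, bytes);
	if (dql_avail(&txq->dql) < 0)
		stop_queue(txq);

	/* Completion path, after the hardware reports 'bytes' as sent: */
	dql_completed(&txq->dql, bytes);
	if (dql_avail(&txq->dql) >= 0)
		wake_queue(txq);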
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 4f7554025e30..b4801f51b607 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -149,7 +149,7 @@ static int debugfs_ul_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
 
-static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
+static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
 				struct dentry *parent, unsigned long *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_ul);
@@ -169,7 +169,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
 			debugfs_stacktrace_depth_set, "%llu\n");
 
 static struct dentry *debugfs_create_stacktrace_depth(
-	const char *name, mode_t mode,
+	const char *name, umode_t mode,
 	struct dentry *parent, unsigned long *value)
 {
 	return debugfs_create_file(name, mode, parent, value,
@@ -193,7 +193,7 @@ static int debugfs_atomic_t_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
 			debugfs_atomic_t_set, "%lld\n");
 
-static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
+static struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
 				struct dentry *parent, atomic_t *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
@@ -202,7 +202,7 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
 struct dentry *fault_create_debugfs_attr(const char *name,
 			struct dentry *parent, struct fault_attr *attr)
 {
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 	struct dentry *dir;
 
 	dir = debugfs_create_dir(name, parent);
diff --git a/lib/kobject.c b/lib/kobject.c
index 640bd98a4c8a..c33d7a18d635 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,43 +746,11 @@ void kset_unregister(struct kset *k)
  */
 struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
-	return kset_find_obj_hinted(kset, name, NULL);
-}
-
-/**
- * kset_find_obj_hinted - search for object in kset given a predecessor hint.
- * @kset: kset we're looking in.
- * @name: object's name.
- * @hint: hint to possible object's predecessor.
- *
- * Check the hint's next object and if it is a match return it directly,
- * otherwise, fall back to the behavior of kset_find_obj(). Either way
- * a reference for the returned object is held and the reference on the
- * hinted object is released.
- */
-struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
-				     struct kobject *hint)
-{
 	struct kobject *k;
 	struct kobject *ret = NULL;
 
 	spin_lock(&kset->list_lock);
 
-	if (!hint)
-		goto slow_search;
-
-	/* end of list detection */
-	if (hint->entry.next == kset->list.next)
-		goto slow_search;
-
-	k = container_of(hint->entry.next, struct kobject, entry);
-	if (!kobject_name(k) || strcmp(kobject_name(k), name))
-		goto slow_search;
-
-	ret = kobject_get(k);
-	goto unlock_exit;
-
-slow_search:
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
 			ret = kobject_get(k);
@@ -790,12 +758,7 @@ slow_search:
 		}
 	}
 
-unlock_exit:
 	spin_unlock(&kset->list_lock);
-
-	if (hint)
-		kobject_put(hint);
-
 	return ret;
 }
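With the hinted variant removed, kset_find_obj() is back to a plain list walk under the kset lock; existing callers are unchanged. For reference, the usual calling pattern (with a hypothetical kset, name, and helper) is:

	struct kobject *kobj;

	kobj = kset_find_obj(my_kset, "foo");
	if (kobj) {
		/* kset_find_obj() returned a counted reference; drop it when done. */
		do_something_with(kobj);	/* hypothetical */
		kobject_put(kobj);
	}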
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index ad72a03ce5e9..e66e9b632617 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -259,6 +259,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		struct sk_buff *skb;
 		size_t len;
 
+		if (!netlink_has_listeners(uevent_sock, 1))
+			continue;
+
 		/* allocate message with the maximum possible size */
 		len = strlen(action_string) + strlen(devpath) + 2;
 		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
diff --git a/lib/kref.c b/lib/kref.c
deleted file mode 100644
index 3efb882b11db..000000000000
--- a/lib/kref.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * kref.c - library routines for handling generic reference counted objects
- *
- * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (C) 2004 IBM Corp.
- *
- * based on lib/kobject.c which was:
- * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
- *
- * This file is released under the GPLv2.
- *
- */
-
-#include <linux/kref.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-/**
- * kref_init - initialize object.
- * @kref: object in question.
- */
-void kref_init(struct kref *kref)
-{
-	atomic_set(&kref->refcount, 1);
-	smp_mb();
-}
-
-/**
- * kref_get - increment refcount for object.
- * @kref: object.
- */
-void kref_get(struct kref *kref)
-{
-	WARN_ON(!atomic_read(&kref->refcount));
-	atomic_inc(&kref->refcount);
-	smp_mb__after_atomic_inc();
-}
-
-/**
- * kref_put - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- *
- * Decrement the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0. Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory. Only use the return value if you want to see if the kref is now
- * gone, not present.
- */
-int kref_put(struct kref *kref, void (*release)(struct kref *kref))
-{
-	WARN_ON(release == NULL);
-	WARN_ON(release == (void (*)(struct kref *))kfree);
-
-	if (atomic_dec_and_test(&kref->refcount)) {
-		release(kref);
-		return 1;
-	}
-	return 0;
-}
-
-
-/**
- * kref_sub - subtract a number of refcounts for object.
- * @kref: object.
- * @count: Number of recounts to subtract.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- *
- * Subtract @count from the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0. Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory. Only use the return value if you want to see if the kref is now
- * gone, not present.
- */
-int kref_sub(struct kref *kref, unsigned int count,
-	     void (*release)(struct kref *kref))
-{
-	WARN_ON(release == NULL);
-	WARN_ON(release == (void (*)(struct kref *))kfree);
-
-	if (atomic_sub_and_test((int) count, &kref->refcount)) {
-		release(kref);
-		return 1;
-	}
-	return 0;
-}
-
-EXPORT_SYMBOL(kref_init);
-EXPORT_SYMBOL(kref_get);
-EXPORT_SYMBOL(kref_put);
-EXPORT_SYMBOL(kref_sub);
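lib/kref.c goes away because the kref API is provided entirely as static inlines in include/linux/kref.h (not shown in this diff), which is also why the Makefile hunk above stops building kref.o. The interface itself is unchanged for callers; a minimal sketch, assuming the header-only API and a hypothetical my_thing type:

	struct my_thing {
		struct kref refcount;
	};

	static void my_thing_release(struct kref *kref)
	{
		struct my_thing *t = container_of(kref, struct my_thing, refcount);

		kfree(t);
	}

	static void my_thing_put(struct my_thing *t)
	{
		/* Always pass a real release function, never kfree itself. */
		kref_put(&t->refcount, my_thing_release);
	}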
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index 6a3bd48fa2a0..75510e94f7d0 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
@@ -1,5 +1,6 @@
 #include <asm/div64.h>
 #include <linux/reciprocal_div.h>
+#include <linux/export.h>
 
 u32 reciprocal_value(u32 k)
 {
@@ -7,3 +8,4 @@ u32 reciprocal_value(u32 k)
 	do_div(val, k);
 	return (u32)val;
 }
+EXPORT_SYMBOL(reciprocal_value);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 993599e66e5a..8e75003d62f6 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -777,6 +777,18 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
 	return string(buf, end, uuid, spec);
 }
 
+static
+char *netdev_feature_string(char *buf, char *end, const u8 *addr,
+		      struct printf_spec spec)
+{
+	spec.flags |= SPECIAL | SMALL | ZEROPAD;
+	if (spec.field_width == -1)
+		spec.field_width = 2 + 2 * sizeof(netdev_features_t);
+	spec.base = 16;
+
+	return number(buf, end, *(const netdev_features_t *)addr, spec);
+}
+
 int kptr_restrict __read_mostly;
 
 /*
@@ -824,6 +836,7 @@ int kptr_restrict __read_mostly;
  *       Do not use this feature without some mechanism to verify the
  *       correctness of the format string and va_list arguments.
  * - 'K' For a kernel pointer that should be hidden from unprivileged users
+ * - 'NF' For a netdev_features_t
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -896,6 +909,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 				     has_capability_noaudit(current, CAP_SYSLOG))))
 			ptr = NULL;
 		break;
+	case 'N':
+		switch (fmt[1]) {
+		case 'F':
+			return netdev_feature_string(buf, end, ptr, spec);
+		}
+		break;
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
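With the 'NF' extension, a netdev_features_t can be printed through the %p machinery by passing it by reference. A hypothetical call site (not part of this patch) would look like:

	netdev_features_t features = dev->features;	/* 'dev' is a hypothetical struct net_device * */

	/* Printed zero-padded in hex, e.g. 0x0000000000004829 */
	netdev_dbg(dev, "features = %pNF\n", &features);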