Diffstat (limited to 'lib')

 lib/Kconfig.kgdb   |  16
 lib/bitmap.c       |  16
 lib/bitrev.c       |   3
 lib/bug.c          |   2
 lib/debugobjects.c |  15
 lib/devres.c       |   2
 lib/div64.c        |  10
 lib/hexdump.c      |   7
 lib/kernel_lock.c  | 120
 lib/lmb.c          |  46
 lib/parser.c       |  32
 lib/radix-tree.c   | 122
 lib/ts_bm.c        |   2
 lib/vsprintf.c     | 128

 14 files changed, 324 insertions(+), 197 deletions(-)
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index f2e01ac5ab09..a5d4b1dac2a5 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,4 +1,10 @@
+config HAVE_ARCH_KGDB_SHADOW_INFO
+	bool
+
+config HAVE_ARCH_KGDB
+	bool
+
 menuconfig KGDB
 	bool "KGDB: kernel debugging with remote gdb"
 	select FRAME_POINTER
@@ -10,15 +16,10 @@ menuconfig KGDB
 	  at http://kgdb.sourceforge.net as well as in DocBook form
 	  in Documentation/DocBook/.  If unsure, say N.
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-	bool
-
-config HAVE_ARCH_KGDB
-	bool
+if KGDB
 
 config KGDB_SERIAL_CONSOLE
 	tristate "KGDB: use kgdb over the serial console"
-	depends on KGDB
 	select CONSOLE_POLL
 	select MAGIC_SYSRQ
 	default y
@@ -28,7 +29,6 @@ config KGDB_SERIAL_CONSOLE
 
 config KGDB_TESTS
 	bool "KGDB: internal test suite"
-	depends on KGDB
 	default n
 	help
 	  This is a kgdb I/O module specifically designed to test
@@ -56,3 +56,5 @@ config KGDB_TESTS_BOOT_STRING
 	  boot.  See the drivers/misc/kgdbts.c for detailed
 	  information about other strings you could use beyond the
 	  default of V1F100.
+
+endif # KGDB
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c4cb48f77f0c..482df94ea21e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,22 +316,6 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
 EXPORT_SYMBOL(bitmap_scnprintf);
 
 /**
- * bitmap_scnprintf_len - return buffer length needed to convert
- * bitmap to an ASCII hex string.
- * @len: number of bits to be converted
- */
-int bitmap_scnprintf_len(unsigned int len)
-{
-	/* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
-	int bitslen = ALIGN(len, CHUNKSZ);
-	int wordlen = CHUNKSZ / 4;
-	int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
-
-	return buflen;
-}
-EXPORT_SYMBOL(bitmap_scnprintf_len);
-
-/**
  * __bitmap_parse - convert an ASCII hex string into a bitmap.
  * @buf: pointer to buffer containing string.
  * @buflen: buffer size in bytes.  If string is smaller than this
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 989aff73f881..3956203456d4 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = {
 };
 EXPORT_SYMBOL_GPL(byte_rev_table);
 
-static __always_inline u16 bitrev16(u16 x)
+u16 bitrev16(u16 x)
 {
 	return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
 }
+EXPORT_SYMBOL(bitrev16);
 
 /**
  * bitrev32 - reverse the order of bits in a u32 value
diff --git a/lib/bug.c b/lib/bug.c
index 530f38f55787..bfeafd60ee9f 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
  */
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/sched.h>
 
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		       (void *)bugaddr);
 
 		show_regs(regs);
+		add_taint(TAINT_WARN);
 		return BUG_TRAP_TYPE_WARN;
 	}
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae1..85b18d79be89 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 	struct debug_obj *new;
+	unsigned long flags;
 
 	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
 		return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
 		if (!new)
 			return obj_pool_free;
 
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		obj_pool_free++;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
 	}
 	return obj_pool_free;
 }
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
 	struct debug_obj *obj = NULL;
-	int retry = 0;
 
-repeat:
 	spin_lock(&pool_lock);
 	if (obj_pool.first) {
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
 	}
 	spin_unlock(&pool_lock);
 
-	if (fill_pool() && !obj && !retry++)
-		goto repeat;
-
 	return obj;
 }
 
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	struct debug_obj *obj;
 	unsigned long flags;
 
+	fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	spin_lock_irqsave(&db->lock, flags);
diff --git a/lib/devres.c b/lib/devres.c
index 26c87c49d776..72c8909006da 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -2,7 +2,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 
-static void devm_ioremap_release(struct device *dev, void *res)
+void devm_ioremap_release(struct device *dev, void *res)
 {
 	iounmap(*(void __iomem **)res);
 }
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f030..a111eb8de9cf 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
 #endif
 
 #endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
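The new iter_div_u64_rem() wrapper exports the __iter_div_u64_rem() inline, which divides by repeated subtraction — cheap when the quotient is known to be small, since it avoids a full 64-by-32 software division on 32-bit machines. For illustration only, a minimal userspace model of that algorithm (the names here are stand-ins, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace model of the kernel's iterative div/mod helper:
 * divide by repeated subtraction.  Only sensible when the
 * dividend is not much bigger than the divisor.
 */
static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
				 uint64_t *remainder)
{
	uint32_t quot = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		quot++;
	}
	*remainder = dividend;
	return quot;
}

int main(void)
{
	uint64_t rem;
	/* e.g. converting ~3.2 billion ns to seconds: quotient is tiny */
	uint32_t sec = iter_div_u64_rem(3200000007ULL, 1000000000U, &rem);

	printf("%u s + %llu ns\n", sec, (unsigned long long)rem);
	return 0;
}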
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 343546550dc9..f07c0db81d26 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -12,6 +12,9 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 
+const char hex_asc[] = "0123456789abcdef";
+EXPORT_SYMBOL(hex_asc);
+
 /**
  * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
  * @buf: data blob to dump
@@ -93,8 +96,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 		for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
 		     j++) {
 			ch = ptr[j];
-			linebuf[lx++] = hex_asc(ch >> 4);
-			linebuf[lx++] = hex_asc(ch & 0x0f);
+			linebuf[lx++] = hex_asc_hi(ch);
+			linebuf[lx++] = hex_asc_lo(ch);
 			linebuf[lx++] = ' ';
 		}
 		ascii_column = 3 * rowsize + 2;
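The hex_asc_hi()/hex_asc_lo() helpers used above index the newly exported hex_asc[] table. A small userspace sketch, assuming the macro definitions of that era in linux/kernel.h (high nibble selects first, then low nibble):

#include <stdio.h>

/* Userspace model of the hex_asc helpers */
static const char hex_asc[] = "0123456789abcdef";
#define hex_asc_lo(x)	hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x)	hex_asc[((x) & 0xf0) >> 4]

int main(void)
{
	unsigned char ch = 0x3f;

	/* prints "3f": high nibble first, then low nibble */
	printf("%c%c\n", hex_asc_hi(ch), hex_asc_lo(ch));
	return 0;
}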
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cd3e82530b03..01a3c22c1b5a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -11,79 +11,121 @@
 #include <linux/semaphore.h>
 
 /*
- * The 'big kernel semaphore'
+ * The 'big kernel lock'
  *
- * This mutex is taken and released recursively by lock_kernel()
+ * This spinlock is taken and released recursively by lock_kernel()
  * and unlock_kernel().  It is transparently dropped and reacquired
  * over schedule().  It is used to protect legacy code that hasn't
  * been migrated to a proper locking design yet.
  *
- * Note: code locked by this semaphore will only be serialized against
- * other code using the same locking facility. The code guarantees that
- * the task remains on the same CPU.
- *
  * Don't use in new code.
  */
-static DECLARE_MUTEX(kernel_sem);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+
 
 /*
- * Re-acquire the kernel semaphore.
+ * Acquire/release the underlying lock from the scheduler.
  *
- * This function is called with preemption off.
+ * This is called with preemption disabled, and should
+ * return an error value if it cannot get the lock and
+ * TIF_NEED_RESCHED gets set.
  *
- * We are executing in schedule() so the code must be extremely careful
- * about recursion, both due to the down() and due to the enabling of
- * preemption. schedule() will re-check the preemption flag after
- * reacquiring the semaphore.
+ * If it successfully gets the lock, it should increment
+ * the preemption count like any spinlock does.
+ *
+ * (This works on UP too - _raw_spin_trylock will never
+ * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-	struct task_struct *task = current;
-	int saved_lock_depth = task->lock_depth;
-
-	BUG_ON(saved_lock_depth < 0);
-
-	task->lock_depth = -1;
-	preempt_enable_no_resched();
-
-	down(&kernel_sem);
-
+	while (!_raw_spin_trylock(&kernel_flag)) {
+		if (test_thread_flag(TIF_NEED_RESCHED))
+			return -EAGAIN;
+		cpu_relax();
+	}
 	preempt_disable();
-	task->lock_depth = saved_lock_depth;
-
 	return 0;
 }
 
 void __lockfunc __release_kernel_lock(void)
 {
-	up(&kernel_sem);
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable_no_resched();
 }
 
 /*
- * Getting the big kernel semaphore.
+ * These are the BKL spinlocks - we try to be polite about preemption.
+ * If SMP is not on (ie UP preemption), this all goes away because the
+ * _raw_spin_trylock() will always succeed.
  */
-void __lockfunc lock_kernel(void)
+#ifdef CONFIG_PREEMPT
+static inline void __lock_kernel(void)
 {
-	struct task_struct *task = current;
-	int depth = task->lock_depth + 1;
+	preempt_disable();
+	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+		/*
+		 * If preemption was disabled even before this
+		 * was called, there's nothing we can be polite
+		 * about - just spin.
+		 */
+		if (preempt_count() > 1) {
+			_raw_spin_lock(&kernel_flag);
+			return;
+		}
 
-	if (likely(!depth))
 		/*
-		 * No recursion worries - we set up lock_depth _after_
+		 * Otherwise, let's wait for the kernel lock
+		 * with preemption enabled..
 		 */
-		down(&kernel_sem);
+		do {
+			preempt_enable();
+			while (spin_is_locked(&kernel_flag))
+				cpu_relax();
+			preempt_disable();
+		} while (!_raw_spin_trylock(&kernel_flag));
+	}
+}
 
-	task->lock_depth = depth;
+#else
+
+/*
+ * Non-preemption case - just get the spinlock
+ */
+static inline void __lock_kernel(void)
+{
+	_raw_spin_lock(&kernel_flag);
 }
+#endif
 
-void __lockfunc unlock_kernel(void)
+static inline void __unlock_kernel(void)
 {
-	struct task_struct *task = current;
+	/*
+	 * the BKL is not covered by lockdep, so we open-code the
+	 * unlocking sequence (and thus avoid the dep-chain ops):
+	 */
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable();
+}
 
-	BUG_ON(task->lock_depth < 0);
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously, so we only need to
+ * worry about other CPU's.
+ */
+void __lockfunc lock_kernel(void)
+{
+	int depth = current->lock_depth+1;
+	if (likely(!depth))
+		__lock_kernel();
+	current->lock_depth = depth;
+}
 
-	if (likely(--task->lock_depth < 0))
-		up(&kernel_sem);
+void __lockfunc unlock_kernel(void)
+{
+	BUG_ON(current->lock_depth < 0);
+	if (likely(--current->lock_depth < 0))
+		__unlock_kernel();
 }
 
 EXPORT_SYMBOL(lock_kernel);
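The __reacquire_kernel_lock() path above spins on a trylock but bails out with -EAGAIN as soon as a reschedule is pending, so schedule() can run something else instead of burning the CPU. A hedged userspace model of that pattern using C11 atomics — kernel_flag and need_resched here are stand-ins for the real kernel state, not the kernel's types:

#include <stdatomic.h>
#include <errno.h>

static atomic_flag kernel_flag = ATOMIC_FLAG_INIT;
static atomic_bool need_resched;

/*
 * Model of the "polite" reacquire: keep trying the lock, but give
 * up immediately when a reschedule has been requested, returning
 * -EAGAIN so the caller (the scheduler, in the kernel) can retry
 * later instead of spinning with a runnable task waiting.
 */
static int reacquire_kernel_lock(void)
{
	while (atomic_flag_test_and_set_explicit(&kernel_flag,
						 memory_order_acquire)) {
		if (atomic_load(&need_resched))
			return -EAGAIN;	/* back off, retry later */
	}
	return 0;			/* lock held */
}

int main(void)
{
	/* Uncontended case: acquires immediately and returns 0 */
	return reacquire_kernel_lock();
}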
diff --git a/lib/lmb.c b/lib/lmb.c
index 83287d3869a3..5d7b9286503e 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -19,31 +19,43 @@
 
 struct lmb lmb;
 
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
+{
+	if (p && strstr(p, "debug"))
+		lmb_debug = 1;
+	return 0;
+}
+early_param("lmb", early_lmb);
+
 void lmb_dump_all(void)
 {
-#ifdef DEBUG
 	unsigned long i;
 
-	pr_debug("lmb_dump_all:\n");
-	pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
-	pr_debug(" memory.size = 0x%llx\n",
+	if (!lmb_debug)
+		return;
+
+	pr_info("lmb_dump_all:\n");
+	pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
+	pr_info(" memory.size = 0x%llx\n",
 	    (unsigned long long)lmb.memory.size);
 	for (i=0; i < lmb.memory.cnt ;i++) {
-		pr_debug(" memory.region[0x%x].base = 0x%llx\n",
+		pr_info(" memory.region[0x%lx].base = 0x%llx\n",
 		    i, (unsigned long long)lmb.memory.region[i].base);
-		pr_debug(" .size = 0x%llx\n",
+		pr_info(" .size = 0x%llx\n",
 		    (unsigned long long)lmb.memory.region[i].size);
 	}
 
-	pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
-	pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size);
+	pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
+	pr_info(" reserved.size = 0x%llx\n",
+	    (unsigned long long)lmb.reserved.size);
 	for (i=0; i < lmb.reserved.cnt ;i++) {
-		pr_debug(" reserved.region[0x%x].base = 0x%llx\n",
+		pr_info(" reserved.region[0x%lx].base = 0x%llx\n",
 		    i, (unsigned long long)lmb.reserved.region[i].base);
-		pr_debug(" .size = 0x%llx\n",
+		pr_info(" .size = 0x%llx\n",
 		    (unsigned long long)lmb.reserved.region[i].size);
 	}
-#endif /* DEBUG */
 }
@@ -286,8 +298,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				base = ~(u64)0;
 			return base;
 		}
@@ -333,6 +344,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
 	struct lmb_region *mem = &lmb.memory;
 	int i;
 
+	BUG_ON(0 == size);
+
+	size = lmb_align_up(size, align);
+
 	for (i = 0; i < mem->cnt; i++) {
 		u64 ret = lmb_alloc_nid_region(&mem->region[i],
 					       nid_range,
@@ -370,6 +385,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 
 	BUG_ON(0 == size);
 
+	size = lmb_align_up(size, align);
+
 	/* On some platforms, make sure we allocate lowmem */
 	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
 	if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -393,8 +410,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				return 0;
 			return base;
 		}
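The lmb change rounds the requested size up once at the top of each allocator, so the same aligned value is used both for the overlap check against lmb.reserved and for the final lmb_add_region() call; previously the check used the raw size while the reservation used the aligned size, so the two could disagree. A toy model of the round-up arithmetic involved, assuming a power-of-two alignment as lmb does:

#include <stdint.h>
#include <stdio.h>

/* Model of lmb_align_up(): round x up to a power-of-two boundary */
static uint64_t lmb_align_up(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	/* 0x2345 rounded up to a 0x1000 boundary -> 0x3000 */
	printf("0x%llx\n",
	       (unsigned long long)lmb_align_up(0x2345, 0x1000));
	return 0;
}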
diff --git a/lib/parser.c b/lib/parser.c
index 703c8c13b346..4f0cbc03e0e8 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -182,18 +182,25 @@ int match_hex(substring_t *s, int *result)
 }
 
 /**
- * match_strcpy: - copies the characters from a substring_t to a string
- * @to: string to copy characters to.
- * @s: &substring_t to copy
+ * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
+ * @dest: where to copy to
+ * @src: &substring_t to copy
+ * @size: size of destination buffer
  *
- * Description: Copies the set of characters represented by the given
- * &substring_t @s to the c-style string @to. Caller guarantees that @to is
- * large enough to hold the characters of @s.
+ * Description: Copy the characters in &substring_t @src to the
+ * c-style string @dest.  Copy no more than @size - 1 characters, plus
+ * the terminating NUL.  Return length of @src.
  */
-void match_strcpy(char *to, const substring_t *s)
+size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
 {
-	memcpy(to, s->from, s->to - s->from);
-	to[s->to - s->from] = '\0';
+	size_t ret = src->to - src->from;
+
+	if (size) {
+		size_t len = ret >= size ? size - 1 : ret;
+		memcpy(dest, src->from, len);
+		dest[len] = '\0';
+	}
+	return ret;
 }
 
 /**
@@ -206,9 +213,10 @@ void match_strcpy(char *to, const substring_t *s)
  */
 char *match_strdup(const substring_t *s)
 {
-	char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+	size_t sz = s->to - s->from + 1;
+	char *p = kmalloc(sz, GFP_KERNEL);
 	if (p)
-		match_strcpy(p, s);
+		match_strlcpy(p, s, sz);
 	return p;
 }
 
@@ -216,5 +224,5 @@ EXPORT_SYMBOL(match_token);
 EXPORT_SYMBOL(match_int);
 EXPORT_SYMBOL(match_octal);
 EXPORT_SYMBOL(match_hex);
-EXPORT_SYMBOL(match_strcpy);
+EXPORT_SYMBOL(match_strlcpy);
 EXPORT_SYMBOL(match_strdup);
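match_strlcpy() adopts strlcpy() semantics: it copies at most size - 1 bytes, always NUL-terminates when size is nonzero, and returns the full token length, so a return value >= size tells the caller the output was truncated. A self-contained sketch — the substring_t here is a minimal stand-in for the kernel type, for illustration only:

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the kernel's substring_t */
typedef struct { const char *from, *to; } substring_t;

static size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
{
	size_t ret = src->to - src->from;

	if (size) {
		size_t len = ret >= size ? size - 1 : ret;

		memcpy(dest, src->from, len);
		dest[len] = '\0';
	}
	return ret;
}

int main(void)
{
	const char *opt = "uid=1000,gid=100";
	substring_t s = { opt + 4, opt + 8 };	/* the "1000" token */
	char buf[3];

	/* Returns 4 (token length) although only "10" fit in buf */
	size_t want = match_strlcpy(buf, &s, sizeof(buf));

	printf("copied \"%s\", needed %zu bytes + NUL\n", buf, want);
	return 0;
}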
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd521716ab1a..56ec21a7f73d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2005 SGI, Christoph Lameter
  * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
@@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 	return root->gfp_mask & __GFP_BITS_MASK;
 }
 
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	__set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	__clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+	root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+	int idx;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (node->tags[tag][idx])
+			return 1;
+	}
+	return 0;
+}
+
 /*
  * This assumes that the caller has performed appropriate preallocation, and
  * that the caller has pinned this thread of control to the current CPU.
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
 	struct radix_tree_node *node =
 			container_of(head, struct radix_tree_node, rcu_head);
+
+	/*
+	 * must only free zeroed nodes into the slab. radix_tree_shrink
+	 * can leave us with a non-NULL entry in the first slot, so clear
+	 * that here to make sure.
+	 */
+	tag_clear(node, 0, 0);
+	tag_clear(node, 1, 0);
+	node->slots[0] = NULL;
+	node->count = 0;
+
 	kmem_cache_free(radix_tree_node_cachep, node);
 }
 
@@ -165,59 +227,6 @@ out:
 }
 EXPORT_SYMBOL(radix_tree_preload);
 
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	__set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	__clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
-{
-	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
-{
-	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
-	root->gfp_mask &= __GFP_BITS_MASK;
-}
-
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
-{
-	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
-	int idx;
-	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-		if (node->tags[tag][idx])
-			return 1;
-	}
-	return 0;
-}
-
 /*
  * Return the maximum key which can be store into a
  * radix tree with height HEIGHT.
@@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 			newptr = radix_tree_ptr_to_indirect(newptr);
 		root->rnode = newptr;
 		root->height--;
-		/* must only free zeroed nodes into the slab */
-		tag_clear(to_free, 0, 0);
-		tag_clear(to_free, 1, 0);
-		to_free->slots[0] = NULL;
-		to_free->count = 0;
 		radix_tree_node_free(to_free);
 	}
 }
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a4..4a7fce72898e 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -63,7 +63,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
 	struct ts_bm *bm = ts_config_priv(conf);
 	unsigned int i, text_len, consumed = state->offset;
 	const u8 *text;
-	int shift = bm->patlen, bs;
+	int shift = bm->patlen - 1, bs;
 
 	for (;;) {
 		text_len = conf->get_next_block(consumed, &text, conf, state);
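The ts_bm fix matters because shift indexes the last character of the current search window: for a pattern of length patlen aligned at text offset 0, that character sits at index patlen - 1, so starting at patlen skipped the very first alignment and missed matches at the start of the text. A toy Horspool-style searcher mirroring that structure — this is an illustrative re-implementation, not the kernel code itself:

#include <stdio.h>

/*
 * Toy bad-character Boyer-Moore/Horspool search.  'shift' indexes
 * the LAST character of the current window, so the first window
 * must start with shift = patlen - 1 (the fix); starting at patlen
 * silently skips a match at offset 0.
 */
static int bm_find(const char *text, int text_len,
		   const char *pat, int patlen)
{
	int bad[256], i, shift = patlen - 1;

	for (i = 0; i < 256; i++)
		bad[i] = patlen;
	for (i = 0; i < patlen - 1; i++)
		bad[(unsigned char)pat[i]] = patlen - 1 - i;

	while (shift < text_len) {
		/* compare backwards from the end of the window */
		for (i = 0; i < patlen; i++)
			if (text[shift - i] != pat[patlen - 1 - i])
				break;
		if (i == patlen)
			return shift - (patlen - 1);	/* match offset */
		shift += bad[(unsigned char)text[shift]];
	}
	return -1;
}

int main(void)
{
	/* match at offset 0 - exactly the case the old init missed */
	printf("%d\n", bm_find("needle in a haystack", 20, "needle", 6));
	return 0;
}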
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6021757a4496..1dc2d1d18fa8 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -22,6 +22,8 @@
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
 
 #include <asm/page.h>		/* for PAGE_SIZE */
 #include <asm/div64.h>
@@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
 	return buf;
 }
 
+static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
+{
+	int len, i;
+
+	if ((unsigned long)s < PAGE_SIZE)
+		s = "<NULL>";
+
+	len = strnlen(s, precision);
+
+	if (!(flags & LEFT)) {
+		while (len < field_width--) {
+			if (buf < end)
+				*buf = ' ';
+			++buf;
+		}
+	}
+	for (i = 0; i < len; ++i) {
+		if (buf < end)
+			*buf = *s;
+		++buf; ++s;
+	}
+	while (len < field_width--) {
+		if (buf < end)
+			*buf = ' ';
+		++buf;
+	}
+	return buf;
+}
+
+static inline void *dereference_function_descriptor(void *ptr)
+{
+#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+	void *p;
+	if (!probe_kernel_address(ptr, p))
+		ptr = p;
+#endif
+	return ptr;
+}
+
+static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+	unsigned long value = (unsigned long) ptr;
+#ifdef CONFIG_KALLSYMS
+	char sym[KSYM_SYMBOL_LEN];
+	sprint_symbol(sym, value);
+	return string(buf, end, sym, field_width, precision, flags);
+#else
+	field_width = 2*sizeof(void *);
+	flags |= SPECIAL | SMALL | ZEROPAD;
+	return number(buf, end, value, 16, field_width, precision, flags);
+#endif
+}
+
+/*
+ * Show a '%p' thing. A kernel extension is that the '%p' is followed
+ * by an extra set of alphanumeric characters that are extended format
+ * specifiers.
+ *
+ * Right now we just handle 'F' (for symbolic Function descriptor pointers)
+ * and 'S' (for Symbolic direct pointers), but this can easily be
+ * extended in the future (network address types etc).
+ *
+ * The difference between 'S' and 'F' is that on ia64 and ppc64 function
+ * pointers are really function descriptors, which contain a pointer to
+ * the real address.
+ */
+static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+	switch (*fmt) {
+	case 'F':
+		ptr = dereference_function_descriptor(ptr);
+		/* Fallthrough */
+	case 'S':
+		return symbol_string(buf, end, ptr, field_width, precision, flags);
+	}
+	flags |= SMALL;
+	if (field_width == -1) {
+		field_width = 2*sizeof(void *);
+		flags |= ZEROPAD;
+	}
+	return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
+}
+
 /**
  * vsnprintf - Format a string and place it in a buffer
  * @buf: The buffer to place the result into
@@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
  */
 int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
-	int len;
 	unsigned long long num;
-	int i, base;
+	int base;
 	char *str, *end, c;
-	const char *s;
 
 	int flags;		/* flags to number() */
 
@@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 			continue;
 
 		case 's':
-			s = va_arg(args, char *);
-			if ((unsigned long)s < PAGE_SIZE)
-				s = "<NULL>";
-
-			len = strnlen(s, precision);
-
-			if (!(flags & LEFT)) {
-				while (len < field_width--) {
-					if (str < end)
-						*str = ' ';
-					++str;
-				}
-			}
-			for (i = 0; i < len; ++i) {
-				if (str < end)
-					*str = *s;
-				++str; ++s;
-			}
-			while (len < field_width--) {
-				if (str < end)
-					*str = ' ';
-				++str;
-			}
+			str = string(str, end, va_arg(args, char *), field_width, precision, flags);
 			continue;
 
 		case 'p':
-			flags |= SMALL;
-			if (field_width == -1) {
-				field_width = 2*sizeof(void *);
-				flags |= ZEROPAD;
-			}
-			str = number(str, end,
-				(unsigned long) va_arg(args, void *),
-				16, field_width, precision, flags);
+			str = pointer(fmt+1, str, end,
+					va_arg(args, void *),
+					field_width, precision, flags);
+			/* Skip all alphanumeric pointer suffixes */
+			while (isalnum(fmt[1]))
+				fmt++;
 			continue;
-
 		case 'n':
 			/* FIXME:
 			* What does C99 say about the overflow case here? */
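With the pointer() hook in place, any printk()-family caller can ask for symbolic output directly: '%pS' resolves the address through kallsyms, and '%pF' additionally dereferences a function descriptor on ia64/ppc64. A hypothetical caller, as a sketch — this snippet is illustrative and not part of the patch:

#include <linux/kernel.h>

void report_caller(void)
{
	/*
	 * With CONFIG_KALLSYMS this prints something like
	 * "called from some_function+0x10/0x30"; without it,
	 * it falls back to a zero-padded hex address.
	 */
	printk(KERN_DEBUG "called from %pS\n",
	       __builtin_return_address(0));
}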