Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.kgdb   |  16
-rw-r--r--  lib/bitmap.c       |  16
-rw-r--r--  lib/devres.c       |   2
-rw-r--r--  lib/hexdump.c      |   7
-rw-r--r--  lib/kernel_lock.c  | 120
-rw-r--r--  lib/lmb.c          |  45
-rw-r--r--  lib/parser.c       |  32
7 files changed, 146 insertions(+), 92 deletions(-)
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index f2e01ac5ab09..a5d4b1dac2a5 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,4 +1,10 @@
+config HAVE_ARCH_KGDB_SHADOW_INFO
+ bool
+
+config HAVE_ARCH_KGDB
+ bool
+
menuconfig KGDB
bool "KGDB: kernel debugging with remote gdb"
select FRAME_POINTER
@@ -10,15 +16,10 @@ menuconfig KGDB
at http://kgdb.sourceforge.net as well as in DocBook form
in Documentation/DocBook/. If unsure, say N.
-config HAVE_ARCH_KGDB_SHADOW_INFO
- bool
-
-config HAVE_ARCH_KGDB
- bool
+if KGDB
config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
- depends on KGDB
select CONSOLE_POLL
select MAGIC_SYSRQ
default y
@@ -28,7 +29,6 @@ config KGDB_SERIAL_CONSOLE
config KGDB_TESTS
bool "KGDB: internal test suite"
- depends on KGDB
default n
help
This is a kgdb I/O module specifically designed to test
@@ -56,3 +56,5 @@ config KGDB_TESTS_BOOT_STRING
boot. See the drivers/misc/kgdbts.c for detailed
information about other strings you could use beyond the
default of V1F100.
+
+endif # KGDB
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c4cb48f77f0c..482df94ea21e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,22 +316,6 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
EXPORT_SYMBOL(bitmap_scnprintf);
/**
- * bitmap_scnprintf_len - return buffer length needed to convert
- * bitmap to an ASCII hex string.
- * @len: number of bits to be converted
- */
-int bitmap_scnprintf_len(unsigned int len)
-{
- /* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
- int bitslen = ALIGN(len, CHUNKSZ);
- int wordlen = CHUNKSZ / 4;
- int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
-
- return buflen;
-}
-EXPORT_SYMBOL(bitmap_scnprintf_len);
-
-/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
diff --git a/lib/devres.c b/lib/devres.c
index 26c87c49d776..72c8909006da 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -2,7 +2,7 @@
#include <linux/io.h>
#include <linux/module.h>
-static void devm_ioremap_release(struct device *dev, void *res)
+void devm_ioremap_release(struct device *dev, void *res)
{
iounmap(*(void __iomem **)res);
}
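
Dropping the static qualifier makes devm_ioremap_release() usable as a devres release callback outside lib/devres.c. As a rough sketch of why that is useful, a hypothetical helper elsewhere could tie its own mapping to the device's devres list and reuse the exported callback; the helper name is illustrative and the generic devres_alloc()/devres_add() calls are assumed context, not part of this diff:

#include <linux/device.h>
#include <linux/io.h>

/* Declared here for the sketch; after this diff the symbol is global. */
void devm_ioremap_release(struct device *dev, void *res);

/* Hypothetical helper, not part of this diff: behaves like devm_ioremap()
 * but lives outside lib/devres.c, which is only possible now that
 * devm_ioremap_release() is no longer static. */
void __iomem *example_devm_ioremap(struct device *dev,
				   resource_size_t offset, unsigned long size)
{
	void __iomem **ptr, *addr;

	/* devres entry whose release hook is the exported callback */
	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (!addr) {
		devres_free(ptr);
		return NULL;
	}

	/* remember the mapping and bind its lifetime to @dev */
	*ptr = addr;
	devres_add(dev, ptr);
	return addr;
}
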
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 343546550dc9..f07c0db81d26 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -12,6 +12,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
+const char hex_asc[] = "0123456789abcdef";
+EXPORT_SYMBOL(hex_asc);
+
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
@@ -93,8 +96,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
j++) {
ch = ptr[j];
- linebuf[lx++] = hex_asc(ch >> 4);
- linebuf[lx++] = hex_asc(ch & 0x0f);
+ linebuf[lx++] = hex_asc_hi(ch);
+ linebuf[lx++] = hex_asc_lo(ch);
linebuf[lx++] = ' ';
}
ascii_column = 3 * rowsize + 2;
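
Exporting hex_asc[] lets the hex_asc_hi()/hex_asc_lo() helpers used above work from modules as well. Their exact definitions are not part of this diff; presumably they are simple nibble lookups into the exported table, along these lines:

/* Assumed shape of the helpers (see include/linux/kernel.h): each picks
 * one nibble of the byte and maps it through the exported table, so e.g.
 * 0xa5 yields 'a' from hex_asc_hi() and '5' from hex_asc_lo(). */
extern const char hex_asc[];
#define hex_asc_lo(x)	hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x)	hex_asc[((x) & 0xf0) >> 4]
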
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cd3e82530b03..01a3c22c1b5a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -11,79 +11,121 @@
#include <linux/semaphore.h>
/*
- * The 'big kernel semaphore'
+ * The 'big kernel lock'
*
- * This mutex is taken and released recursively by lock_kernel()
+ * This spinlock is taken and released recursively by lock_kernel()
* and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
- * Note: code locked by this semaphore will only be serialized against
- * other code using the same locking facility. The code guarantees that
- * the task remains on the same CPU.
- *
* Don't use in new code.
*/
-static DECLARE_MUTEX(kernel_sem);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+
/*
- * Re-acquire the kernel semaphore.
+ * Acquire/release the underlying lock from the scheduler.
*
- * This function is called with preemption off.
+ * This is called with preemption disabled, and should
+ * return an error value if it cannot get the lock and
+ * TIF_NEED_RESCHED gets set.
*
- * We are executing in schedule() so the code must be extremely careful
- * about recursion, both due to the down() and due to the enabling of
- * preemption. schedule() will re-check the preemption flag after
- * reacquiring the semaphore.
+ * If it successfully gets the lock, it should increment
+ * the preemption count like any spinlock does.
+ *
+ * (This works on UP too - _raw_spin_trylock will never
+ * return false in that case)
*/
int __lockfunc __reacquire_kernel_lock(void)
{
- struct task_struct *task = current;
- int saved_lock_depth = task->lock_depth;
-
- BUG_ON(saved_lock_depth < 0);
-
- task->lock_depth = -1;
- preempt_enable_no_resched();
-
- down(&kernel_sem);
-
+ while (!_raw_spin_trylock(&kernel_flag)) {
+ if (test_thread_flag(TIF_NEED_RESCHED))
+ return -EAGAIN;
+ cpu_relax();
+ }
preempt_disable();
- task->lock_depth = saved_lock_depth;
-
return 0;
}
void __lockfunc __release_kernel_lock(void)
{
- up(&kernel_sem);
+ _raw_spin_unlock(&kernel_flag);
+ preempt_enable_no_resched();
}
/*
- * Getting the big kernel semaphore.
+ * These are the BKL spinlocks - we try to be polite about preemption.
+ * If SMP is not on (ie UP preemption), this all goes away because the
+ * _raw_spin_trylock() will always succeed.
*/
-void __lockfunc lock_kernel(void)
+#ifdef CONFIG_PREEMPT
+static inline void __lock_kernel(void)
{
- struct task_struct *task = current;
- int depth = task->lock_depth + 1;
+ preempt_disable();
+ if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+ /*
+ * If preemption was disabled even before this
+ * was called, there's nothing we can be polite
+ * about - just spin.
+ */
+ if (preempt_count() > 1) {
+ _raw_spin_lock(&kernel_flag);
+ return;
+ }
- if (likely(!depth))
/*
- * No recursion worries - we set up lock_depth _after_
+ * Otherwise, let's wait for the kernel lock
+ * with preemption enabled..
*/
- down(&kernel_sem);
+ do {
+ preempt_enable();
+ while (spin_is_locked(&kernel_flag))
+ cpu_relax();
+ preempt_disable();
+ } while (!_raw_spin_trylock(&kernel_flag));
+ }
+}
- task->lock_depth = depth;
+#else
+
+/*
+ * Non-preemption case - just get the spinlock
+ */
+static inline void __lock_kernel(void)
+{
+ _raw_spin_lock(&kernel_flag);
}
+#endif
-void __lockfunc unlock_kernel(void)
+static inline void __unlock_kernel(void)
{
- struct task_struct *task = current;
+ /*
+ * the BKL is not covered by lockdep, so we open-code the
+ * unlocking sequence (and thus avoid the dep-chain ops):
+ */
+ _raw_spin_unlock(&kernel_flag);
+ preempt_enable();
+}
- BUG_ON(task->lock_depth < 0);
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously, so we only need to
+ * worry about other CPU's.
+ */
+void __lockfunc lock_kernel(void)
+{
+ int depth = current->lock_depth+1;
+ if (likely(!depth))
+ __lock_kernel();
+ current->lock_depth = depth;
+}
- if (likely(--task->lock_depth < 0))
- up(&kernel_sem);
+void __lockfunc unlock_kernel(void)
+{
+ BUG_ON(current->lock_depth < 0);
+ if (likely(--current->lock_depth < 0))
+ __unlock_kernel();
}
EXPORT_SYMBOL(lock_kernel);
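
The lock_depth counter is what keeps the BKL recursive: only the outermost lock_kernel() actually takes kernel_flag, and only the matching outermost unlock_kernel() releases it. A minimal, hypothetical caller showing the intended nesting (the function names are illustrative, not part of this diff):

#include <linux/smp_lock.h>

/* Hypothetical legacy code; nesting is safe because lock_kernel() and
 * unlock_kernel() only touch the spinlock when current->lock_depth
 * crosses the -1/0 boundary. */
static void legacy_inner(void)
{
	lock_kernel();		/* depth 0 -> 1: just bookkeeping, lock already held */
	/* ... touch BKL-protected state ... */
	unlock_kernel();	/* depth 1 -> 0: lock stays held for the outer level */
}

static void legacy_outer(void)
{
	lock_kernel();		/* depth -1 -> 0: kernel_flag is really taken here */
	legacy_inner();
	unlock_kernel();	/* depth 0 -> -1: kernel_flag is really dropped here */
}
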
diff --git a/lib/lmb.c b/lib/lmb.c
index 83287d3869a3..867f7b5a8231 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -19,31 +19,42 @@
struct lmb lmb;
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
+{
+ if (p && strstr(p, "debug"))
+ lmb_debug = 1;
+ return 0;
+}
+early_param("lmb", early_lmb);
+
void lmb_dump_all(void)
{
-#ifdef DEBUG
unsigned long i;
- pr_debug("lmb_dump_all:\n");
- pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
- pr_debug(" memory.size = 0x%llx\n",
+ if (!lmb_debug)
+ return;
+
+ pr_info("lmb_dump_all:\n");
+ pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
+ pr_info(" memory.size = 0x%llx\n",
(unsigned long long)lmb.memory.size);
for (i=0; i < lmb.memory.cnt ;i++) {
- pr_debug(" memory.region[0x%x].base = 0x%llx\n",
+ pr_info(" memory.region[0x%lx].base = 0x%llx\n",
i, (unsigned long long)lmb.memory.region[i].base);
- pr_debug(" .size = 0x%llx\n",
+ pr_info(" .size = 0x%llx\n",
(unsigned long long)lmb.memory.region[i].size);
}
- pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
- pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size);
+ pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
+ pr_info(" reserved.size = 0x%lx\n", lmb.reserved.size);
for (i=0; i < lmb.reserved.cnt ;i++) {
- pr_debug(" reserved.region[0x%x].base = 0x%llx\n",
+ pr_info(" reserved.region[0x%lx].base = 0x%llx\n",
i, (unsigned long long)lmb.reserved.region[i].base);
- pr_debug(" .size = 0x%llx\n",
+ pr_info(" .size = 0x%llx\n",
(unsigned long long)lmb.reserved.region[i].size);
}
-#endif /* DEBUG */
}
static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
@@ -286,8 +297,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
j = lmb_overlaps_region(&lmb.reserved, base, size);
if (j < 0) {
/* this area isn't reserved, take it */
- if (lmb_add_region(&lmb.reserved, base,
- lmb_align_up(size, align)) < 0)
+ if (lmb_add_region(&lmb.reserved, base, size) < 0)
base = ~(u64)0;
return base;
}
@@ -333,6 +343,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
struct lmb_region *mem = &lmb.memory;
int i;
+ BUG_ON(0 == size);
+
+ size = lmb_align_up(size, align);
+
for (i = 0; i < mem->cnt; i++) {
u64 ret = lmb_alloc_nid_region(&mem->region[i],
nid_range,
@@ -370,6 +384,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
BUG_ON(0 == size);
+ size = lmb_align_up(size, align);
+
/* On some platforms, make sure we allocate lowmem */
/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -393,8 +409,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
j = lmb_overlaps_region(&lmb.reserved, base, size);
if (j < 0) {
/* this area isn't reserved, take it */
- if (lmb_add_region(&lmb.reserved, base,
- lmb_align_up(size, align)) < 0)
+ if (lmb_add_region(&lmb.reserved, base, size) < 0)
return 0;
return base;
}
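
Both allocators now round the requested size up once, before scanning, so the overlap check and the recorded reservation use the same value instead of aligning only when calling lmb_add_region(). lmb_align_up() is the usual power-of-two round-up; its real definition lives elsewhere in the lmb code and is only sketched here as an assumption:

/* Assumed behaviour of the existing helper: round @addr up to the next
 * multiple of @align, where @align is a power of two.
 * e.g. lmb_align_up(0x1234, 0x1000) == 0x2000. */
static inline u64 lmb_align_up(u64 addr, u64 align)
{
	return (addr + (align - 1)) & ~(align - 1);
}
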
diff --git a/lib/parser.c b/lib/parser.c
index 703c8c13b346..4f0cbc03e0e8 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -182,18 +182,25 @@ int match_hex(substring_t *s, int *result)
}
/**
- * match_strcpy: - copies the characters from a substring_t to a string
- * @to: string to copy characters to.
- * @s: &substring_t to copy
+ * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
+ * @dest: where to copy to
+ * @src: &substring_t to copy
+ * @size: size of destination buffer
*
- * Description: Copies the set of characters represented by the given
- * &substring_t @s to the c-style string @to. Caller guarantees that @to is
- * large enough to hold the characters of @s.
+ * Description: Copy the characters in &substring_t @src to the
+ * c-style string @dest. Copy no more than @size - 1 characters, plus
+ * the terminating NUL. Return length of @src.
*/
-void match_strcpy(char *to, const substring_t *s)
+size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
{
- memcpy(to, s->from, s->to - s->from);
- to[s->to - s->from] = '\0';
+ size_t ret = src->to - src->from;
+
+ if (size) {
+ size_t len = ret >= size ? size - 1 : ret;
+ memcpy(dest, src->from, len);
+ dest[len] = '\0';
+ }
+ return ret;
}
/**
@@ -206,9 +213,10 @@ void match_strcpy(char *to, const substring_t *s)
*/
char *match_strdup(const substring_t *s)
{
- char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+ size_t sz = s->to - s->from + 1;
+ char *p = kmalloc(sz, GFP_KERNEL);
if (p)
- match_strcpy(p, s);
+ match_strlcpy(p, s, sz);
return p;
}
@@ -216,5 +224,5 @@ EXPORT_SYMBOL(match_token);
EXPORT_SYMBOL(match_int);
EXPORT_SYMBOL(match_octal);
EXPORT_SYMBOL(match_hex);
-EXPORT_SYMBOL(match_strcpy);
+EXPORT_SYMBOL(match_strlcpy);
EXPORT_SYMBOL(match_strdup);
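
Like strlcpy(), match_strlcpy() returns the length of the source rather than the number of bytes it copied, so a caller can detect truncation by comparing the return value against its buffer size. A small, hypothetical usage sketch (the surrounding option-parsing context is assumed):

#include <linux/kernel.h>
#include <linux/parser.h>

/* Hypothetical option parser; args[] would normally have been filled in
 * by a prior match_token() call. */
static void example_copy_option(substring_t *args)
{
	char name[16];

	/* A return value >= sizeof(name) means the value was truncated. */
	if (match_strlcpy(name, &args[0], sizeof(name)) >= sizeof(name))
		printk(KERN_WARNING "option value truncated to '%s'\n", name);
}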