Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug           |  41
-rw-r--r--  lib/Kconfig.kgdb            |   3
-rw-r--r--  lib/Makefile                |   2
-rw-r--r--  lib/bcd.c                   |  14
-rw-r--r--  lib/cmdline.c               |  16
-rw-r--r--  lib/cpumask.c               |   9
-rw-r--r--  lib/debugobjects.c          |   4
-rw-r--r--  lib/idr.c                   | 140
-rw-r--r--  lib/inflate.c               |  52
-rw-r--r--  lib/kobject.c               |  19
-rw-r--r--  lib/kobject_uevent.c        |   3
-rw-r--r--  lib/list_debug.c            |  50
-rw-r--r--  lib/lzo/lzo1x_decompress.c  |   6
-rw-r--r--  lib/ratelimit.c             |  55
-rw-r--r--  lib/scatterlist.c           | 176
-rw-r--r--  lib/smp_processor_id.c      |   6
-rw-r--r--  lib/textsearch.c            |   2
17 files changed, 396 insertions(+), 202 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index df27132a56f4..e1d4764435ed 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
debugging files into. Enable this option to be able to read and
write to these files.
+ For detailed documentation on the debugfs API, see
+ Documentation/DocBook/filesystems.
+
If unsure, say N.
config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
help
Say Y here to enable the kernel to detect "soft lockups",
which are bugs that cause the kernel to loop in kernel
- mode for more than 10 seconds, without giving other tasks a
+ mode for more than 60 seconds, without giving other tasks a
chance to run.
When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
can be detected via the NMI-watchdog, on platforms that
support it.)
+config BOOTPARAM_SOFTLOCKUP_PANIC
+ bool "Panic (Reboot) On Soft Lockups"
+ depends on DETECT_SOFTLOCKUP
+ help
+ Say Y here to enable the kernel to panic on "soft lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode for more than 60 seconds, without giving other tasks a
+ chance to run.
+
+ The panic can be used in combination with panic_timeout,
+ to cause the system to reboot automatically after a
+ lockup has been detected. This feature is useful for
+ high-availability systems that have uptime guarantees and
+ where a lockup must be resolved ASAP.
+
+ Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+ int
+ depends on DETECT_SOFTLOCKUP
+ range 0 1
+ default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+ default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
config SCHED_DEBUG
bool "Collect scheduler debugging info"
depends on DEBUG_KERNEL && PROC_FS
@@ -478,6 +505,18 @@ config DEBUG_WRITECOUNT
If unsure, say N.
+config DEBUG_MEMORY_INIT
+ bool "Debug memory initialisation" if EMBEDDED
+ default !EMBEDDED
+ help
+ Enable this for additional checks during memory initialisation.
+ The sanity checks verify aspects of the VM such as the memory model
+ and other information provided by the architecture. Verbose
+ information will be printed at KERN_DEBUG loglevel depending
+ on the mminit_loglevel= command-line option.
+
+ If unsure, say Y
+
config DEBUG_LIST
bool "Debug linked list manipulation"
depends on DEBUG_KERNEL
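A hedged sketch of how a BOOTPARAM_*_PANIC_VALUE symbol like the one added above is typically consumed; the consumer lives outside this diff, so the variable and __setup() hook names here are assumptions, not part of the patch:

    /* sketch only: names are assumed, not taken from this diff */
    static int softlockup_panic = CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

    static int __init softlockup_panic_setup(char *str)
    {
            /* allow "softlockup_panic=0/1" on the kernel command line */
            softlockup_panic = simple_strtoul(str, NULL, 0);
            return 1;
    }
    __setup("softlockup_panic=", softlockup_panic_setup);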
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a5..2cfd2721f7ed 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,7 +1,4 @@
-config HAVE_ARCH_KGDB_SHADOW_INFO
- bool
-
config HAVE_ARCH_KGDB
bool
diff --git a/lib/Makefile b/lib/Makefile
index 818c4d455518..9085ad6fa53d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,7 +18,7 @@ lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o klist.o
-obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 000000000000..d74257fd0fe7
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
+#include <linux/bcd.h>
+#include <linux/module.h>
+
+unsigned bcd2bin(unsigned char val)
+{
+ return (val & 0x0f) + (val >> 4) * 10;
+}
+EXPORT_SYMBOL(bcd2bin);
+
+unsigned char bin2bcd(unsigned val)
+{
+ return ((val / 10) << 4) + val % 10;
+}
+EXPORT_SYMBOL(bin2bcd);
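A hedged usage sketch of the two new helpers, with a made-up register value (BCD is how many RTC chips encode time fields):

    unsigned char reg = 0x59;            /* BCD-encoded "59" read from an RTC */
    unsigned sec = bcd2bin(reg);         /* 0x59 -> 59 */
    unsigned char back = bin2bcd(sec);   /* 59 -> 0x59 */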
diff --git a/lib/cmdline.c b/lib/cmdline.c
index f596c08d213a..5ba8a942a478 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints)
/**
* memparse - parse a string with mem suffixes into a number
* @ptr: Where parse begins
- * @retptr: (output) Pointer to next char after parse completes
+ * @retptr: (output) Optional pointer to next char after parse completes
*
* Parses a string into a number. The number stored at @ptr is
* potentially suffixed with %K (for kilobytes, or 1024 bytes),
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints)
* megabyte, or one gigabyte, respectively.
*/
-unsigned long long memparse (char *ptr, char **retptr)
+unsigned long long memparse(char *ptr, char **retptr)
{
- unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+ char *endptr; /* local pointer to end of parsed string */
- switch (**retptr) {
+ unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+ switch (*endptr) {
case 'G':
case 'g':
ret <<= 10;
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr)
case 'K':
case 'k':
ret <<= 10;
- (*retptr)++;
+ endptr++;
default:
break;
}
+
+ if (retptr)
+ *retptr = endptr;
+
return ret;
}
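A hedged sketch of the behaviour change: @retptr may now be NULL when the caller does not care where parsing stopped (the strings and variable names below are illustrative only):

    char one[] = "128K", *after;
    char two[] = "1G";
    unsigned long long a = memparse(one, &after);  /* a == 131072, *after == '\0' */
    unsigned long long b = memparse(two, NULL);    /* a NULL retptr is now accepted */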
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
}
EXPORT_SYMBOL(__next_cpu);
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+ return min_t(int, nr_cpu_ids,
+ find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
int __any_online_cpu(const cpumask_t *mask)
{
int cpu;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 85b18d79be89..f86196390cfd 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -226,15 +226,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
static void debug_object_is_on_stack(void *addr, int onstack)
{
- void *stack = current->stack;
int is_on_stack;
static int limit;
if (limit > 4)
return;
- is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
-
+ is_on_stack = object_is_on_stack(addr);
if (is_on_stack == onstack)
return;
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f027..3476f8203e97 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
* Modified by George Anzinger to reuse immediately and to use
* find bit instructions. Also removed _irq on spinlocks.
*
+ * Modified by Nadia Derbey to make it RCU safe.
+ *
* Small id to pointer translation service.
*
* It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@
static struct kmem_cache *idr_layer_cache;
-static struct idr_layer *alloc_layer(struct idr *idp)
+static struct idr_layer *get_from_free_list(struct idr *idp)
{
struct idr_layer *p;
unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
return(p);
}
+static void idr_layer_rcu_free(struct rcu_head *head)
+{
+ struct idr_layer *layer;
+
+ layer = container_of(head, struct idr_layer, rcu_head);
+ kmem_cache_free(idr_layer_cache, layer);
+}
+
+static inline void free_layer(struct idr_layer *p)
+{
+ call_rcu(&p->rcu_head, idr_layer_rcu_free);
+}
+
/* only called when idp->lock is held */
-static void __free_layer(struct idr *idp, struct idr_layer *p)
+static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
p->ary[0] = idp->id_free;
idp->id_free = p;
idp->id_free_cnt++;
}
-static void free_layer(struct idr *idp, struct idr_layer *p)
+static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
unsigned long flags;
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
* Depends on the return element being zeroed.
*/
spin_lock_irqsave(&idp->lock, flags);
- __free_layer(idp, p);
+ __move_to_free_list(idp, p);
spin_unlock_irqrestore(&idp->lock, flags);
}
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
* @gfp_mask: memory allocation flags
*
* This function should be called prior to locking and calling the
- * following function. It preallocates enough memory to satisfy
+ * idr_get_new* functions. It preallocates enough memory to satisfy
* the worst possible allocation.
*
* If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
if (new == NULL)
return (0);
- free_layer(idp, new);
+ move_to_free_list(idp, new);
}
return 1;
}
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
/* if already at the top layer, we need to grow */
if (!(p = pa[l])) {
*starting_id = id;
- return -2;
+ return IDR_NEED_TO_GROW;
}
/* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_ID_BIT) || (id < 0))
- return -3;
+ return IDR_NOMORE_SPACE;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
- if (!(new = alloc_layer(idp)))
+ new = get_from_free_list(idp);
+ if (!new)
return -1;
- p->ary[m] = new;
+ rcu_assign_pointer(p->ary[m], new);
p->count++;
}
pa[l--] = p;
@@ -192,7 +208,7 @@ build_up:
p = idp->top;
layers = idp->layers;
if (unlikely(!p)) {
- if (!(p = alloc_layer(idp)))
+ if (!(p = get_from_free_list(idp)))
return -1;
layers = 1;
}
@@ -204,7 +220,7 @@ build_up:
layers++;
if (!p->count)
continue;
- if (!(new = alloc_layer(idp))) {
+ if (!(new = get_from_free_list(idp))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
@@ -214,7 +230,7 @@ build_up:
p = p->ary[0];
new->ary[0] = NULL;
new->bitmap = new->count = 0;
- __free_layer(idp, new);
+ __move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
return -1;
@@ -225,10 +241,10 @@ build_up:
__set_bit(0, &new->bitmap);
p = new;
}
- idp->top = p;
+ rcu_assign_pointer(idp->top, p);
idp->layers = layers;
v = sub_alloc(idp, &id, pa);
- if (v == -2)
+ if (v == IDR_NEED_TO_GROW)
goto build_up;
return(v);
}
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
* Successfully found an empty slot. Install the user
* pointer and mark the slot full.
*/
- pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+ rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
+ (struct idr_layer *)ptr);
pa[0]->count++;
idr_mark_full(pa, id);
}
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
* This is a cheap hack until the IDR code can be fixed to
* return proper error values.
*/
- if (rv < 0) {
- if (rv == -1)
- return -EAGAIN;
- else /* Will be -3 */
- return -ENOSPC;
- }
+ if (rv < 0)
+ return _idr_rc_to_errno(rv);
*id = rv;
return 0;
}
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
* This is a cheap hack until the IDR code can be fixed to
* return proper error values.
*/
- if (rv < 0) {
- if (rv == -1)
- return -EAGAIN;
- else /* Will be -3 */
- return -ENOSPC;
- }
+ if (rv < 0)
+ return _idr_rc_to_errno(rv);
*id = rv;
return 0;
}
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);
static void idr_remove_warning(int id)
{
- printk("idr_remove called for id=%d which is not allocated.\n", id);
+ printk(KERN_WARNING
+ "idr_remove called for id=%d which is not allocated.\n", id);
dump_stack();
}
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
struct idr_layer *p = idp->top;
struct idr_layer **pa[MAX_LEVEL];
struct idr_layer ***paa = &pa[0];
+ struct idr_layer *to_free;
int n;
*paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, &p->bitmap))){
__clear_bit(n, &p->bitmap);
- p->ary[n] = NULL;
+ rcu_assign_pointer(p->ary[n], NULL);
+ to_free = NULL;
while(*paa && ! --((**paa)->count)){
- free_layer(idp, **paa);
+ if (to_free)
+ free_layer(to_free);
+ to_free = **paa;
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
+ if (to_free)
+ free_layer(to_free);
} else
idr_remove_warning(id);
}
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
void idr_remove(struct idr *idp, int id)
{
struct idr_layer *p;
+ struct idr_layer *to_free;
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
- idp->top->ary[0]) { // We can drop a layer
-
+ idp->top->ary[0]) {
+ /*
+ * Single child at leftmost slot: we can shrink the tree.
+ * This level is not needed anymore since when layers are
+ * inserted, they are inserted at the top of the existing
+ * tree.
+ */
+ to_free = idp->top;
p = idp->top->ary[0];
- idp->top->bitmap = idp->top->count = 0;
- free_layer(idp, idp->top);
- idp->top = p;
+ rcu_assign_pointer(idp->top, p);
--idp->layers;
+ to_free->bitmap = to_free->count = 0;
+ free_layer(to_free);
}
while (idp->id_free_cnt >= IDR_FREE_MAX) {
- p = alloc_layer(idp);
+ p = get_from_free_list(idp);
+ /*
+ * Note: we don't call the rcu callback here, since the only
+ * layers that fall into the freelist are those that have been
+ * preallocated.
+ */
kmem_cache_free(idr_layer_cache, p);
}
return;
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)
id += 1 << n;
while (n < fls(id)) {
- if (p) {
- memset(p, 0, sizeof *p);
- free_layer(idp, p);
- }
+ if (p)
+ free_layer(p);
n += IDR_BITS;
p = *--paa;
}
}
- idp->top = NULL;
+ rcu_assign_pointer(idp->top, NULL);
idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
void idr_destroy(struct idr *idp)
{
while (idp->id_free_cnt) {
- struct idr_layer *p = alloc_layer(idp);
+ struct idr_layer *p = get_from_free_list(idp);
kmem_cache_free(idr_layer_cache, p);
}
}
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
* return indicates that @id is not valid or you passed %NULL in
* idr_get_new().
*
- * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers' lifetimes are correctly managed.
*/
void *idr_find(struct idr *idp, int id)
{
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
struct idr_layer *p;
n = idp->layers * IDR_BITS;
- p = idp->top;
+ p = rcu_dereference(idp->top);
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
while (n > 0 && p) {
n -= IDR_BITS;
- p = p->ary[(id >> n) & IDR_MASK];
+ p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
}
return((void *)p);
}
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = idp->top;
+ p = rcu_dereference(idp->top);
max = 1 << n;
id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
- p = p->ary[(id >> n) & IDR_MASK];
+ p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
}
if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
* A -ENOENT return indicates that @id was not found.
* A -EINVAL return indicates that @id was not within valid constraints.
*
- * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ * The caller must serialize with writers.
*/
void *idr_replace(struct idr *idp, void *ptr, int id)
{
@@ -574,7 +601,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
return ERR_PTR(-ENOENT);
old_p = p->ary[n];
- p->ary[n] = ptr;
+ rcu_assign_pointer(p->ary[n], ptr);
return old_p;
}
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
restart:
/* get vacant slot */
t = idr_get_empty_slot(&ida->idr, idr_id, pa);
- if (t < 0) {
- if (t == -1)
- return -EAGAIN;
- else /* will be -3 */
- return -ENOSPC;
- }
+ if (t < 0)
+ return _idr_rc_to_errno(t);
if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
return -EAGAIN;
memset(bitmap, 0, sizeof(struct ida_bitmap));
- pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+ rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
+ (void *)bitmap);
pa[0]->count++;
}
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
* allocation.
*/
if (ida->idr.id_free_cnt || ida->free_bitmap) {
- struct idr_layer *p = alloc_layer(&ida->idr);
+ struct idr_layer *p = get_from_free_list(&ida->idr);
if (p)
kmem_cache_free(idr_layer_cache, p);
}
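A hedged sketch of the allocation/lookup pattern the updated comments describe: writers still serialize among themselves, while idr_find() may now run under rcu_read_lock(). my_idr, my_lock, ptr and obj are hypothetical:

    int id, err;

    do {
            if (!idr_pre_get(&my_idr, GFP_KERNEL))
                    return -ENOMEM;
            spin_lock(&my_lock);
            err = idr_get_new(&my_idr, ptr, &id);
            spin_unlock(&my_lock);
    } while (err == -EAGAIN);

    /* lookup side, per the updated idr_find() comment */
    rcu_read_lock();
    obj = idr_find(&my_idr, id);
    rcu_read_unlock();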
diff --git a/lib/inflate.c b/lib/inflate.c
index 9762294be062..1a8e8a978128 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = {
#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
#define DUMPBITS(n) {b>>=(n);k-=(n);}
+#ifndef NO_INFLATE_MALLOC
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size < 0)
+ error("Malloc error");
+ if (!malloc_ptr)
+ malloc_ptr = free_mem_ptr;
+
+ malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+
+ p = (void *)malloc_ptr;
+ malloc_ptr += size;
+
+ if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+ error("Out of memory");
+
+ malloc_count++;
+ return p;
+}
+
+static void free(void *where)
+{
+ malloc_count--;
+ if (!malloc_count)
+ malloc_ptr = free_mem_ptr;
+}
+#else
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+#endif
/*
Huffman code decoding is performed using a multi-level table lookup.
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void)
int e; /* last block flag */
int r; /* result code */
unsigned h; /* maximum struct huft's malloc'ed */
- void *ptr;
/* initialize window, bit buffer */
wp = 0;
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void)
h = 0;
do {
hufts = 0;
- gzip_mark(&ptr);
- if ((r = inflate_block(&e)) != 0) {
- gzip_release(&ptr);
- return r;
- }
- gzip_release(&ptr);
+#ifdef ARCH_HAS_DECOMP_WDOG
+ arch_decomp_wdog();
+#endif
+ r = inflate_block(&e);
+ if (r)
+ return r;
if (hufts > h)
h = hufts;
} while (!e);
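A hedged sketch of the bump allocator's lifetime rule in the !NO_INFLATE_MALLOC case: the arena only rewinds once every outstanding allocation has been freed (free_mem_ptr is supplied by the decompressor environment, not this file):

    void *a = malloc(16);   /* malloc_count == 1 */
    void *b = malloc(32);   /* malloc_count == 2 */
    free(a);                /* count drops to 1: malloc_ptr is NOT rewound */
    free(b);                /* count hits 0: malloc_ptr resets to free_mem_ptr */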
diff --git a/lib/kobject.c b/lib/kobject.c
index dcade0543bd2..bd732ffebc85 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj)
return -ENOENT;
if (!kobj->name || !kobj->name[0]) {
- pr_debug("kobject: (%p): attempted to be registered with empty "
+ WARN(1, "kobject: (%p): attempted to be registered with empty "
"name!\n", kobj);
- WARN_ON(1);
return -EINVAL;
}
@@ -216,13 +215,19 @@ static int kobject_add_internal(struct kobject *kobj)
static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs)
{
- /* Free the old name, if necessary. */
- kfree(kobj->name);
+ const char *old_name = kobj->name;
+ char *s;
kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
if (!kobj->name)
return -ENOMEM;
+ /* ewww... some of these buggers have '/' in the name ... */
+ s = strchr(kobj->name, '/');
+ if (s)
+ s[0] = '!';
+
+ kfree(old_name);
return 0;
}
@@ -577,12 +582,10 @@ static void kobject_release(struct kref *kref)
void kobject_put(struct kobject *kobj)
{
if (kobj) {
- if (!kobj->state_initialized) {
- printk(KERN_WARNING "kobject: '%s' (%p): is not "
+ if (!kobj->state_initialized)
+ WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
"initialized, yet kobject_put() is being "
"called.\n", kobject_name(kobj), kobj);
- WARN_ON(1);
- }
kref_put(&kobj->kref, kobject_release);
}
}
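A hedged sketch of the new name mangling in kobject_set_name_vargs(), via the public kobject_set_name() wrapper; the device and format string are made up:

    err = kobject_set_name(&dev->kobj, "eth%d/%d", 0, 1);
    /* on success the stored name is "eth0!1": the '/' was rewritten to '!' */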
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a63160..9f8d599459d1 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (retval)
goto exit;
- call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
+ retval = call_usermodehelper(argv[0], argv,
+ env->envp, UMH_WAIT_EXEC);
}
exit:
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 4350ba9655bd..1a39f4e3ae1f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,18 +20,14 @@ void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
- if (unlikely(next->prev != prev)) {
- printk(KERN_ERR "list_add corruption. next->prev should be "
- "prev (%p), but was %p. (next=%p).\n",
- prev, next->prev, next);
- BUG();
- }
- if (unlikely(prev->next != next)) {
- printk(KERN_ERR "list_add corruption. prev->next should be "
- "next (%p), but was %p. (prev=%p).\n",
- next, prev->next, prev);
- BUG();
- }
+ WARN(next->prev != prev,
+ "list_add corruption. next->prev should be "
+ "prev (%p), but was %p. (next=%p).\n",
+ prev, next->prev, next);
+ WARN(prev->next != next,
+ "list_add corruption. prev->next should be "
+ "next (%p), but was %p. (prev=%p).\n",
+ next, prev->next, prev);
next->prev = new;
new->next = next;
new->prev = prev;
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new,
EXPORT_SYMBOL(__list_add);
/**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-void list_add(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head, head->next);
-}
-EXPORT_SYMBOL(list_add);
-
-/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty on entry does not return true after this, the entry is
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add);
*/
void list_del(struct list_head *entry)
{
- if (unlikely(entry->prev->next != entry)) {
- printk(KERN_ERR "list_del corruption. prev->next should be %p, "
- "but was %p\n", entry, entry->prev->next);
- BUG();
- }
- if (unlikely(entry->next->prev != entry)) {
- printk(KERN_ERR "list_del corruption. next->prev should be %p, "
- "but was %p\n", entry, entry->next->prev);
- BUG();
- }
+ WARN(entry->prev->next != entry,
+ "list_del corruption. prev->next should be %p, "
+ "but was %p\n", entry, entry->prev->next);
+ WARN(entry->next->prev != entry,
+ "list_del corruption. next->prev should be %p, "
+ "but was %p\n", entry, entry->next->prev);
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 77f0f9b775a9..5dc6b29c1575 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -138,8 +138,7 @@ match:
t += 31 + *ip++;
}
m_pos = op - 1;
- m_pos -= le16_to_cpu(get_unaligned(
- (const unsigned short *)ip)) >> 2;
+ m_pos -= get_unaligned_le16(ip) >> 2;
ip += 2;
} else if (t >= 16) {
m_pos = op;
@@ -157,8 +156,7 @@ match:
}
t += 7 + *ip++;
}
- m_pos -= le16_to_cpu(get_unaligned(
- (const unsigned short *)ip)) >> 2;
+ m_pos -= get_unaligned_le16(ip) >> 2;
ip += 2;
if (m_pos == op)
goto eof_found;
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 485e3040dcd4..35136671b215 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -3,6 +3,9 @@
*
* Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
*
+ * 2008-05-01 rewrite the function and use a ratelimit_state data struct as
+ * parameter. Now every user can use their own standalone ratelimit_state.
+ *
* This file is released under the GPLv2.
*
*/
@@ -11,41 +14,43 @@
#include <linux/jiffies.h>
#include <linux/module.h>
+static DEFINE_SPINLOCK(ratelimit_lock);
+static unsigned long flags;
+
/*
* __ratelimit - rate limiting
- * @ratelimit_jiffies: minimum time in jiffies between two callbacks
- * @ratelimit_burst: number of callbacks we do before ratelimiting
+ * @rs: ratelimit_state data
*
- * This enforces a rate limit: not more than @ratelimit_burst callbacks
- * in every ratelimit_jiffies
+ * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
+ * in every @rs->ratelimit_jiffies
*/
-int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+int __ratelimit(struct ratelimit_state *rs)
{
- static DEFINE_SPINLOCK(ratelimit_lock);
- static unsigned toks = 10 * 5 * HZ;
- static unsigned long last_msg;
- static int missed;
- unsigned long flags;
- unsigned long now = jiffies;
+ if (!rs->interval)
+ return 1;
spin_lock_irqsave(&ratelimit_lock, flags);
- toks += now - last_msg;
- last_msg = now;
- if (toks > (ratelimit_burst * ratelimit_jiffies))
- toks = ratelimit_burst * ratelimit_jiffies;
- if (toks >= ratelimit_jiffies) {
- int lost = missed;
+ if (!rs->begin)
+ rs->begin = jiffies;
- missed = 0;
- toks -= ratelimit_jiffies;
- spin_unlock_irqrestore(&ratelimit_lock, flags);
- if (lost)
- printk(KERN_WARNING "%s: %d messages suppressed\n",
- __func__, lost);
- return 1;
+ if (time_is_before_jiffies(rs->begin + rs->interval)) {
+ if (rs->missed)
+ printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+ __func__, rs->missed);
+ rs->begin = 0;
+ rs->printed = 0;
+ rs->missed = 0;
}
- missed++;
+ if (rs->burst && rs->burst > rs->printed)
+ goto print;
+
+ rs->missed++;
spin_unlock_irqrestore(&ratelimit_lock, flags);
return 0;
+
+print:
+ rs->printed++;
+ spin_unlock_irqrestore(&ratelimit_lock, flags);
+ return 1;
}
EXPORT_SYMBOL(__ratelimit);
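A hedged usage sketch of the new per-caller state; the initializer uses only fields visible in the code above, and the values (5*HZ interval, burst of 10) are merely illustrative:

    static struct ratelimit_state my_rs = {
            .interval = 5 * HZ,
            .burst    = 10,
    };

    if (__ratelimit(&my_rs))
            printk(KERN_WARNING "something noisy happened\n");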
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d78..876ba6d5b670 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
EXPORT_SYMBOL(sg_alloc_table);
/**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ *
+ * Description:
+ * Starts mapping iterator @miter.
+ *
+ * Context:
+ * Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+ unsigned int nents, unsigned int flags)
+{
+ memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+ miter->__sg = sgl;
+ miter->__nents = nents;
+ miter->__offset = 0;
+ miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ * Proceeds @miter to the next mapping. @miter should have been
+ * started using sg_miter_start(). On successful return,
+ * @miter->page, @miter->addr and @miter->length point to the
+ * current mapping.
+ *
+ * Context:
+ * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
+ * @miter is stopped. May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ * true if @miter contains the next mapping. false if end of sg
+ * list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+ unsigned int off, len;
+
+ /* check for end and drop resources from the last iteration */
+ if (!miter->__nents)
+ return false;
+
+ sg_miter_stop(miter);
+
+ /* get to the next sg if necessary. __offset is adjusted by stop */
+ if (miter->__offset == miter->__sg->length && --miter->__nents) {
+ miter->__sg = sg_next(miter->__sg);
+ miter->__offset = 0;
+ }
+
+ /* map the next page */
+ off = miter->__sg->offset + miter->__offset;
+ len = miter->__sg->length - miter->__offset;
+
+ miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
+ miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+ miter->consumed = miter->length;
+
+ if (miter->__flags & SG_MITER_ATOMIC)
+ miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+ else
+ miter->addr = kmap(miter->page) + off;
+
+ return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ * Stops mapping iterator @miter. @miter should have been
+ * started using sg_miter_start(). A stopped iteration can be
+ * resumed by calling sg_miter_next() on it. This is useful when
+ * resources (kmap) need to be released during iteration.
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+ WARN_ON(miter->consumed > miter->length);
+
+ /* drop resources from the last iteration */
+ if (miter->addr) {
+ miter->__offset += miter->consumed;
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+ WARN_ON(!irqs_disabled());
+ kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+ } else
+ kunmap(miter->addr);
+
+ miter->page = NULL;
+ miter->addr = NULL;
+ miter->length = 0;
+ miter->consumed = 0;
+ }
+}
+EXPORT_SYMBOL(sg_miter_stop);
+
+/**
* sg_copy_buffer - Copy data between a linear buffer and an SG list
* @sgl: The SG list
* @nents: Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, int to_buffer)
{
- struct scatterlist *sg;
- size_t buf_off = 0;
- int i;
-
- WARN_ON(!irqs_disabled());
-
- for_each_sg(sgl, sg, nents, i) {
- struct page *page;
- int n = 0;
- unsigned int sg_off = sg->offset;
- unsigned int sg_copy = sg->length;
-
- if (sg_copy > buflen)
- sg_copy = buflen;
- buflen -= sg_copy;
-
- while (sg_copy > 0) {
- unsigned int page_copy;
- void *p;
-
- page_copy = PAGE_SIZE - sg_off;
- if (page_copy > sg_copy)
- page_copy = sg_copy;
-
- page = nth_page(sg_page(sg), n);
- p = kmap_atomic(page, KM_BIO_SRC_IRQ);
-
- if (to_buffer)
- memcpy(buf + buf_off, p + sg_off, page_copy);
- else {
- memcpy(p + sg_off, buf + buf_off, page_copy);
- flush_kernel_dcache_page(page);
- }
-
- kunmap_atomic(p, KM_BIO_SRC_IRQ);
-
- buf_off += page_copy;
- sg_off += page_copy;
- if (sg_off == PAGE_SIZE) {
- sg_off = 0;
- n++;
- }
- sg_copy -= page_copy;
+ unsigned int offset = 0;
+ struct sg_mapping_iter miter;
+
+ sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+
+ while (sg_miter_next(&miter) && offset < buflen) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ if (to_buffer)
+ memcpy(buf + offset, miter.addr, len);
+ else {
+ memcpy(miter.addr, buf + offset, len);
+ flush_kernel_dcache_page(miter.page);
}
- if (!buflen)
- break;
+ offset += len;
}
- return buf_off;
+ sg_miter_stop(&miter);
+
+ return offset;
}
/**
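A hedged sketch of an external user of the new iterator (sgl and nents are hypothetical; passing 0 instead of SG_MITER_ATOMIC selects the kmap() path, so the loop may sleep):

    struct sg_mapping_iter miter;

    sg_miter_start(&miter, sgl, nents, 0);          /* not SG_MITER_ATOMIC: may sleep */
    while (sg_miter_next(&miter))
            memset(miter.addr, 0, miter.length);    /* zero each mapped chunk */
    sg_miter_stop(&miter);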
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181e..c4381d9516f6 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
{
unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
- cpumask_t this_mask;
+ cpumask_of_cpu_ptr_declare(this_mask);
if (likely(preempt_count))
goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- this_mask = cpumask_of_cpu(this_cpu);
+ cpumask_of_cpu_ptr_next(this_mask, this_cpu);
- if (cpus_equal(current->cpus_allowed, this_mask))
+ if (cpus_equal(current->cpus_allowed, *this_mask))
goto out;
/*
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 4b7c6075256f..9fbcb44c554f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -267,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
return ERR_PTR(-EINVAL);
ops = lookup_ts_algo(algo);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
/*
* Why not always autoload you may ask. Some users are
* in a situation where requesting a module may deadlock,