Diffstat (limited to 'lib')
-rw-r--r--  lib/bitmap.c            4
-rw-r--r--  lib/dma-debug.c         2
-rw-r--r--  lib/dynamic_debug.c    50
-rw-r--r--  lib/genalloc.c          1
-rw-r--r--  lib/idr.c               2
-rw-r--r--  lib/percpu-refcount.c  16
-rw-r--r--  lib/rhashtable.c       13
-rw-r--r--  lib/vsprintf.c          2
8 files changed, 48 insertions, 42 deletions
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1e031f2c9aba..cd250a2e14cb 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -884,7 +884,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
- * using the the map { <n, m> | the n-th bit of @relmap is the
+ * using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
@@ -932,7 +932,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
*
* Further lets say we use the following code, invoking
* bitmap_fold() then bitmap_onto, as suggested above to
- * avoid the possitility of an empty @dst result:
+ * avoid the possibility of an empty @dst result:
*
* unsigned long *tmp; // a temporary bitmap's bits
*
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 98f2d7e91a91..add80cc02dbe 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1149,7 +1149,7 @@ static void check_unmap(struct dma_debug_entry *ref)
static void check_for_stack(struct device *dev, void *addr)
{
if (object_is_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from "
"stack [addr=%p]\n", addr);
}
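The hunk above only adds a trailing space: adjacent C string literals are concatenated at compile time, so without it the message read "...maps memory fromstack...". A one-line illustration:

    const char *msg = "DMA-API: device driver maps memory from "
                      "stack [addr=%p]\n";  /* concatenates to "... memory from stack ..." */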
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c9afbe2c445a..31fe79e31ab8 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -537,10 +537,9 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
-int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
+void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
- int res;
struct va_format vaf;
char buf[PREFIX_SIZE];
@@ -552,21 +551,17 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- res = printk(KERN_DEBUG "%s%pV",
- dynamic_emit_prefix(descriptor, buf), &vaf);
+ printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_pr_debug);
-int __dynamic_dev_dbg(struct _ddebug *descriptor,
+void __dynamic_dev_dbg(struct _ddebug *descriptor,
const struct device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -577,30 +572,27 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (!dev) {
- res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev, "%s%s %s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev), dev_name(dev),
- &vaf);
+ dev_printk_emit(7, dev, "%s%s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev), dev_name(dev),
+ &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_dev_dbg);
#ifdef CONFIG_NET
-int __dynamic_netdev_dbg(struct _ddebug *descriptor,
- const struct net_device *dev, const char *fmt, ...)
+void __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -613,23 +605,21 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
if (dev && dev->dev.parent) {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev->dev.parent,
- "%s%s %s %s%s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev->dev.parent),
- dev_name(dev->dev.parent),
- netdev_name(dev), netdev_reg_state(dev),
- &vaf);
+ dev_printk_emit(7, dev->dev.parent,
+ "%s%s %s %s%s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev->dev.parent),
+ dev_name(dev->dev.parent),
+ netdev_name(dev), netdev_reg_state(dev),
+ &vaf);
} else if (dev) {
- res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
- netdev_reg_state(dev), &vaf);
+ printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+ netdev_reg_state(dev), &vaf);
} else {
- res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_netdev_dbg);
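Nothing checks the return value of these helpers (they are reached through the pr_debug()/dev_dbg()/netdev_dbg() wrappers), so returning void is safe. The matching prototype updates are not shown in this hunk; presumably include/linux/dynamic_debug.h changes along these lines in the same series (attributes omitted in this sketch):

    void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
    void __dynamic_dev_dbg(struct _ddebug *descriptor,
                           const struct device *dev, const char *fmt, ...);
    void __dynamic_netdev_dbg(struct _ddebug *descriptor,
                              const struct net_device *dev, const char *fmt, ...);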
diff --git a/lib/genalloc.c b/lib/genalloc.c
index bdb9a456bcbb..38d2db82228c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
if (!np_pool)
return NULL;
pdev = of_find_device_by_node(np_pool);
+ of_node_put(np_pool);
if (!pdev)
return NULL;
return dev_get_gen_pool(&pdev->dev);
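The added of_node_put() drops the reference on the pool node once the lookup is done, plugging a device-tree node refcount leak; of_find_device_by_node() takes its own reference on the platform device, so the node reference is no longer needed afterwards. The code above the hunk is not shown, but the usual pattern for this lookup looks like:

    np_pool = of_parse_phandle(np, propname, index);  /* takes a node reference */
    if (!np_pool)
            return NULL;
    pdev = of_find_device_by_node(np_pool);           /* grabs its own device reference */
    of_node_put(np_pool);                             /* drop the node reference either way */
    if (!pdev)
            return NULL;
    return dev_get_gen_pool(&pdev->dev);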
diff --git a/lib/idr.c b/lib/idr.c
index 50be3fa9b657..e654aebd5f80 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -626,7 +626,7 @@ static void __idr_remove_all(struct idr *idp)
* idr_destroy().
*
* A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessay, then idr_destroy() to
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free up the id mappings and cached idr_layers.
*/
void idr_destroy(struct idr *idp)
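The clean-up sequence the comment describes, as a minimal sketch (the callback and the idr name are illustrative):

    static int free_entry(int id, void *p, void *data)
    {
            kfree(p);               /* release each object stored under an id */
            return 0;
    }

    /* ... on teardown ... */
    idr_for_each(&my_idr, free_entry, NULL);
    idr_destroy(&my_idr);           /* then free the id mappings and cached idr_layers */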
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe5a3342e960..a89cf09a8268 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -184,3 +184,19 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/*
+ * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by
+ * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18
+ * devel cycle. Do not use anywhere else.
+ */
+void __percpu_ref_kill_expedited(struct percpu_ref *ref)
+{
+ WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+ "percpu_ref_kill() called more than once on %pf!",
+ ref->release);
+
+ ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+ synchronize_sched_expedited();
+ percpu_ref_kill_rcu(&ref->rcu);
+}
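Compared with percpu_ref_kill(), which defers the switch to atomic mode through call_rcu_sched(), this variant forces the grace period immediately with synchronize_sched_expedited() and runs the switch-over inline. A hypothetical caller sketch; the real blk_mq_freeze_queue() is not reproduced here, and the wait queue plus the percpu_ref_is_zero() drain check are assumptions about how a freeze path might be written:

    #include <linux/percpu-refcount.h>
    #include <linux/wait.h>

    static void freeze_usage_counter(struct percpu_ref *ref,
                                     wait_queue_head_t *freeze_wq)
    {
            __percpu_ref_kill_expedited(ref);       /* atomic mode now, no new users */
            /* the ref's release callback is expected to wake freeze_wq */
            wait_event(*freeze_wq, percpu_ref_is_zero(ref));
    }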
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a2c78810ebc1..3d2b4733f6cb 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -23,7 +23,6 @@
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
-#include <linux/log2.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4UL
@@ -55,7 +54,7 @@ static u32 __hashfn(const struct rhashtable *ht, const void *key,
/**
* rhashtable_hashfn - compute hash for key of given length
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
* @key: pointer to key
* @len: length of key
*
@@ -86,7 +85,7 @@ static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
/**
* rhashtable_obj_hashfn - compute hash for hashed object
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
* @ptr: pointer to hashed object
*
* Computes the hash value using the hash function `hashfn` respectively
@@ -589,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
* rhashtable_destroy - destroy hash table
* @ht: the hash table to destroy
*
- * Frees the bucket array.
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
*/
void rhashtable_destroy(const struct rhashtable *ht)
{
- const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-
- bucket_table_free(tbl);
+ bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
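The expanded kernel-doc spells out the caller's obligation; an illustrative teardown sequence under those rules follows, where the context structure and pointer names are assumptions and the exact unpublish step depends on how the table is exposed to readers:

    /* 1. Unpublish: stop new lookups, inserts and resizes from starting. */
    rcu_assign_pointer(ctx->active_ht, NULL);

    /* 2. Wait out the quiescent cycle so in-flight readers and resizes finish. */
    synchronize_rcu();

    /* 3. Only now is it safe to free the bucket array. */
    rhashtable_destroy(&ctx->ht);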
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6fe2c84eb055..ba3cd0a35640 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1937,7 +1937,7 @@ EXPORT_SYMBOL(sprintf);
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
- * is skiped.
+ * is skipped.
*
* The return value is the number of words(32bits) which would be generated for
* the given input.
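The corrected kernel-doc belongs to vbin_printf(), which (with CONFIG_BINARY_PRINTF) formats the arguments into a binary word buffer for later rendering by bstr_printf(). A minimal sketch of the pairing, with an illustrative wrapper name and buffer sizes:

    #include <linux/kernel.h>

    /* Format now into a compact binary buffer (e.g. on a fast path)... */
    static int record_event(u32 *bin_buf, size_t words, const char *fmt, ...)
    {
            va_list args;
            int used;

            va_start(args, fmt);
            /* returns the number of 32-bit words the input would generate */
            used = vbin_printf(bin_buf, words, fmt, args);
            va_end(args);
            return used;
    }

    /* ...and render the text later, using the same format string:
     *     bstr_printf(out, sizeof(out), fmt, bin_buf);
     */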