-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h   25
-rw-r--r--  arch/arm/include/asm/xen/page.h             9
-rw-r--r--  arch/arm/xen/Makefile                       2
-rw-r--r--  arch/arm/xen/enlighten.c                    6
-rw-r--r--  arch/arm/xen/mm32.c                       202
-rw-r--r--  arch/arm/xen/p2m.c                         66
-rw-r--r--  arch/x86/Kconfig                            1
-rw-r--r--  arch/x86/include/asm/bitops.h               2
-rw-r--r--  block/blk-merge.c                          17
-rw-r--r--  block/blk-mq.c                             91
-rw-r--r--  block/blk-sysfs.c                           6
-rw-r--r--  block/genhd.c                              24
-rw-r--r--  block/partition-generic.c                   2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c           1
-rw-r--r--  drivers/block/null_blk.c                   29
-rw-r--r--  drivers/md/dm-cache-target.c                4
-rw-r--r--  fs/dcache.c                                 3
-rw-r--r--  fs/namei.c                                  4
-rw-r--r--  include/linux/hash.h                        4
-rw-r--r--  include/linux/jiffies.h                    12
-rw-r--r--  include/xen/interface/features.h            3
-rw-r--r--  kernel/time/alarmtimer.c                   34
-rw-r--r--  kernel/time/time.c                         56
-rw-r--r--  lib/Kconfig                                 3
-rw-r--r--  lib/hweight.c                               4
-rw-r--r--  lib/string.c                                4
26 files changed, 414 insertions, 200 deletions
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e5..e8275ea88e88 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
}
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- if (__generic_dma_ops(hwdev)->unmap_page)
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+ struct dma_attrs *attrs);
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (__generic_dma_ops(hwdev)->sync_single_for_device)
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358..135c24a5ba26 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr {
#define INVALID_P2M_ENTRY (~0UL)
unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
extern struct rb_root phys_to_mach;
static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
- unsigned long pfn;
-
- if (phys_to_mach.rb_node != NULL) {
- pfn = __mfn_to_pfn(mfn);
- if (pfn != INVALID_P2M_ENTRY)
- return pfn;
- }
-
return mfn;
}
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414c..1f85bfe6b470 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 98544c5f86e9..0e15f011f9c8 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void)
xen_domain_type = XEN_HVM_DOMAIN;
xen_setup_features();
+
+ if (!xen_feature(XENFEAT_grant_map_identity)) {
+ pr_warn("Please upgrade your Xen.\n"
+ "If your platform has any non-coherent DMA devices, they won't work properly.\n");
+ }
+
if (xen_feature(XENFEAT_dom0))
xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
else
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644
index 000000000000..3b99860fd7ae
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <xen/features.h>
+
+static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
+static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
+
+static int alloc_xen_mm32_scratch_page(int cpu)
+{
+ struct page *page;
+ unsigned long virt;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
+ return 0;
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL) {
+ pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
+ return -ENOMEM;
+ }
+
+ virt = (unsigned long)__va(page_to_phys(page));
+ pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+ ptep = pte_offset_kernel(pmdp, virt);
+
+ per_cpu(xen_mm32_scratch_virt, cpu) = virt;
+ per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
+
+ return 0;
+}
+
+static int xen_mm32_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (alloc_xen_mm32_scratch_page(cpu))
+ return NOTIFY_BAD;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block xen_mm32_cpu_notifier = {
+ .notifier_call = xen_mm32_cpu_notify,
+};
+
+static void* xen_mm32_remap_page(dma_addr_t handle)
+{
+ unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
+ pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
+
+ *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
+ local_flush_tlb_kernel_page(virt);
+
+ return (void*)virt;
+}
+
+static void xen_mm32_unmap(void *vaddr)
+{
+ put_cpu_var(xen_mm32_scratch_virt);
+}
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int))
+{
+ unsigned long pfn;
+ size_t left = size;
+
+ pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
+ do {
+ size_t len = left;
+ void *vaddr;
+
+ if (!pfn_valid(pfn))
+ {
+ /* Cannot map the page, we don't know its physical address.
+ * Return and hope for the best */
+ if (!xen_feature(XENFEAT_grant_map_identity))
+ return;
+ vaddr = xen_mm32_remap_page(handle) + offset;
+ op(vaddr, len, dir);
+ xen_mm32_unmap(vaddr - offset);
+ } else {
+ struct page *page = pfn_to_page(pfn);
+
+ if (PageHighMem(page)) {
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ if (cache_is_vipt_nonaliasing()) {
+ vaddr = kmap_atomic(page);
+ op(vaddr + offset, len, dir);
+ kunmap_atomic(vaddr);
+ } else {
+ vaddr = kmap_high_get(page);
+ if (vaddr) {
+ op(vaddr + offset, len, dir);
+ kunmap_high(page);
+ }
+ }
+ } else {
+ vaddr = page_address(page) + offset;
+ op(vaddr, len, dir);
+ }
+ }
+
+ offset = 0;
+ pfn++;
+ left -= len;
+ } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ /* Cannot use __dma_page_dev_to_cpu because we don't have a
+ * struct page for handle */
+
+ if (dir != DMA_TO_DEVICE)
+ outer_inv_range(handle, handle + size);
+
+ dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+
+ dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+
+ if (dir == DMA_FROM_DEVICE) {
+ outer_inv_range(handle, handle + size);
+ } else {
+ outer_clean_range(handle, handle + size);
+ }
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+
+{
+ if (!__generic_dma_ops(hwdev)->unmap_page)
+ return;
+ if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ return;
+
+ __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ return;
+ __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+ return;
+ __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+int __init xen_mm32_init(void)
+{
+ int cpu;
+
+ if (!xen_initial_domain())
+ return 0;
+
+ register_cpu_notifier(&xen_mm32_cpu_notifier);
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (alloc_xen_mm32_scratch_page(cpu)) {
+ put_online_cpus();
+ unregister_cpu_notifier(&xen_mm32_cpu_notifier);
+ return -ENOMEM;
+ }
+ }
+ put_online_cpus();
+
+ return 0;
+}
+arch_initcall(xen_mm32_init);
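
For reference, the chunking logic in dma_cache_maint() reduces to the standalone sketch below. The op() callback is a hypothetical stand-in for dmac_map_area()/dmac_unmap_area(), and for simplicity the page-boundary clamp is applied to every chunk rather than only in the highmem branch as above:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for dmac_map_area()/dmac_unmap_area(). */
static void op(unsigned long pfn, unsigned long offset, size_t len)
{
        printf("maint pfn=%lu offset=%lu len=%zu\n", pfn, offset, len);
}

/* Walk [offset, offset + size) one page at a time, clamping the
 * current chunk to the page boundary, as dma_cache_maint() does. */
static void cache_maint(unsigned long handle, unsigned long offset, size_t size)
{
        unsigned long pfn = handle / PAGE_SIZE + offset / PAGE_SIZE;
        size_t left = size;

        offset %= PAGE_SIZE;
        do {
                size_t len = left;

                if (len + offset > PAGE_SIZE)
                        len = PAGE_SIZE - offset;
                op(pfn, offset, len);
                offset = 0;
                pfn++;
                left -= len;
        } while (left);
}

int main(void)
{
        cache_maint(0x10000, 3000, 10000);      /* crosses four pages */
        return 0;
}
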
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 97baf4427817..054857776254 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
unsigned long pfn;
unsigned long mfn;
unsigned long nr_pages;
- struct rb_node rbnode_mach;
struct rb_node rbnode_phys;
};
static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
parent = *link;
entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
- if (new->mfn == entry->mfn)
- goto err_out;
if (new->pfn == entry->pfn)
goto err_out;
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
- struct rb_node **link = &mach_to_phys.rb_node;
- struct rb_node *parent = NULL;
- struct xen_p2m_entry *entry;
- int rc = 0;
-
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
- if (new->mfn == entry->mfn)
- goto err_out;
- if (new->pfn == entry->pfn)
- goto err_out;
-
- if (new->mfn < entry->mfn)
- link = &(*link)->rb_left;
- else
- link = &(*link)->rb_right;
- }
- rb_link_node(&new->rbnode_mach, parent, link);
- rb_insert_color(&new->rbnode_mach, &mach_to_phys);
- goto out;
-
-err_out:
- rc = -EINVAL;
- pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
- __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
- return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
- struct rb_node *n = mach_to_phys.rb_node;
- struct xen_p2m_entry *entry;
- unsigned long irqflags;
-
- read_lock_irqsave(&p2m_lock, irqflags);
- while (n) {
- entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
- if (entry->mfn <= mfn &&
- entry->mfn + entry->nr_pages > mfn) {
- read_unlock_irqrestore(&p2m_lock, irqflags);
- return entry->pfn + (mfn - entry->mfn);
- }
- if (mfn < entry->mfn)
- n = n->rb_left;
- else
- n = n->rb_right;
- }
- read_unlock_irqrestore(&p2m_lock, irqflags);
-
- return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (p2m_entry->pfn <= pfn &&
p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
- rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
write_unlock_irqrestore(&p2m_lock, irqflags);
kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
p2m_entry->mfn = mfn;
write_lock_irqsave(&p2m_lock, irqflags);
- if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
- (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+ if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
write_unlock_irqrestore(&p2m_lock, irqflags);
return false;
}
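
With the mach_to_phys tree gone, only the phys_to_mach lookup remains; its containment test (entry->pfn <= pfn && entry->pfn + entry->nr_pages > pfn) works over any structure sorted by pfn. A hedged sketch of the same search over a sorted array (the kernel walks an rb-tree; the ranges below are made up):

#include <stdio.h>

struct p2m_range { unsigned long pfn, mfn, nr_pages; };

static struct p2m_range map[] = {
        { 0x1000, 0x80000, 16 },
        { 0x2000, 0x90000, 256 },
};

/* Binary search using the same range containment test as the
 * kernel's rb-tree walk in __pfn_to_mfn(). */
static unsigned long pfn_to_mfn(unsigned long pfn)
{
        int lo = 0, hi = (int)(sizeof(map) / sizeof(map[0])) - 1;

        while (lo <= hi) {
                int mid = (lo + hi) / 2;

                if (map[mid].pfn <= pfn &&
                    map[mid].pfn + map[mid].nr_pages > pfn)
                        return map[mid].mfn + (pfn - map[mid].pfn);
                if (pfn < map[mid].pfn)
                        hi = mid - 1;
                else
                        lo = mid + 1;
        }
        return ~0UL;    /* INVALID_P2M_ENTRY */
}

int main(void)
{
        printf("%lx\n", pfn_to_mfn(0x2003));    /* prints 90003 */
        return 0;
}
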
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 778178f4c7d1..36327438caf0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -23,6 +23,7 @@ config X86
def_bool y
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_HAS_FAST_MULTIPLIER
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select HAVE_AOUT if X86_32
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index afcd35d331de..cfe3b954d5e4 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x)
#include <asm-generic/bitops/sched.h>
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 54535831f1e1..77881798f793 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -10,10 +10,11 @@
#include "blk.h"
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
- struct bio *bio)
+ struct bio *bio,
+ bool no_sg_merge)
{
struct bio_vec bv, bvprv = { NULL };
- int cluster, high, highprv = 1, no_sg_merge;
+ int cluster, high, highprv = 1;
unsigned int seg_size, nr_phys_segs;
struct bio *fbio, *bbio;
struct bvec_iter iter;
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
cluster = blk_queue_cluster(q);
seg_size = 0;
nr_phys_segs = 0;
- no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
high = 0;
for_each_bio(bio) {
bio_for_each_segment(bv, bio, iter) {
@@ -88,18 +88,23 @@ new_segment:
void blk_recalc_rq_segments(struct request *rq)
{
- rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+ bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
+ &rq->q->queue_flags);
+
+ rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
+ no_sg_merge);
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+ if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+ bio->bi_vcnt < queue_max_segments(q))
bio->bi_phys_segments = bio->bi_vcnt;
else {
struct bio *nxt = bio->bi_next;
bio->bi_next = NULL;
- bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
bio->bi_next = nxt;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4aac82615a46..383ea0cb1f0a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1321,6 +1321,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
continue;
set->ops->exit_request(set->driver_data, tags->rqs[i],
hctx_idx, i);
+ tags->rqs[i] = NULL;
}
}
@@ -1354,8 +1355,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
INIT_LIST_HEAD(&tags->page_list);
- tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
- GFP_KERNEL, set->numa_node);
+ tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ set->numa_node);
if (!tags->rqs) {
blk_mq_free_tags(tags);
return NULL;
@@ -1379,8 +1381,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
this_order--;
do {
- page = alloc_pages_node(set->numa_node, GFP_KERNEL,
- this_order);
+ page = alloc_pages_node(set->numa_node,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ this_order);
if (page)
break;
if (!this_order--)
@@ -1404,8 +1407,10 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
if (set->ops->init_request) {
if (set->ops->init_request(set->driver_data,
tags->rqs[i], hctx_idx, i,
- set->numa_node))
+ set->numa_node)) {
+ tags->rqs[i] = NULL;
goto fail;
+ }
}
p += rq_size;
@@ -1416,7 +1421,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
return tags;
fail:
- pr_warn("%s: failed to allocate requests\n", __func__);
blk_mq_free_rq_map(set, tags, hctx_idx);
return NULL;
}
@@ -1936,6 +1940,61 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+ int i;
+
+ for (i = 0; i < set->nr_hw_queues; i++) {
+ set->tags[i] = blk_mq_init_rq_map(set, i);
+ if (!set->tags[i])
+ goto out_unwind;
+ }
+
+ return 0;
+
+out_unwind:
+ while (--i >= 0)
+ blk_mq_free_rq_map(set, set->tags[i], i);
+
+ set->tags = NULL;
+ return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+ unsigned int depth;
+ int err;
+
+ depth = set->queue_depth;
+ do {
+ err = __blk_mq_alloc_rq_maps(set);
+ if (!err)
+ break;
+
+ set->queue_depth >>= 1;
+ if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+ err = -ENOMEM;
+ break;
+ }
+ } while (set->queue_depth);
+
+ if (!set->queue_depth || err) {
+ pr_err("blk-mq: failed to allocate request map\n");
+ return -ENOMEM;
+ }
+
+ if (depth != set->queue_depth)
+ pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+ depth, set->queue_depth);
+
+ return 0;
+}
+
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@@ -1944,8 +2003,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
- int i;
-
if (!set->nr_hw_queues)
return -EINVAL;
if (!set->queue_depth)
@@ -1966,23 +2023,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
if (!set->tags)
- goto out;
+ return -ENOMEM;
- for (i = 0; i < set->nr_hw_queues; i++) {
- set->tags[i] = blk_mq_init_rq_map(set, i);
- if (!set->tags[i])
- goto out_unwind;
- }
+ if (blk_mq_alloc_rq_maps(set))
+ goto enomem;
mutex_init(&set->tag_list_lock);
INIT_LIST_HEAD(&set->tag_list);
return 0;
-
-out_unwind:
- while (--i >= 0)
- blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+ kfree(set->tags);
+ set->tags = NULL;
return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -1997,6 +2049,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
}
kfree(set->tags);
+ set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);
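
The depth back-off in blk_mq_alloc_rq_maps() is a generic pattern: retry with half the requested amount until allocation succeeds or a floor is reached. A minimal sketch, with MIN_DEPTH standing in for set->reserved_tags + BLK_MQ_TAG_MIN and the failure threshold simulating memory pressure:

#include <stdio.h>
#include <stdlib.h>

#define MIN_DEPTH 4

/* Hypothetical allocator that fails for large requests. */
static void *alloc_maps(unsigned int depth)
{
        if (depth > 64)
                return NULL;
        return malloc(depth * sizeof(void *));
}

int main(void)
{
        unsigned int depth = 1024, asked = depth;
        void *maps = NULL;

        /* Same back-off as blk_mq_alloc_rq_maps(): halve the depth
         * on failure until it drops below the usable minimum. */
        while (depth >= MIN_DEPTH) {
                maps = alloc_maps(depth);
                if (maps)
                        break;
                depth >>= 1;
        }
        if (!maps) {
                fprintf(stderr, "failed to allocate request map\n");
                return 1;
        }
        if (depth != asked)
                printf("reduced tag depth (%u -> %u)\n", asked, depth);
        free(maps);
        return 0;
}
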
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 4db5abf96b9e..17f5c84ce7bf 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -554,8 +554,10 @@ int blk_register_queue(struct gendisk *disk)
* Initialization must be complete by now. Finish the initial
* bypass from queue allocation.
*/
- queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
- blk_queue_bypass_end(q);
+ if (!blk_queue_init_done(q)) {
+ queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+ blk_queue_bypass_end(q);
+ }
ret = blk_trace_init_sysfs(dev);
if (ret)
diff --git a/block/genhd.c b/block/genhd.c
index 791f41943132..09da5e4a8e03 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -28,10 +28,10 @@ struct kobject *block_depr;
/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT (1 << MINORBITS)
-/* For extended devt allocation. ext_devt_mutex prevents look up
+/* For extended devt allocation. ext_devt_lock prevents look up
* results from going away underneath its user.
*/
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
static DEFINE_IDR(ext_devt_idr);
static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
}
/* allocate ext devt */
- mutex_lock(&ext_devt_mutex);
- idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
- mutex_unlock(&ext_devt_mutex);
+ idr_preload(GFP_KERNEL);
+
+ spin_lock(&ext_devt_lock);
+ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+ spin_unlock(&ext_devt_lock);
+
+ idr_preload_end();
if (idx < 0)
return idx == -ENOSPC ? -EBUSY : idx;
@@ -447,9 +451,9 @@ void blk_free_devt(dev_t devt)
return;
if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
- mutex_lock(&ext_devt_mutex);
+ spin_lock(&ext_devt_lock);
idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
- mutex_unlock(&ext_devt_mutex);
+ spin_unlock(&ext_devt_lock);
}
}
@@ -665,7 +669,6 @@ void del_gendisk(struct gendisk *disk)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk));
- blk_free_devt(disk_to_dev(disk)->devt);
}
EXPORT_SYMBOL(del_gendisk);
@@ -690,13 +693,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
} else {
struct hd_struct *part;
- mutex_lock(&ext_devt_mutex);
+ spin_lock(&ext_devt_lock);
part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
if (part && get_disk(part_to_disk(part))) {
*partno = part->partno;
disk = part_to_disk(part);
}
- mutex_unlock(&ext_devt_mutex);
+ spin_unlock(&ext_devt_lock);
}
return disk;
@@ -1098,6 +1101,7 @@ static void disk_release(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
+ blk_free_devt(dev->devt);
disk_release_events(disk);
kfree(disk->random);
disk_replace_part_tbl(disk, NULL);
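
Switching ext_devt_mutex to a spinlock requires moving any sleeping allocation out of the critical section, which is what the idr_preload()/GFP_NOWAIT pairing above accomplishes. The general shape, sketched with POSIX threads (compile with -pthread; the fixed-size table is a hypothetical stand-in for the IDR):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_spinlock_t lock;
static void *slots[16];

/* Allocate before taking the lock, then hold the spinlock only for
 * the table update, mirroring blk_alloc_devt() after the change. */
static int add_entry(void *payload)
{
        int i, idx = -1;

        pthread_spin_lock(&lock);
        for (i = 0; i < 16; i++) {
                if (!slots[i]) {
                        slots[i] = payload;
                        idx = i;
                        break;
                }
        }
        pthread_spin_unlock(&lock);
        return idx;
}

int main(void)
{
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        void *p = malloc(32);           /* preallocated outside the lock */
        printf("idx=%d\n", add_entry(p));
        return 0;
}
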
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 789cdea05893..0d9e5f97f0a8 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
struct hd_struct *p = dev_to_part(dev);
+ blk_free_devt(dev->devt);
free_part_stats(p);
free_part_info(p);
kfree(p);
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
rcu_assign_pointer(ptbl->last_lookup, NULL);
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
- blk_free_devt(part_devt(part));
hd_struct_put(part);
}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index db1e9560d8a7..5c8e7fe07745 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3918,7 +3918,6 @@ skip_create_disk:
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
- rv = -ENOMEM;
goto block_queue_alloc_init_error;
}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a3b042c4d448..00d469c7f9f7 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -462,17 +462,21 @@ static int null_add_dev(void)
struct gendisk *disk;
struct nullb *nullb;
sector_t size;
+ int rv;
nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
- if (!nullb)
+ if (!nullb) {
+ rv = -ENOMEM;
goto out;
+ }
spin_lock_init(&nullb->lock);
if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
submit_queues = nr_online_nodes;
- if (setup_queues(nullb))
+ rv = setup_queues(nullb);
+ if (rv)
goto out_free_nullb;
if (queue_mode == NULL_Q_MQ) {
@@ -484,22 +488,29 @@ static int null_add_dev(void)
nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
nullb->tag_set.driver_data = nullb;
- if (blk_mq_alloc_tag_set(&nullb->tag_set))
+ rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+ if (rv)
goto out_cleanup_queues;
nullb->q = blk_mq_init_queue(&nullb->tag_set);
- if (!nullb->q)
+ if (!nullb->q) {
+ rv = -ENOMEM;
goto out_cleanup_tags;
+ }
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
- if (!nullb->q)
+ if (!nullb->q) {
+ rv = -ENOMEM;
goto out_cleanup_queues;
+ }
blk_queue_make_request(nullb->q, null_queue_bio);
init_driver_queues(nullb);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
- if (!nullb->q)
+ if (!nullb->q) {
+ rv = -ENOMEM;
goto out_cleanup_queues;
+ }
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
init_driver_queues(nullb);
@@ -509,8 +520,10 @@ static int null_add_dev(void)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
disk = nullb->disk = alloc_disk_node(1, home_node);
- if (!disk)
+ if (!disk) {
+ rv = -ENOMEM;
goto out_cleanup_blk_queue;
+ }
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
@@ -544,7 +557,7 @@ out_cleanup_queues:
out_free_nullb:
kfree(nullb);
out:
- return -ENOMEM;
+ return rv;
}
static int __init null_init(void)
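
The null_blk change completes the usual goto-unwind error pattern: record a specific code in rv at each failure site, then jump to the matching cleanup label, instead of collapsing every failure into one blanket -ENOMEM. A minimal sketch:

#include <stdio.h>
#include <stdlib.h>

#define ENOMEM 12

static int add_dev(int fail_second)
{
        void *a, *b;
        int rv;

        a = malloc(64);
        if (!a) {
                rv = -ENOMEM;
                goto out;
        }
        b = fail_second ? NULL : malloc(64);
        if (!b) {
                rv = -ENOMEM;   /* a real driver may return -EINVAL etc. */
                goto out_free_a;
        }
        free(b);
        free(a);
        return 0;
out_free_a:
        free(a);
out:
        return rv;
}

int main(void)
{
        printf("ok=%d fail=%d\n", add_dev(0), add_dev(1));
        return 0;
}
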
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1af40ee209e2..7130505c2425 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -895,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
struct cache *cache = mg->cache;
if (mg->writeback) {
- cell_defer(cache, mg->old_ocell, false);
clear_dirty(cache, mg->old_oblock, mg->cblock);
+ cell_defer(cache, mg->old_ocell, false);
cleanup_migration(mg);
return;
@@ -951,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
}
} else {
+ clear_dirty(cache, mg->new_oblock, mg->cblock);
if (mg->requeue_holder)
cell_defer(cache, mg->new_ocell, true);
else {
bio_endio(mg->new_ocell->holder, 0);
cell_defer(cache, mg->new_ocell, false);
}
- clear_dirty(cache, mg->new_oblock, mg->cblock);
cleanup_migration(mg);
}
}
diff --git a/fs/dcache.c b/fs/dcache.c
index d30ce699ae4b..4023e77b800e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
unsigned int hash)
{
hash += (unsigned long) parent / L1_CACHE_BYTES;
- hash = hash + (hash >> d_hash_shift);
- return dentry_hashtable + (hash & d_hash_mask);
+ return dentry_hashtable + hash_32(hash, d_hash_shift);
}
/* Statistics gathering. */
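
d_hash() now computes the bucket index with hash_32(): one multiply by GOLDEN_RATIO_PRIME_32 followed by keeping the high bits, which mix far better than hash + (hash >> shift). A standalone sketch (the d_hash_shift value is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001U

/* hash_32() as used by the new d_hash(): multiply, take high bits. */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
        return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}

int main(void)
{
        unsigned int shift = 12;        /* hypothetical d_hash_shift */
        uint32_t h = 0xdeadbeef;

        printf("bucket %u of %u\n", hash_32(h, shift), 1U << shift);
        return 0;
}
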
diff --git a/fs/namei.c b/fs/namei.c
index a996bb48dfab..229235862e50 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -34,6 +34,7 @@
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
+#include <linux/hash.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -1634,8 +1635,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
static inline unsigned int fold_hash(unsigned long hash)
{
- hash += hash >> (8*sizeof(int));
- return hash;
+ return hash_64(hash, 32);
}
#else /* 32-bit case */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index bd1754c7ecef..d0494c399392 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_PRIME_64;
+#else
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
n <<= 18;
@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
hash += n;
n <<= 2;
hash += n;
+#endif
/* High bits are more random, so use them. */
return hash >> (64 - bits);
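
The two hash_64() paths are equivalent: the shift-and-add chain is the multiply by GOLDEN_RATIO_PRIME_64 (0x9e37fffffffc0001) expanded by hand for machines without a fast 64-bit multiplier. A standalone check that both produce the same value:

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

/* Path taken when CONFIG_ARCH_HAS_FAST_MULTIPLIER is set. */
static uint64_t hash_64_mul(uint64_t val, unsigned int bits)
{
        return (val * GOLDEN_RATIO_PRIME_64) >> (64 - bits);
}

/* Fallback path: the adds and subtracts below sum to exactly one
 * multiply by GOLDEN_RATIO_PRIME_64. */
static uint64_t hash_64_shift(uint64_t val, unsigned int bits)
{
        uint64_t hash = val, n = val;

        n <<= 18; hash -= n;
        n <<= 33; hash -= n;
        n <<= 3;  hash += n;
        n <<= 3;  hash -= n;
        n <<= 4;  hash += n;
        n <<= 2;  hash += n;
        return hash >> (64 - bits);
}

int main(void)
{
        uint64_t v = 0x0123456789abcdefULL;

        printf("%llx %llx\n",
               (unsigned long long)hash_64_mul(v, 32),
               (unsigned long long)hash_64_shift(v, 32));
        return 0;
}
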
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1f44466c1e9d..c367cbdf73ab 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
#endif
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION \
- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion. See there
- * for more details. It is the scaled resolution rounding value. Note
- * that it is a 64-bit value. Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
/*
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6ccdba25..14334d0161d5 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -53,6 +53,9 @@
/* operation as Dom0 is supported */
#define XENFEAT_dom0 11
+/* Xen also maps grant references at pfn = mfn */
+#define XENFEAT_grant_map_identity 12
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 4aec4a457431..a7077d3ae52f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
ktime_t now)
{
+ unsigned long flags;
struct k_itimer *ptr = container_of(alarm, struct k_itimer,
it.alarm.alarmtimer);
- if (posix_timer_event(ptr, 0) != 0)
- ptr->it_overrun++;
+ enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+
+ spin_lock_irqsave(&ptr->it_lock, flags);
+ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
+ if (posix_timer_event(ptr, 0) != 0)
+ ptr->it_overrun++;
+ }
/* Re-add periodic timers */
if (ptr->it.alarm.interval.tv64) {
ptr->it_overrun += alarm_forward(alarm, now,
ptr->it.alarm.interval);
- return ALARMTIMER_RESTART;
+ result = ALARMTIMER_RESTART;
}
- return ALARMTIMER_NORESTART;
+ spin_unlock_irqrestore(&ptr->it_lock, flags);
+
+ return result;
}
/**
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
* @new_timer: k_itimer pointer
* @cur_setting: itimerspec data to fill
*
- * Copies the itimerspec data out from the k_itimer
+ * Copies out the current itimerspec data
*/
static void alarm_timer_get(struct k_itimer *timr,
struct itimerspec *cur_setting)
{
- memset(cur_setting, 0, sizeof(struct itimerspec));
+ ktime_t relative_expiry_time =
+ alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
+
+ if (ktime_to_ns(relative_expiry_time) > 0) {
+ cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
+ } else {
+ cur_setting->it_value.tv_sec = 0;
+ cur_setting->it_value.tv_nsec = 0;
+ }
- cur_setting->it_interval =
- ktime_to_timespec(timr->it.alarm.interval);
- cur_setting->it_value =
- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
- return;
+ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
}
/**
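
The effect of the alarm_timer_get() change is observable through the ordinary POSIX timer API: timer_gettime() reports the remaining (relative) time, and a SIGEV_NONE timer is only ever queried, never signalled. A userspace sketch against CLOCK_REALTIME, which CLOCK_*_ALARM timers now match (link with -lrt):

#include <stdio.h>
#include <time.h>
#include <signal.h>
#include <unistd.h>

int main(void)
{
        timer_t t;
        struct sigevent sev = { .sigev_notify = SIGEV_NONE };
        struct itimerspec its = { .it_value = { .tv_sec = 2 } }, cur;

        if (timer_create(CLOCK_REALTIME, &sev, &t))
                return 1;
        timer_settime(t, 0, &its, NULL);
        sleep(1);
        timer_gettime(t, &cur);         /* ~1s remaining, not an absolute time */
        printf("remaining: %ld.%09lds\n",
               (long)cur.it_value.tv_sec, cur.it_value.tv_nsec);
        timer_delete(t);
        return 0;
}
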
diff --git a/kernel/time/time.c b/kernel/time/time.c
index f0294ba14634..a9ae20fb0b11 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
* that a remainder subtract here would not do the right thing as the
* resolution values don't fall on second boundries. I.e. the line:
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
*
* Rather, we just shift the bits off the right.
*
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
* value to a scaled second value.
*/
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
{
- unsigned long sec = value->tv_sec;
- long nsec = value->tv_nsec + TICK_NSEC - 1;
+ nsec = nsec + TICK_NSEC - 1;
if (sec >= MAX_SEC_IN_JIFFIES){
sec = MAX_SEC_IN_JIFFIES;
@@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value)
(NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+ return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
EXPORT_SYMBOL(timespec_to_jiffies);
void
@@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
}
EXPORT_SYMBOL(jiffies_to_timespec);
-/* Same for "timeval"
- *
- * Well, almost. The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
+ *
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
*/
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
- unsigned long sec = value->tv_sec;
- long usec = value->tv_usec;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- usec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+ return __timespec_to_jiffies(value->tv_sec,
+ value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);
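
Setting aside the scaled fixed-point arithmetic, both entry points now share one rounding rule: round the sub-second part up to the next tick, in nanoseconds. A simplified model (the plain division stands in for the kernel's SEC_JIFFIE_SC/NSEC_JIFFIE_SC math, and HZ is picked arbitrarily):

#include <stdio.h>

#define HZ 100UL
#define NSEC_PER_SEC 1000000000UL
#define NSEC_PER_USEC 1000UL
#define TICK_NSEC (NSEC_PER_SEC / HZ)

/* Simplified __timespec_to_jiffies(): round nsec up to a tick. */
static unsigned long ts_to_jiffies(unsigned long sec, unsigned long nsec)
{
        return sec * HZ + (nsec + TICK_NSEC - 1) / TICK_NSEC;
}

/* Same trick as the patched timeval_to_jiffies(): convert usec to
 * nsec and reuse the timespec path, avoiding a separate usec
 * multiplier with its own rounding error. */
static unsigned long tv_to_jiffies(unsigned long sec, unsigned long usec)
{
        return ts_to_jiffies(sec, usec * NSEC_PER_USEC);
}

int main(void)
{
        printf("%lu\n", tv_to_jiffies(1, 10001));  /* 1s + 10.001ms -> 102 */
        return 0;
}
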
diff --git a/lib/Kconfig b/lib/Kconfig
index a5ce0c7f6c30..54cf309a92a5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
config ARCH_USE_CMPXCHG_LOCKREF
bool
+config ARCH_HAS_FAST_MULTIPLIER
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/hweight.c b/lib/hweight.c
index b7d81ba143d1..9a5c1f221558 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,7 +11,7 @@
unsigned int __sw_hweight32(unsigned int w)
{
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
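
The CONFIG_ARCH_HAS_FAST_MULTIPLIER path finishes the population count with a single multiply that sums the per-byte counts into the top byte. The 32-bit variant, standalone:

#include <stdio.h>
#include <stdint.h>

/* Fast-multiplier path of __sw_hweight32(): after the partial sums,
 * multiplying by 0x01010101 adds all four byte counts into the top
 * byte, which the shift then extracts. */
static unsigned int hweight32(uint32_t w)
{
        w -= (w >> 1) & 0x55555555;
        w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w = (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
}

int main(void)
{
        printf("%u\n", hweight32(0xf00f0001));  /* 9 */
        return 0;
}
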
diff --git a/lib/string.c b/lib/string.c
index 992bf30af759..f3c6ff596414 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes);
value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
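
The multiplier in memchr_inv() serves the same purpose: it replicates the target byte across the whole word, so an entire word of the buffer can be compared against the pattern at once. Standalone:

#include <stdio.h>
#include <stdint.h>

/* Byte replication step from memchr_inv(): one multiply copies the
 * low byte into every byte of the 64-bit word. */
int main(void)
{
        uint64_t value64 = 0xab;

        value64 *= 0x0101010101010101ULL;
        printf("%016llx\n", (unsigned long long)value64);  /* abab...ab */
        return 0;
}
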