Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/context.c      | 7
-rw-r--r--  arch/arm/mm/dma-mapping.c  | 2
-rw-r--r--  arch/arm/mm/mm.h           | 3
-rw-r--r--  arch/arm/mm/mmu.c          | 8
4 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93e..4e07eec1270d 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
pid = task_pid_nr(thread->task) << ASID_BITS;
asm volatile(
" mrc p15, 0, %0, c13, c0, 1\n"
- " bfi %1, %0, #0, %2\n"
- " mcr p15, 0, %1, c13, c0, 1\n"
+ " and %0, %0, %2\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c13, c0, 1\n"
: "=r" (contextidr), "+r" (pid)
- : "I" (ASID_BITS));
+ : "I" (~ASID_MASK));
isb();
return NOTIFY_OK;
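
The hunk above replaces the bfi-based field insert with an explicit and/orr pair. As a rough C sketch of the intended read-modify-write (the ASID_BITS width and the ASID_MASK layout below are assumptions inferred from the ~ASID_MASK operand, not copied from the kernel headers):

	#include <stdint.h>

	#define ASID_BITS 8                      /* assumed width of the ASID field      */
	#define ASID_MASK (~0u << ASID_BITS)     /* assumed: the bits above the ASID     */

	/* Plain-C equivalent of the mrc/and/orr/mcr sequence: keep the live ASID,
	 * replace the PROCID bits with the new, already-shifted pid value.
	 */
	static uint32_t compose_contextidr(uint32_t contextidr, uint32_t pid_shifted)
	{
		contextidr &= ~ASID_MASK;    /* and %0, %0, %2  (preserve ASID)   */
		contextidr |= pid_shifted;   /* orr %0, %0, %1  (insert PROCID)   */
		return contextidr;           /* value written back via mcr ... c13 */
	}
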
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 051204fc4617..e59c4ab71bcb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -489,7 +489,7 @@ static bool __in_atomic_pool(void *start, size_t size)
void *pool_start = pool->vaddr;
void *pool_end = pool->vaddr + pool->size;
- if (start < pool_start || start > pool_end)
+ if (start < pool_start || start >= pool_end)
return false;
if (end <= pool_end)
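
The one-character change above makes the start check treat the pool as a half-open range [pool_start, pool_end), so a buffer beginning exactly at pool_end is no longer reported as in-pool. A minimal sketch of that containment test (function and parameter names here are illustrative, not the kernel's):

	#include <stdbool.h>
	#include <stddef.h>

	/* Is [start, start + size) fully inside [pool_start, pool_start + pool_size)?
	 * Mirrors the corrected check: start must be strictly below pool_end.
	 */
	static bool range_in_pool(const char *start, size_t size,
				  const char *pool_start, size_t pool_size)
	{
		const char *end = start + size;
		const char *pool_end = pool_start + pool_size;

		if (start < pool_start || start >= pool_end)  /* was: start > pool_end */
			return false;

		return end <= pool_end;  /* the whole buffer must fit in the pool */
	}
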
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160618ef..a8ee92da3544 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING 0x20000000
+
/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84a..c2fa21d0103e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
vm->addr = (void *)addr;
vm->size = SECTION_SIZE;
- vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = pmd_empty_section_gap;
vm_area_add_early(vm);
}
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
/* we're still single threaded hence no lock needed here */
for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
continue;
addr = (unsigned long)vm->addr;
if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
* Check whether this memory bank would partially overlap
* the vmalloc area.
*/
- if (__va(bank->start + bank->size) > vmalloc_min ||
- __va(bank->start + bank->size) < __va(bank->start)) {
+ if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+ __va(bank->start + bank->size - 1) <= __va(bank->start)) {
unsigned long newsize = vmalloc_min - __va(bank->start);
printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
"to -%.8llx (vmalloc region overlap).\n",