author    | Nicolas Pitre <nicolas.pitre@linaro.org>   | 2012-11-25 03:24:32 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2012-11-26 12:23:53 +0000
commit    | 3e99675af1b25a191c467700499b1cbe5585a778 (patch)
tree      | 516c40a67498aaee1246eeb9e7d2efec335176aa /arch
parent    | ce7b175656a1903605f0184bf33acebff70bfe7f (diff)
ARM: 7582/2: rename kvm_seq to vmalloc_seq to avoid confusion with KVM
The kvm_seq value has nothing whatsoever to do with this other KVM.
Given that KVM support on ARM is imminent, it's best to rename kvm_seq
to something that clearly identifies what it is: a sequence
number for vmalloc section mappings.
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
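
For readers without the kernel sources at hand, the pattern behind this sequence number is easy to demonstrate in isolation. Below is a minimal user-space C sketch of the idea, not kernel code: the names `master_table`, `master_seq`, and `check_seq` are invented for illustration, and the real writer/reader pair is `unmap_area_sections()` / `__check_vmalloc_seq()` in the diff below. A writer bumps a global sequence number whenever it changes the master table; a reader copies the table and retries until the sequence it started from is still current.

```c
/*
 * Minimal user-space sketch of the vmalloc_seq pattern.
 * All names here are invented for illustration only.
 */
#include <stdio.h>
#include <string.h>

#define TABLE_ENTRIES 8

/* Stand-ins for init_mm's master page table and its sequence number. */
static unsigned int master_table[TABLE_ENTRIES];
static volatile unsigned int master_seq;

struct local_context {
	unsigned int table[TABLE_ENTRIES];
	unsigned int seq;	/* plays the role of mm->context.vmalloc_seq */
};

/* Writer side: analogous to pmd_clear() + init_mm.context.vmalloc_seq++. */
static void master_update(int idx, unsigned int val)
{
	master_table[idx] = val;
	master_seq++;		/* publish that the master table changed */
}

/* Reader side: analogous to __check_vmalloc_seq(). */
static void check_seq(struct local_context *ctx)
{
	unsigned int seq;

	do {
		seq = master_seq;
		memcpy(ctx->table, master_table, sizeof(master_table));
		ctx->seq = seq;
		/* If the writer raced with our copy, go around again. */
	} while (seq != master_seq);
}

int main(void)
{
	struct local_context ctx = { .seq = ~0u };

	master_update(3, 0xcafe);
	if (ctx.seq != master_seq)	/* the check_and_switch_context() test */
		check_seq(&ctx);

	printf("entry 3 = %#x, seq = %u\n", ctx.table[3], ctx.seq);
	return 0;
}
```

Note that this is not a general-purpose lock-free algorithm: as the in-tree comment below says, the kernel version is still racy on SMP machines, but it is cheap and adequate for catching use-after-iounmap.
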
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/include/asm/mmu.h         |  2
-rw-r--r-- | arch/arm/include/asm/mmu_context.h |  6
-rw-r--r-- | arch/arm/mm/context.c              |  4
-rw-r--r-- | arch/arm/mm/ioremap.c              | 16
4 files changed, 14 insertions, 14 deletions
```diff
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 5b53b53ab5cf..9f77e7804f3b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -7,7 +7,7 @@ typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	u64 id;
 #endif
-	unsigned int kvm_seq;
+	unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a64f61cb23d1..e1f644bc7cc5 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,7 +20,7 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
@@ -34,8 +34,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a27d7363be2..bc4a5e9ebb78 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -186,8 +186,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
 
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	/*
 	 * Required during context switch to avoid speculative page table
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 5dcc2fd46c46..88fd86cf3d9a 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
-void __check_kvm_seq(struct mm_struct *mm)
+void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	unsigned int seq;
 
 	do {
-		seq = init_mm.context.kvm_seq;
+		seq = init_mm.context.vmalloc_seq;
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
-		mm->context.kvm_seq = seq;
-	} while (seq != init_mm.context.kvm_seq);
+		mm->context.vmalloc_seq = seq;
+	} while (seq != init_mm.context.vmalloc_seq);
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 		if (!pmd_none(pmd)) {
 			/*
 			 * Clear the PMD from the page table, and
-			 * increment the kvm sequence so others
+			 * increment the vmalloc sequence so others
 			 * notice this change.
 			 *
 			 * Note: this is still racy on SMP machines.
 			 */
 			pmd_clear(pmdp);
-			init_mm.context.kvm_seq++;
+			init_mm.context.vmalloc_seq++;
 
 			/*
 			 * Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-		__check_kvm_seq(current->active_mm);
+	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
+		__check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }
```