author    Len Brown <len.brown@intel.com>    2011-03-23 02:33:54 -0400
committer Len Brown <len.brown@intel.com>    2011-03-23 02:33:54 -0400
commit    5c129a8600100a5d0f5fdbc1014c5dba1d307bc4 (patch)
tree      9877a14b49cff43d0ba10c12f407ec551c77daa5 /arch/powerpc/mm/tlb_hash64.c
parent    797b10a07069e153d41aedb4ae8e76660279e2ee (diff)
parent    521cb40b0c44418a4fd36dc633f575813d59a43d (diff)
Merge commit 'v2.6.38' into release
Diffstat (limited to 'arch/powerpc/mm/tlb_hash64.c')
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619..c14d09f614f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
* needs to be flushed. This function will either perform the flush
* immediately or will batch it up if the current CPU has an active
* batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
*/
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge)
{
- struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+ struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
unsigned long vsid, vaddr;
unsigned int psize;
int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/
if (!batch->active) {
flush_hash_page(vaddr, rpte, psize, ssize, 0);
+ put_cpu_var(ppc64_tlb_batch);
return;
}
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR)
__flush_tlb_pending(batch);
+ put_cpu_var(ppc64_tlb_batch);
}
/*
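
The hunks above replace __get_cpu_var(), which assumes the caller has already disabled preemption, with the get_cpu_var()/put_cpu_var() pair, which disables preemption on access and re-enables it when the reference is dropped; that is why the "must be called from within some kind of spinlock/non-preempt region" comment is removed and why each return path gains a put_cpu_var() call. A minimal sketch of that per-CPU access pattern follows, using a hypothetical per-CPU structure named my_batch rather than the file's ppc64_tlb_batch; it is an illustration of the pattern, not the kernel's actual function:

#include <linux/percpu.h>
#include <linux/preempt.h>

struct my_batch {
	int index;
};

static DEFINE_PER_CPU(struct my_batch, my_batch);

static void touch_my_batch(void)
{
	/* get_cpu_var() disables preemption and returns this CPU's instance */
	struct my_batch *batch = &get_cpu_var(my_batch);

	batch->index++;

	/* every exit path must pair the access with put_cpu_var(),
	 * which re-enables preemption */
	put_cpu_var(my_batch);
}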