diff options
author | Rich Wiley <rwiley@nvidia.com> | 2014-05-05 18:10:10 -0700 |
---|---|---|
committer | Riham Haidar <rhaidar@nvidia.com> | 2014-05-16 12:02:43 -0700 |
commit | b728e1e51d85305e456807ffa2cad5dab0fe5d04 (patch) | |
tree | f0f8068df3fee7adf9de40b378445995e9d6dbfa /mm | |
parent | e744de8f3801efaf21838b4d8e30df415cd1adbe (diff) |
mm: vmalloc: skip unneeded TLB flushes during lazy vfree
Currently, __purge_vmap_area_lazy will do a TLBI on the smallest
single range that includes all purgeable vmap_areas. This patch
will instead do TLBIs per vmap_area, reducing the number of
invalidated pages.
Change-Id: I40b9c8c533e6814be076d454ef7ebdbe408c08e0
Signed-off-by: Rich Wiley <rwiley@nvidia.com>
Reviewed-on: http://git-master/r/407748
Reviewed-by: Alexander Van Brunt <avanbrunt@nvidia.com>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmalloc.c | 5 |
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 666a6085ce83..35b19d43fb6e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -636,10 +636,11 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	atomic_sub(nr, &vmap_lazy_nr);
 	if (nr || force_flush) {
-		if (*end - *start > sysctl_lazy_vfree_tlb_flush_all_threshold)
+		if (nr > (sysctl_lazy_vfree_tlb_flush_all_threshold >> PAGE_SHIFT))
 			flush_tlb_all();
 		else
-			flush_tlb_kernel_range(*start, *end);
+			list_for_each_entry(va, &valist, purge_list)
+				flush_tlb_kernel_range(va->va_start, va->va_end);
 	}
 	if (nr) {