author     David S. Miller <davem@davemloft.net>            2016-10-25 19:43:17 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-11-21 10:06:41 +0100
commit     2ba06323db413dd0b8f501dfc32df5d55f17c520 (patch)
tree       2d41f9a219e822fed3d453ef4f8e9cf47da39f03
parent     75931800733c68082c530f6c5421928e4288ee9e (diff)
sparc64: Handle extremely large kernel TSB range flushes sanely.
[ Upstream commit 849c498766060a16aad5b0e0d03206726e7d2fa4 ]

If the number of pages we are flushing is more than twice the number
of entries in the TSB, just scan the TSB table for matches rather
than probing each and every page in the range.

Based upon a patch and report by James Clarke.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  arch/sparc/mm/tsb.c  17
1 file changed, 17 insertions, 0 deletions
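For illustration only, here is a minimal standalone sketch of the probe-vs-scan heuristic this patch introduces. The page size, TSB size, tag layout and every SKETCH_* name below are assumptions chosen for the example, not the kernel's actual definitions; the real ones live in arch/sparc headers.

/*
 * Standalone sketch of the heuristic described in the commit message.
 * All sizes and identifiers are illustrative stand-ins.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	13	/* sparc64 uses 8K base pages */
#define SKETCH_TSB_NENTRIES	4096UL	/* hypothetical kernel TSB size */
#define SKETCH_TAG_INVALID_BIT	46	/* stand-in for TSB_TAG_INVALID_BIT */

struct sketch_tsb {
	unsigned long tag;	/* holds vaddr >> 22, mirroring tag_compare() in the diff below */
	unsigned long pte;
};

static struct sketch_tsb sketch_swapper_tsb[SKETCH_TSB_NENTRIES];

/* Scan path: visit every TSB entry once, reconstruct the virtual
 * address it caches from (index, tag), and invalidate it if it falls
 * inside [start, end).  Cost is fixed at SKETCH_TSB_NENTRIES probes,
 * independent of how large the flushed range is. */
static void sketch_flush_range_scan(unsigned long start, unsigned long end)
{
	unsigned long idx;

	for (idx = 0; idx < SKETCH_TSB_NENTRIES; idx++) {
		struct sketch_tsb *ent = &sketch_swapper_tsb[idx];
		unsigned long match = idx << SKETCH_PAGE_SHIFT;

		match |= (ent->tag << 22);
		if (match >= start && match < end)
			ent->tag = (1UL << SKETCH_TAG_INVALID_BIT);
	}
}

/* Probe-vs-scan decision: once the range covers at least twice as many
 * pages as the TSB has entries, per-page probing would touch the table
 * more often than one full scan, so switch strategies. */
static int sketch_should_scan(unsigned long start, unsigned long end)
{
	return ((end - start) >> SKETCH_PAGE_SHIFT) >= 2 * SKETCH_TSB_NENTRIES;
}

int main(void)
{
	/* 16MB = 2048 pages: still cheaper to probe page by page. */
	printf("16MB flush uses scan? %d\n", sketch_should_scan(0, 16UL << 20));
	/* 1GB = 131072 pages: well past 2 * 4096, so scan the table. */
	printf("1GB  flush uses scan? %d\n", sketch_should_scan(0, 1UL << 30));
	sketch_flush_range_scan(0, 1UL << 30);
	return 0;
}

With these assumed numbers the per-page loop is unbounded in the size of the flush, while the scan is bounded by the table size, so capping the probe path at twice the entry count bounds the worst-case work. The actual patch applies the same cutoff inside flush_tsb_kernel_range(), as shown in the diff that follows.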
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 6725ed45580e..9cdeca0fa955 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
return (tag == (vaddr >> 22));
}
+static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
+ struct tsb *ent = &swapper_tsb[idx];
+ unsigned long match = idx << 13;
+
+ match |= (ent->tag << 22);
+ if (match >= start && match < end)
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
+}
+
/* TSB flushes need only occur on the processor initiating the address
* space modification, not on each cpu the address space has run on.
* Only the TLB flush needs that treatment.
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long v;
+ if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
+ return flush_tsb_kernel_range_scan(start, end);
+
for (v = start; v < end; v += PAGE_SIZE) {
unsigned long hash = tsb_hash(v, PAGE_SHIFT,
KERNEL_TSB_NENTRIES);