Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	62
1 file changed, 23 insertions, 39 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e7ccbac25b72..62a08a7badc4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,63 +2758,47 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	update_mmu_cache(vma, address, pte);
 }
 
-#define FAULT_AROUND_ORDER 4
+static unsigned long fault_around_bytes = 65536;
+
+static inline unsigned long fault_around_pages(void)
+{
+	return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+}
+
+static inline unsigned long fault_around_mask(void)
+{
+	return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+}
 
-#ifdef CONFIG_DEBUG_FS
-static unsigned int fault_around_order = FAULT_AROUND_ORDER;
-static int fault_around_order_get(void *data, u64 *val)
+#ifdef CONFIG_DEBUG_FS
+static int fault_around_bytes_get(void *data, u64 *val)
 {
-	*val = fault_around_order;
+	*val = fault_around_bytes;
 	return 0;
 }
 
-static int fault_around_order_set(void *data, u64 val)
+static int fault_around_bytes_set(void *data, u64 val)
 {
-	BUILD_BUG_ON((1UL << FAULT_AROUND_ORDER) > PTRS_PER_PTE);
-	if (1UL << val > PTRS_PER_PTE)
+	if (val / PAGE_SIZE > PTRS_PER_PTE)
 		return -EINVAL;
-	fault_around_order = val;
+	fault_around_bytes = val;
 	return 0;
 }
-DEFINE_SIMPLE_ATTRIBUTE(fault_around_order_fops,
-		fault_around_order_get, fault_around_order_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
+		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
 
 static int __init fault_around_debugfs(void)
 {
 	void *ret;
 
-	ret = debugfs_create_file("fault_around_order", 0644, NULL, NULL,
-			&fault_around_order_fops);
+	ret = debugfs_create_file("fault_around_bytes", 0644, NULL, NULL,
+			&fault_around_bytes_fops);
 	if (!ret)
-		pr_warn("Failed to create fault_around_order in debugfs");
+		pr_warn("Failed to create fault_around_bytes in debugfs");
 	return 0;
 }
 late_initcall(fault_around_debugfs);
-
-static inline unsigned long fault_around_pages(void)
-{
-	return 1UL << fault_around_order;
-}
-
-static inline unsigned long fault_around_mask(void)
-{
-	return ~((1UL << (PAGE_SHIFT + fault_around_order)) - 1);
-}
-#else
-static inline unsigned long fault_around_pages(void)
-{
-	unsigned long nr_pages;
-
-	nr_pages = 1UL << FAULT_AROUND_ORDER;
-	BUILD_BUG_ON(nr_pages > PTRS_PER_PTE);
-	return nr_pages;
-}
-
-static inline unsigned long fault_around_mask(void)
-{
-	return ~((1UL << (PAGE_SHIFT + FAULT_AROUND_ORDER)) - 1);
-}
 #endif
 
 static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
@@ -2871,7 +2855,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * if page by the offset is not ready to be mapped (cold cache or
 	 * something).
 	 */
-	if (vma->vm_ops->map_pages) {
+	if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))
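
For illustration only (not part of the patch above): a minimal userspace sketch of what the two new helpers compute from fault_around_bytes, assuming a 4 KB page size and using a simplified stand-in for the kernel's rounddown_pow_of_two(). With the default fault_around_bytes = 65536 it reports 16 pages and a 64 KB-aligned mask.

/* Standalone demo; PAGE_SHIFT/PAGE_SIZE/PAGE_MASK are redefined here for a
 * 4 KB-page system purely for demonstration. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Simplified stand-in for the kernel's rounddown_pow_of_two():
 * returns the largest power of two <= n (n must be non-zero). */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

/* Mirrors fault_around_pages() from the patch: number of pages covered. */
static unsigned long fault_around_pages(unsigned long fault_around_bytes)
{
	return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
}

/* Mirrors fault_around_mask() from the patch: address mask used to align
 * the start of the fault-around window. */
static unsigned long fault_around_mask(unsigned long fault_around_bytes)
{
	return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
}

int main(void)
{
	unsigned long bytes = 65536;	/* default from the patch */

	printf("pages=%lu mask=%#lx\n",
	       fault_around_pages(bytes), fault_around_mask(bytes));
	return 0;
}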