From 1f7ef657740344541645349a8bece90cbff898f5 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 24 Feb 2021 12:01:42 -0800 Subject: mm/filemap: remove unused parameter and change to void type for replace_page_cache_page() Since commit 74d609585d8b ("page cache: Add and replace pages using the XArray") was merged, the replace_page_cache_page() can not fail and always return 0, we can remove the redundant return value and void it. Moreover remove the unused gfp_mask. Link: https://lkml.kernel.org/r/609c30e5274ba15d8b90c872fd0d8ac437a9b2bb.1610071401.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Matthew Wilcox Cc: Miklos Szeredi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d5570deff400..74e466e5a2ba 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -757,7 +757,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern void delete_from_page_cache(struct page *page); extern void __delete_from_page_cache(struct page *page, void *shadow); -int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); +void replace_page_cache_page(struct page *old, struct page *new); void delete_from_page_cache_batch(struct address_space *mapping, struct pagevec *pvec); -- cgit v1.2.3 From 4805462598113f350838d612d0895db2dbb3992b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 24 Feb 2021 12:02:02 -0800 Subject: mm/filemap: pass a sleep state to put_and_wait_on_page_locked This is prep work for the next patch, but I think at least one of the current callers would prefer a killable sleep to an uninterruptible one. Link: https://lkml.kernel.org/r/20210122160140.223228-6-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Kent Overstreet Reviewed-by: Christoph Hellwig Cc: Miaohe Lin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 74e466e5a2ba..bd629d676a27 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -681,8 +681,7 @@ static inline int wait_on_page_locked_killable(struct page *page) return wait_on_page_bit_killable(compound_head(page), PG_locked); } -extern void put_and_wait_on_page_locked(struct page *page); - +int put_and_wait_on_page_locked(struct page *page, int state); void wait_on_page_writeback(struct page *page); extern void end_page_writeback(struct page *page); void wait_for_stable_page(struct page *page); -- cgit v1.2.3 From 87fa0f3eb267eed966ee194907bc15376c1b758f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 24 Feb 2021 12:02:42 -0800 Subject: mm/filemap: rename generic_file_buffered_read to filemap_read Rename generic_file_buffered_read to match the naming of filemap_fault, also update the written parameter to a more descriptive name and improve the kerneldoc comment. 
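As a hedged illustration of how the renamed helper is consumed (a simplified sketch, not the exact upstream hunk; the real generic_file_read_iter() also handles the O_DIRECT case before falling back to the page cache):

	ssize_t generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
	{
		ssize_t retval = 0;

		/* O_DIRECT fast path elided in this sketch */
		return filemap_read(iocb, iter, retval);
	}
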
Link: https://lkml.kernel.org/r/20210122160140.223228-18-willy@infradead.org Signed-off-by: Christoph Hellwig Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Kent Overstreet Cc: Miaohe Lin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fs.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 418b772d6eca..ec8f3ddf4a6a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3080,8 +3080,8 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); extern int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count); extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); -extern ssize_t generic_file_buffered_read(struct kiocb *iocb, - struct iov_iter *to, ssize_t already_read); +ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to, + ssize_t already_read); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); -- cgit v1.2.3 From 2e9bd483159939ed2c0704b914294653c8341d25 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Wed, 24 Feb 2021 12:03:11 -0800 Subject: mm: memcg/slab: pre-allocate obj_cgroups for slab caches with SLAB_ACCOUNT In general it's unknown in advance if a slab page will contain accounted objects or not. In order to avoid memory waste, an obj_cgroup vector is allocated dynamically when a need to account of a new object arises. Such approach is memory efficient, but requires an expensive cmpxchg() to set up the memcg/objcgs pointer, because an allocation can race with a different allocation on another cpu. But in some common cases it's known for sure that a slab page will contain accounted objects: if the page belongs to a slab cache with a SLAB_ACCOUNT flag set. It includes such popular objects like vm_area_struct, anon_vma, task_struct, etc. In such cases we can pre-allocate the objcgs vector and simple assign it to the page without any atomic operations, because at this early stage the page is not visible to anyone else. A very simplistic benchmark (allocating 10000000 64-bytes objects in a row) shows ~15% win. In the real life it seems that most workloads are not very sensitive to the speed of (accounted) slab allocations. 
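The approach can be sketched as follows (a simplified, hedged illustration only; objcgs_wanted() is a hypothetical helper, and objects/gfp/node stand in for values taken from the surrounding slab allocation code):

	static inline bool objcgs_wanted(struct kmem_cache *s)
	{
		/* accounted caches are known up front via SLAB_ACCOUNT */
		return memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT);
	}

	/* at slab page allocation time, before the page is published */
	if (objcgs_wanted(s)) {
		void *vec = kcalloc_node(objects, sizeof(struct obj_cgroup *),
					 gfp, node);
		if (vec)	/* plain store, no cmpxchg() needed here */
			page->memcg_data = (unsigned long)vec | MEMCG_DATA_OBJCGS;
	}
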
[guro@fb.com: open-code set_page_objcgs() and add some comments, by Johannes] Link: https://lkml.kernel.org/r/20201113001926.GA2934489@carbon.dhcp.thefacebook.com [akpm@linux-foundation.org: fix it for mm-slub-call-account_slab_page-after-slab-page-initialization-fix.patch] Link: https://lkml.kernel.org/r/20201110195753.530157-2-guro@fb.com Signed-off-by: Roman Gushchin Acked-by: Johannes Weiner Reviewed-by: Shakeel Butt Cc: Michal Hocko Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index eeb0b52203e9..7a4dd1cb19fe 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -475,19 +475,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page) return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } -/* - * set_page_objcgs - associate a page with a object cgroups vector - * @page: a pointer to the page struct - * @objcgs: a pointer to the object cgroups vector - * - * Atomically associates a page with a vector of object cgroups. - */ -static inline bool set_page_objcgs(struct page *page, - struct obj_cgroup **objcgs) -{ - return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs | - MEMCG_DATA_OBJCGS); -} #else static inline struct obj_cgroup **page_objcgs(struct page *page) { @@ -498,12 +485,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page) { return NULL; } - -static inline bool set_page_objcgs(struct page *page, - struct obj_cgroup **objcgs) -{ - return true; -} #endif static __always_inline bool memcg_stat_item_in_bytes(int idx) -- cgit v1.2.3 From f3344adf38bdb3107d40483dd9501215ad40edce Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Wed, 24 Feb 2021 12:03:15 -0800 Subject: mm: memcontrol: optimize per-lruvec stats counter memory usage The vmstat threshold is 32 (MEMCG_CHARGE_BATCH), Actually the threshold can be as big as MEMCG_CHARGE_BATCH * PAGE_SIZE. It still fits into s32. So introduce struct batched_lruvec_stat to optimize memory usage. The size of struct lruvec_stat is 304 bytes on 64 bit systems. As it is a per-cpu structure. So with this patch, we can save 304 / 2 * ncpu bytes per-memcg per-node where ncpu is the number of the possible CPU. If there are c memory cgroup (include dying cgroup) and n NUMA node in the system. Finally, we can save (152 * ncpu * c * n) bytes. [akpm@linux-foundation.org: fix typo in comment] Link: https://lkml.kernel.org/r/20201210042121.39665-1-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Shakeel Butt Cc: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Cc: Shakeel Butt Cc: Roman Gushchin Cc: Stephen Rothwell Cc: Chris Down Cc: Yafang Shao Cc: Wei Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 7a4dd1cb19fe..41bbf71edd9f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -92,6 +92,10 @@ struct lruvec_stat { long count[NR_VM_NODE_STAT_ITEMS]; }; +struct batched_lruvec_stat { + s32 count[NR_VM_NODE_STAT_ITEMS]; +}; + /* * Bitmap of shrinker::id corresponding to memcg-aware shrinkers, * which have elements charged to this memcg. 
@@ -107,11 +111,17 @@ struct memcg_shrinker_map { struct mem_cgroup_per_node { struct lruvec lruvec; - /* Legacy local VM stats */ + /* + * Legacy local VM stats. This should be struct lruvec_stat and + * cannot be optimized to struct batched_lruvec_stat. Because + * the threshold of the lruvec_stat_cpu can be as big as + * MEMCG_CHARGE_BATCH * PAGE_SIZE. It can fit into s32. But this + * filed has no upper limit. + */ struct lruvec_stat __percpu *lruvec_stat_local; /* Subtree VM stats (batched updates) */ - struct lruvec_stat __percpu *lruvec_stat_cpu; + struct batched_lruvec_stat __percpu *lruvec_stat_cpu; atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; -- cgit v1.2.3 From 69473e5de87389be6c0fa4a5d574a50c8f904fb3 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Wed, 24 Feb 2021 12:03:23 -0800 Subject: mm: memcontrol: convert NR_ANON_THPS account to pages Currently we use struct per_cpu_nodestat to cache the vmstat counters, which leads to inaccurate statistics especially THP vmstat counters. In the systems with hundreds of processors it can be GBs of memory. For example, for a 96 CPUs system, the threshold is the maximum number of 125. And the per cpu counters can cache 23.4375 GB in total. The THP page is already a form of batched addition (it will add 512 worth of memory in one go) so skipping the batching seems like sensible. Although every THP stats update overflows the per-cpu counter, resorting to atomic global updates. But it can make the statistics more accuracy for the THP vmstat counters. So we convert the NR_ANON_THPS account to pages. This patch is consistent with 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival"). Doing this also can make the unit of vmstat counters more unified. Finally, the unit of the vmstat counters are pages, kB and bytes. The B/KB suffix can tell us that the unit is bytes or kB. The rest which is without suffix are pages. Link: https://lkml.kernel.org/r/20201228164110.2838-3-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Greg Kroah-Hartman Cc: Rafael. J. Wysocki Cc: Alexey Dobriyan Cc: Johannes Weiner Cc: Vladimir Davydov Cc: Hugh Dickins Cc: Shakeel Butt Cc: Roman Gushchin Cc: Sami Tolvanen Cc: Feng Tang Cc: NeilBrown Cc: Joonsoo Kim Cc: Randy Dunlap Cc: Michal Hocko Cc: Pankaj Gupta Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b593316bff3d..67d50ef5dd20 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -209,6 +209,19 @@ enum node_stat_item { NR_VM_NODE_STAT_ITEMS }; +/* + * Returns true if the item should be printed in THPs (/proc/vmstat + * currently prints number of anon, file and shmem THPs. But the item + * is charged in pages). + */ +static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) +{ + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + return false; + + return item == NR_ANON_THPS; +} + /* * Returns true if the value is measured in bytes (most vmstat values are * measured in pages). 
This defines the API part, the internal representation -- cgit v1.2.3 From bf9ecead53c89d3d2cf60acbc460174ebbcf0027 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Wed, 24 Feb 2021 12:03:27 -0800 Subject: mm: memcontrol: convert NR_FILE_THPS account to pages Currently we use struct per_cpu_nodestat to cache the vmstat counters, which leads to inaccurate statistics especially THP vmstat counters. In the systems with if hundreds of processors it can be GBs of memory. For example, for a 96 CPUs system, the threshold is the maximum number of 125. And the per cpu counters can cache 23.4375 GB in total. The THP page is already a form of batched addition (it will add 512 worth of memory in one go) so skipping the batching seems like sensible. Although every THP stats update overflows the per-cpu counter, resorting to atomic global updates. But it can make the statistics more accuracy for the THP vmstat counters. So we convert the NR_FILE_THPS account to pages. This patch is consistent with 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival"). Doing this also can make the unit of vmstat counters more unified. Finally, the unit of the vmstat counters are pages, kB and bytes. The B/KB suffix can tell us that the unit is bytes or kB. The rest which is without suffix are pages. Link: https://lkml.kernel.org/r/20201228164110.2838-4-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alexey Dobriyan Cc: Feng Tang Cc: Greg Kroah-Hartman Cc: Hugh Dickins Cc: Johannes Weiner Cc: Joonsoo Kim Cc: Michal Hocko Cc: NeilBrown Cc: Pankaj Gupta Cc: Rafael. J. Wysocki Cc: Randy Dunlap Cc: Roman Gushchin Cc: Sami Tolvanen Cc: Shakeel Butt Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 67d50ef5dd20..b751a9898bb6 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -219,7 +219,8 @@ static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return false; - return item == NR_ANON_THPS; + return item == NR_ANON_THPS || + item == NR_FILE_THPS; } /* -- cgit v1.2.3 From 57b2847d3c1dc154923578efb47a12302a57d700 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Wed, 24 Feb 2021 12:03:31 -0800 Subject: mm: memcontrol: convert NR_SHMEM_THPS account to pages Currently we use struct per_cpu_nodestat to cache the vmstat counters, which leads to inaccurate statistics especially THP vmstat counters. In the systems with hundreds of processors it can be GBs of memory. For example, for a 96 CPUs system, the threshold is the maximum number of 125. And the per cpu counters can cache 23.4375 GB in total. The THP page is already a form of batched addition (it will add 512 worth of memory in one go) so skipping the batching seems like sensible. Although every THP stats update overflows the per-cpu counter, resorting to atomic global updates. But it can make the statistics more accuracy for the THP vmstat counters. So we convert the NR_SHMEM_THPS account to pages. This patch is consistent with 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival"). Doing this also can make the unit of vmstat counters more unified. Finally, the unit of the vmstat counters are pages, kB and bytes. The B/KB suffix can tell us that the unit is bytes or kB. The rest which is without suffix are pages. 
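With the counters now stored in pages, the vmstat_item_print_in_thp() helper added earlier in this series lets the reporting side keep printing THP counts. A hedged sketch of a /proc/vmstat-style printer (simplified; the actual conversion lives in mm/vmstat.c):

	unsigned long pages = global_node_page_state(item);

	/* kept in pages internally, historically reported in THPs */
	if (vmstat_item_print_in_thp(item))
		pages /= HPAGE_PMD_NR;
	seq_printf(m, "%s %lu\n", node_stat_name(item), pages);
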
Link: https://lkml.kernel.org/r/20201228164110.2838-5-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alexey Dobriyan Cc: Feng Tang Cc: Greg Kroah-Hartman Cc: Hugh Dickins Cc: Johannes Weiner Cc: Joonsoo Kim Cc: Michal Hocko Cc: NeilBrown Cc: Pankaj Gupta Cc: Rafael. J. Wysocki Cc: Randy Dunlap Cc: Roman Gushchin Cc: Sami Tolvanen Cc: Shakeel Butt Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b751a9898bb6..788837f40b38 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -220,7 +220,8 @@ static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) return false; return item == NR_ANON_THPS || - item == NR_FILE_THPS; + item == NR_FILE_THPS || + item == NR_SHMEM_THPS; } /* -- cgit v1.2.3 From a1528e21f8915e16252cda1137fe29672c918361 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Wed, 24 Feb 2021 12:03:35 -0800 Subject: mm: memcontrol: convert NR_SHMEM_PMDMAPPED account to pages Currently we use struct per_cpu_nodestat to cache the vmstat counters, which leads to inaccurate statistics especially THP vmstat counters. In the systems with hundreds of processors it can be GBs of memory. For example, for a 96 CPUs system, the threshold is the maximum number of 125. And the per cpu counters can cache 23.4375 GB in total. The THP page is already a form of batched addition (it will add 512 worth of memory in one go) so skipping the batching seems like sensible. Although every THP stats update overflows the per-cpu counter, resorting to atomic global updates. But it can make the statistics more accuracy for the THP vmstat counters. So we convert the NR_SHMEM_PMDMAPPED account to pages. This patch is consistent with 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival"). Doing this also can make the unit of vmstat counters more unified. Finally, the unit of the vmstat counters are pages, kB and bytes. The B/KB suffix can tell us that the unit is bytes or kB. The rest which is without suffix are pages. Link: https://lkml.kernel.org/r/20201228164110.2838-6-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alexey Dobriyan Cc: Feng Tang Cc: Greg Kroah-Hartman Cc: Hugh Dickins Cc: Johannes Weiner Cc: Joonsoo Kim Cc: Michal Hocko Cc: NeilBrown Cc: Pankaj Gupta Cc: Rafael. J. Wysocki Cc: Randy Dunlap Cc: Roman Gushchin Cc: Sami Tolvanen Cc: Shakeel Butt Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 788837f40b38..7bdbfeeb5c8c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -221,7 +221,8 @@ static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) return item == NR_ANON_THPS || item == NR_FILE_THPS || - item == NR_SHMEM_THPS; + item == NR_SHMEM_THPS || + item == NR_SHMEM_PMDMAPPED; } /* -- cgit v1.2.3 From 380780e71895ae301505ffcec8f954ab3666a4c7 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Wed, 24 Feb 2021 12:03:39 -0800 Subject: mm: memcontrol: convert NR_FILE_PMDMAPPED account to pages Currently we use struct per_cpu_nodestat to cache the vmstat counters, which leads to inaccurate statistics especially THP vmstat counters. 
In the systems with hundreds of processors it can be GBs of memory. For example, for a 96 CPUs system, the threshold is the maximum number of 125. And the per cpu counters can cache 23.4375 GB in total. The THP page is already a form of batched addition (it will add 512 worth of memory in one go) so skipping the batching seems like sensible. Although every THP stats update overflows the per-cpu counter, resorting to atomic global updates. But it can make the statistics more accuracy for the THP vmstat counters. So we convert the NR_FILE_PMDMAPPED account to pages. This patch is consistent with 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival"). Doing this also can make the unit of vmstat counters more unified. Finally, the unit of the vmstat counters are pages, kB and bytes. The B/KB suffix can tell us that the unit is bytes or kB. The rest which is without suffix are pages. Link: https://lkml.kernel.org/r/20201228164110.2838-7-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alexey Dobriyan Cc: Feng Tang Cc: Greg Kroah-Hartman Cc: Hugh Dickins Cc: Johannes Weiner Cc: Joonsoo Kim Cc: Michal Hocko Cc: NeilBrown Cc: Pankaj Gupta Cc: Rafael. J. Wysocki Cc: Randy Dunlap Cc: Roman Gushchin Cc: Sami Tolvanen Cc: Shakeel Butt Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 7bdbfeeb5c8c..66d68e5d5a0f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -222,7 +222,8 @@ static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) return item == NR_ANON_THPS || item == NR_FILE_THPS || item == NR_SHMEM_THPS || - item == NR_SHMEM_PMDMAPPED; + item == NR_SHMEM_PMDMAPPED || + item == NR_FILE_PMDMAPPED; } /* -- cgit v1.2.3 From b6038942480e574c697ea1a80019bbe586c1d654 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Wed, 24 Feb 2021 12:03:55 -0800 Subject: mm: memcg: add swapcache stat for memcg v2 This patch adds swapcache stat for the cgroup v2. The swapcache represents the memory that is accounted against both the memory and the swap limit of the cgroup. The main motivation behind exposing the swapcache stat is for enabling users to gracefully migrate from cgroup v1's memsw counter to cgroup v2's memory and swap counters. Cgroup v1's memsw limit allows users to limit the memory+swap usage of a workload but without control on the exact proportion of memory and swap. Cgroup v2 provides separate limits for memory and swap which enables more control on the exact usage of memory and swap individually for the workload. With some little subtleties, the v1's memsw limit can be switched with the sum of the v2's memory and swap limits. However the alternative for memsw usage is not yet available in cgroup v2. Exposing per-cgroup swapcache stat enables that alternative. Adding the memory usage and swap usage and subtracting the swapcache will approximate the memsw usage. This will help in the transparent migration of the workloads depending on memsw usage and limit to v2' memory and swap counters. The reasons these applications are still interested in this approximate memsw usage are: (1) these applications are not really interested in two separate memory and swap usage metrics. A single usage metric is more simple to use and reason about for them. (2) The memsw usage metric hides the underlying system's swap setup from the applications. 
Applications with multiple instances running in a datacenter with heterogeneous systems (some have swap and some don't) will keep seeing a consistent view of their usage. [akpm@linux-foundation.org: fix CONFIG_SWAP=n build] Link: https://lkml.kernel.org/r/20210108155813.2914586-3-shakeelb@google.com Signed-off-by: Shakeel Butt Acked-by: Michal Hocko Reviewed-by: Roman Gushchin Cc: Johannes Weiner Cc: Muchun Song Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 +++ include/linux/swap.h | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 66d68e5d5a0f..fc99e9241846 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -206,6 +206,9 @@ enum node_stat_item { NR_KERNEL_SCS_KB, /* measured in KiB */ #endif NR_PAGETABLE, /* used for pagetables */ +#ifdef CONFIG_SWAP + NR_SWAPCACHE, +#endif NR_VM_NODE_STAT_ITEMS }; diff --git a/include/linux/swap.h b/include/linux/swap.h index 3f1f7ae0fbe9..0d64a2bc7e00 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -408,7 +408,11 @@ extern struct address_space *swapper_spaces[]; #define swap_address_space(entry) \ (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ >> SWAP_ADDRESS_SPACE_SHIFT]) -extern unsigned long total_swapcache_pages(void); +static inline unsigned long total_swapcache_pages(void) +{ + return global_node_page_state(NR_SWAPCACHE); +} + extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); extern void *get_shadow_from_swap_cache(swp_entry_t entry); -- cgit v1.2.3 From c1a660dea3fa616420606f1e206e6d22f7e05c30 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Wed, 24 Feb 2021 12:03:58 -0800 Subject: mm: kmem: make __memcg_kmem_(un)charge static I've noticed that __memcg_kmem_charge() and __memcg_kmem_uncharge() are not used anywhere except memcontrol.c. Yet they are not declared as non-static and are declared in memcontrol.h. This patch makes them static. Link: https://lkml.kernel.org/r/20210108020332.4096911-1-guro@fb.com Signed-off-by: Roman Gushchin Reviewed-by: Shakeel Butt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 41bbf71edd9f..7a38a1517a05 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1592,9 +1592,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, #endif #ifdef CONFIG_MEMCG_KMEM -int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, - unsigned int nr_pages); -void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); -- cgit v1.2.3 From 802f1d522d5fdaefc2b935141bc8fe03d43a99ab Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Wed, 24 Feb 2021 12:04:01 -0800 Subject: mm: page_counter: re-layout structure to reduce false sharing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When checking a memory cgroup related performance regression [1], from the perf c2c profiling data, we found high false sharing for accessing 'usage' and 'parent'. On 64 bit system, the 'usage' and 'parent' are close to each other, and easy to be in one cacheline (for cacheline size == 64+ B). 
'usage' is usally written, while 'parent' is usually read as the cgroup's hierarchical counting nature. So move the 'parent' to the end of the structure to make sure they are in different cache lines. Following are some performance data with the patch, against v5.11-rc1. [ In the data, A means a platform with 2 sockets 48C/96T, B is a platform of 4 sockests 72C/144T, and if a %stddev will be shown bigger than 2%, P100/P50 means number of test tasks equals to 100%/50% of nr_cpu] will-it-scale/malloc1 --------------------- v5.11-rc1 v5.11-rc1+patch A-P100 15782 ± 2% -0.1% 15765 ± 3% will-it-scale.per_process_ops A-P50 21511 +8.9% 23432 will-it-scale.per_process_ops B-P100 9155 +2.2% 9357 will-it-scale.per_process_ops B-P50 10967 +7.1% 11751 ± 2% will-it-scale.per_process_ops will-it-scale/pagefault2 ------------------------ v5.11-rc1 v5.11-rc1+patch A-P100 79028 +3.0% 81411 will-it-scale.per_process_ops A-P50 183960 ± 2% +4.4% 192078 ± 2% will-it-scale.per_process_ops B-P100 85966 +9.9% 94467 ± 3% will-it-scale.per_process_ops B-P50 198195 +9.8% 217526 will-it-scale.per_process_ops fio (4k/1M is block size) ------------------------- v5.11-rc1 v5.11-rc1+patch A-P50-r-4k 16881 ± 2% +1.2% 17081 ± 2% fio.read_bw_MBps A-P50-w-4k 3931 +4.5% 4111 ± 2% fio.write_bw_MBps A-P50-r-1M 15178 -0.2% 15154 fio.read_bw_MBps A-P50-w-1M 3924 +0.1% 3929 fio.write_bw_MBps [1].https://lore.kernel.org/lkml/20201102091543.GM31092@shao2-debian/ Link: https://lkml.kernel.org/r/1611040814-33449-1-git-send-email-feng.tang@intel.com Signed-off-by: Feng Tang Reviewed-by: Roman Gushchin Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_counter.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 85bd413e784e..679591301994 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -12,7 +12,6 @@ struct page_counter { unsigned long low; unsigned long high; unsigned long max; - struct page_counter *parent; /* effective memory.min and memory.min usage tracking */ unsigned long emin; @@ -27,6 +26,14 @@ struct page_counter { /* legacy */ unsigned long watermark; unsigned long failcnt; + + /* + * 'parent' is placed here to be far from 'usage' to reduce + * cache false sharing, as 'usage' is written mostly while + * parent is frequently read for cgroup's hierarchical + * counting nature. + */ + struct page_counter *parent; }; #if BITS_PER_LONG == 32 -- cgit v1.2.3 From 6eeb104e114cb6b7391c2d69ff873403858c1f35 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 24 Feb 2021 12:04:15 -0800 Subject: fs: buffer: use raw page_memcg() on locked page alloc_page_buffers() currently uses get_mem_cgroup_from_page() for charging the buffers to the page owner, which does an rcu-protected page->memcg lookup and acquires a reference. But buffer allocation has the page lock held throughout, which pins the page to the memcg and thereby the memcg - neither rcu nor holding an extra reference during the allocation are necessary. Use a raw page_memcg() instead. This was the last user of get_mem_cgroup_from_page(), delete it. 
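The fs/buffer.c side of this change is outside the include/linux diff shown here; as a hedged sketch of what the description implies, the charging setup in alloc_page_buffers() reduces to roughly:

	struct mem_cgroup *memcg, *old_memcg;

	/* the page lock is held, which pins page->memcg_data */
	memcg = page_memcg(page);
	old_memcg = set_active_memcg(memcg);
	/* ... allocate and attach the buffer_heads ... */
	set_active_memcg(old_memcg);
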
Link: https://lkml.kernel.org/r/20210209190126.97842-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Reported-by: Muchun Song Reviewed-by: Shakeel Butt Acked-by: Roman Gushchin Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 7a38a1517a05..e6dc793d587d 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -680,8 +680,6 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); -struct mem_cgroup *get_mem_cgroup_from_page(struct page *page); - struct lruvec *lock_page_lruvec(struct page *page); struct lruvec *lock_page_lruvec_irq(struct page *page); struct lruvec *lock_page_lruvec_irqsave(struct page *page, @@ -1191,11 +1189,6 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) return NULL; } -static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page) -{ - return NULL; -} - static inline void mem_cgroup_put(struct mem_cgroup *memcg) { } -- cgit v1.2.3 From 027b37b552f326aa94ef06c7ea77088b16c41e6e Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Wed, 24 Feb 2021 12:05:46 -0800 Subject: kasan: move _RET_IP_ to inline wrappers Generic mm functions that call KASAN annotations that might report a bug pass _RET_IP_ to them as an argument. This allows KASAN to include the name of the function that called the mm function in its report's header. Now that KASAN has inline wrappers for all of its annotations, move _RET_IP_ to those wrappers to simplify annotation call sites. Link: https://linux-review.googlesource.com/id/I8fb3c06d49671305ee184175a39591bc26647a67 Link: https://lkml.kernel.org/r/5c1490eddf20b436b8c4eeea83fce47687d5e4a4.1610733117.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Reviewed-by: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 0aea9e2a2a01..a7254186558a 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -185,19 +185,18 @@ static __always_inline void * __must_check kasan_init_slab_obj( } bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); -static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip) +static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object) { if (kasan_enabled()) - return __kasan_slab_free(s, object, ip); + return __kasan_slab_free(s, object, _RET_IP_); return false; } void __kasan_slab_free_mempool(void *ptr, unsigned long ip); -static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) +static __always_inline void kasan_slab_free_mempool(void *ptr) { if (kasan_enabled()) - __kasan_slab_free_mempool(ptr, ip); + __kasan_slab_free_mempool(ptr, _RET_IP_); } void * __must_check __kasan_slab_alloc(struct kmem_cache *s, @@ -241,10 +240,10 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, } void __kasan_kfree_large(void *ptr, 
unsigned long ip); -static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip) +static __always_inline void kasan_kfree_large(void *ptr) { if (kasan_enabled()) - __kasan_kfree_large(ptr, ip); + __kasan_kfree_large(ptr, _RET_IP_); } bool kasan_save_enable_multi_shot(void); @@ -277,12 +276,11 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache, { return (void *)object; } -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip) +static inline bool kasan_slab_free(struct kmem_cache *s, void *object) { return false; } -static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {} +static inline void kasan_slab_free_mempool(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) { @@ -302,7 +300,7 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } -static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} +static inline void kasan_kfree_large(void *ptr) {} #endif /* CONFIG_KASAN */ -- cgit v1.2.3 From 611806b4bf8dd97a4f3d73f5cf3c2c7730c51eb2 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Wed, 24 Feb 2021 12:05:50 -0800 Subject: kasan: fix bug detection via ksize for HW_TAGS mode The currently existing kasan_check_read/write() annotations are intended to be used for kernel modules that have KASAN compiler instrumentation disabled. Thus, they are only relevant for the software KASAN modes that rely on compiler instrumentation. However there's another use case for these annotations: ksize() checks that the object passed to it is indeed accessible before unpoisoning the whole object. This is currently done via __kasan_check_read(), which is compiled away for the hardware tag-based mode that doesn't rely on compiler instrumentation. This leads to KASAN missing detecting some memory corruptions. Provide another annotation called kasan_check_byte() that is available for all KASAN modes. As the implementation rename and reuse kasan_check_invalid_free(). Use this new annotation in ksize(). To avoid having ksize() as the top frame in the reported stack trace pass _RET_IP_ to __kasan_check_byte(). Also add a new ksize_uaf() test that checks that a use-after-free is detected via ksize() itself, and via plain accesses that happen later. Link: https://linux-review.googlesource.com/id/Iaabf771881d0f9ce1b969f2a62938e99d3308ec5 Link: https://lkml.kernel.org/r/f32ad74a60b28d8402482a38476f02bb7600f620.1610733117.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Reviewed-by: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan-checks.h | 6 ++++++ include/linux/kasan.h | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index ca5e89fb10d3..3d6d22a25bdc 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h @@ -4,6 +4,12 @@ #include +/* + * The annotations present in this file are only relevant for the software + * KASAN modes that rely on compiler instrumentation, and will be optimized + * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead. + */ + /* * __kasan_check_*: Always available when KASAN is enabled. 
This may be used * even in compilation units that selectively disable KASAN, but must use KASAN diff --git a/include/linux/kasan.h b/include/linux/kasan.h index a7254186558a..7eaf2d9effb4 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -246,6 +246,19 @@ static __always_inline void kasan_kfree_large(void *ptr) __kasan_kfree_large(ptr, _RET_IP_); } +/* + * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for + * the hardware tag-based mode that doesn't rely on compiler instrumentation. + */ +bool __kasan_check_byte(const void *addr, unsigned long ip); +static __always_inline bool kasan_check_byte(const void *addr) +{ + if (kasan_enabled()) + return __kasan_check_byte(addr, _RET_IP_); + return true; +} + + bool kasan_save_enable_multi_shot(void); void kasan_restore_multi_shot(bool enabled); @@ -301,6 +314,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, return (void *)object; } static inline void kasan_kfree_large(void *ptr) {} +static inline bool kasan_check_byte(const void *address) +{ + return true; +} #endif /* CONFIG_KASAN */ -- cgit v1.2.3 From 93f503c3fcd168a43e4a6c875fe2cfafaf8439dc Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 24 Feb 2021 12:06:10 -0800 Subject: mm: fix prototype warning from kernel test robot Patch series "mm: clean up names and parameters of memmap_init_xxxx functions", v5. This patchset corrects inappropriate function names of memmap_init_xxx, and simplify parameters of functions in the code flow. And also fix a prototype warning reported by lkp. This patch (of 5); Kernel test robot calling make with 'W=1' is triggering warning like below for memmap_init_zone() function. mm/page_alloc.c:6259:23: warning: no previous prototype for 'memmap_init_zone' [-Wmissing-prototypes] 6259 | void __meminit __weak memmap_init_zone(unsigned long size, int nid, | ^~~~~~~~~~~~~~~~ Fix it by adding the function declaration in include/linux/mm.h. Since memmap_init_zone() has a generic version with '__weak', the declaratoin in ia64 header file can be simply removed. Link: https://lkml.kernel.org/r/20210122135956.5946-1-bhe@redhat.com Link: https://lkml.kernel.org/r/20210122135956.5946-2-bhe@redhat.com Signed-off-by: Baoquan He Reported-by: kernel test robot Reviewed-by: Mike Rapoport Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 7deb7168104d..1c1eabdf112c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2408,6 +2408,8 @@ extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int migratetype); +extern void memmap_init(unsigned long size, int nid, + unsigned long zone, unsigned long range_start_pfn); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); -- cgit v1.2.3 From ab28cb6e1e5e59eb8bf3ad399133617414301d3a Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 24 Feb 2021 12:06:14 -0800 Subject: mm: rename memmap_init() and memmap_init_zone() The current memmap_init_zone() only handles memory region inside one zone, actually memmap_init() does the memmap init of one zone. So rename both of them accordingly. 
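To keep the renames straight, a hedged summary of the mapping described above (a reading aid, not a diff hunk):

	/* memmap_init_zone()  ->  memmap_init_range(): inits one pfn range  */
	/* memmap_init()       ->  memmap_init_zone():  inits one whole zone */
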
Link: https://lkml.kernel.org/r/20210122135956.5946-3-bhe@redhat.com Signed-off-by: Baoquan He Reviewed-by: Mike Rapoport Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 1c1eabdf112c..56d0eeae0ce3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2405,10 +2405,10 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_zone(unsigned long, int, unsigned long, +extern void memmap_init_range(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int migratetype); -extern void memmap_init(unsigned long size, int nid, +extern void memmap_init_zone(unsigned long size, int nid, unsigned long zone, unsigned long range_start_pfn); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); -- cgit v1.2.3 From 3256ff83c566235e812498ee1dc806c45a5d5af7 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 24 Feb 2021 12:06:17 -0800 Subject: mm: simplify parater of function memmap_init_zone() As David suggested, simply passing 'struct zone *zone' is enough. We can get all needed information from 'struct zone*' easily. Link: https://lkml.kernel.org/r/20210122135956.5946-4-bhe@redhat.com Signed-off-by: Baoquan He Suggested-by: David Hildenbrand Reviewed-by: Mike Rapoport Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 56d0eeae0ce3..2601a5cce0ef 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2408,8 +2408,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_range(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int migratetype); -extern void memmap_init_zone(unsigned long size, int nid, - unsigned long zone, unsigned long range_start_pfn); +extern void memmap_init_zone(struct zone *zone); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); -- cgit v1.2.3 From a0cd7a7c4bc004587d1f4785a320f58e72d880eb Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 24 Feb 2021 12:06:32 -0800 Subject: mm: simplify free_highmem_page() and free_reserved_page() adjust_managed_page_count() as called by free_reserved_page() properly handles pages in a highmem zone, so we can reuse it for free_highmem_page(). We can now get rid of totalhigh_pages_inc() and simplify free_reserved_page(). Link: https://lkml.kernel.org/r/20210126182113.19892-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Oscar Salvador Reviewed-by: Anshuman Khandual Cc: Thomas Gleixner Cc: "Peter Zijlstra (Intel)" Cc: Mike Rapoport Cc: Michal Hocko Cc: Wei Yang Cc: "Gustavo A. R. 
Silva" Cc: Sam Ravnborg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/highmem-internal.h | 5 ----- include/linux/mm.h | 16 ++-------------- 2 files changed, 2 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index 1bbe96dc8be6..7902c7d8b55f 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -127,11 +127,6 @@ static inline unsigned long totalhigh_pages(void) return (unsigned long)atomic_long_read(&_totalhigh_pages); } -static inline void totalhigh_pages_inc(void) -{ - atomic_long_inc(&_totalhigh_pages); -} - static inline void totalhigh_pages_add(long count) { atomic_long_add(count, &_totalhigh_pages); diff --git a/include/linux/mm.h b/include/linux/mm.h index 2601a5cce0ef..76cab132c295 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2310,32 +2310,20 @@ extern void free_initmem(void); extern unsigned long free_reserved_area(void *start, void *end, int poison, const char *s); -#ifdef CONFIG_HIGHMEM -/* - * Free a highmem page into the buddy system, adjusting totalhigh_pages - * and totalram_pages. - */ -extern void free_highmem_page(struct page *page); -#endif - extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); /* Free the reserved page into the buddy system, so it gets managed. */ -static inline void __free_reserved_page(struct page *page) +static inline void free_reserved_page(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); -} - -static inline void free_reserved_page(struct page *page) -{ - __free_reserved_page(page); adjust_managed_page_count(page, 1); } +#define free_highmem_page(page) free_reserved_page(page) static inline void mark_page_reserved(struct page *page) { -- cgit v1.2.3 From 3b2ebeaf98a028d5dd4ec63095855ef507920276 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 24 Feb 2021 12:06:35 -0800 Subject: mm/gfp: add kernel-doc for gfp_t The generated html will link to the definition of the gfp_t automatically once we define it. Move the one-paragraph overview of GFP flags from the documentation directory into gfp.h and pull gfp.h into the documentation. This generates warnings with clang (https://lkml.kernel.org/r/20210219195509.GA59987@24bbad8f3778), so use a #if 0 to hide it from the compiler for now. Link: https://lkml.kernel.org/r/20210215204909.3824509-1-willy@infradead.org Link: https://lkml.kernel.org/r/20210220003049.GZ2858050@casper.infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Mike Rapoport Cc: Nathan Chancellor Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 80544d5c08e7..220cd553a9e7 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -8,6 +8,20 @@ #include #include +/* The typedef is in types.h but we want the documentation here */ +#if 0 +/** + * typedef gfp_t - Memory allocation flags. + * + * GFP flags are commonly used throughout Linux to indicate how memory + * should be allocated. The GFP acronym stands for get_free_pages(), + * the underlying memory allocation function. Not every GFP flag is + * supported by every function which may allocate memory. 
Most users + * will want to use a plain ``GFP_KERNEL``. + */ +typedef unsigned int __bitwise gfp_t; +#endif + struct vm_area_struct; /* -- cgit v1.2.3 From 0fa5bc4023c188082024833b3deffd5543b93bc9 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Wed, 24 Feb 2021 12:07:12 -0800 Subject: mm/hugetlb: grab head page refcount once for group of subpages Patch series "mm/hugetlb: follow_hugetlb_page() improvements", v2. While looking at ZONE_DEVICE struct page reuse particularly the last patch[0], I found two possible improvements for follow_hugetlb_page() which is solely used for get_user_pages()/pin_user_pages(). The first patch batches page refcount updates while the second tidies up storing the subpages/vmas. Both together bring the cost of slow variant of gup() cost from ~87.6k usecs to ~5.8k usecs. libhugetlbfs tests seem to pass as well gup_test benchmarks with hugetlbfs vmas. This patch (of 2): follow_hugetlb_page() once it locks the pmd/pud, checks all its N subpages in a huge page and grabs a reference for each one. Similar to gup-fast, have follow_hugetlb_page() grab the head page refcount only after counting all its subpages that are part of the just faulted huge page. Consequently we reduce the number of atomics necessary to pin said huge page, which improves non-fast gup() considerably: - 16G with 1G huge page size gup_test -f /mnt/huge/file -m 16384 -r 10 -L -S -n 512 -w PIN_LONGTERM_BENCHMARK: ~87.6k us -> ~12.8k us Link: https://lkml.kernel.org/r/20210128182632.24562-1-joao.m.martins@oracle.com Link: https://lkml.kernel.org/r/20210128182632.24562-2-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Mike Kravetz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 76cab132c295..77e64e3eac80 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1187,6 +1187,9 @@ static inline void get_page(struct page *page) } bool __must_check try_grab_page(struct page *page, unsigned int flags); +__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs, + unsigned int flags); + static inline __must_check bool try_get_page(struct page *page) { -- cgit v1.2.3 From 6c26d3108393211ecfd44d89404cfb744027bafd Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Wed, 24 Feb 2021 12:07:19 -0800 Subject: mm/hugetlb: fix some comment typos Fix typos sasitfy to satisfy, reservtion to reservation, hugegpage to hugepage and uniprocesor to uniprocessor in comments. Link: https://lkml.kernel.org/r/20210128112028.64831-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Souptick Joarder Cc: Mike Kravetz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index b5807f23caf8..ac3cb239a7ec 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -37,7 +37,7 @@ struct hugepage_subpool { struct hstate *hstate; long min_hpages; /* Minimum huge pages or -1 if no minimum. */ long rsv_hpages; /* Pages reserved against global pool to */ - /* sasitfy minimum size. */ + /* satisfy minimum size. 
*/ }; struct resv_map { -- cgit v1.2.3 From bae84953815793f68ddd8edeadd3f4e32676a2c8 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 24 Feb 2021 12:07:32 -0800 Subject: mm/pmem: avoid inserting hugepage PTE entry with fsdax if hugepage support is disabled Differentiate between hardware not supporting hugepages and user disabling THP via 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' For the devdax namespace, the kernel handles the above via the supported_alignment attribute and failing to initialize the namespace if the namespace align value is not supported on the platform. For the fsdax namespace, the kernel will continue to initialize the namespace. This can result in the kernel creating a huge pte entry even though the hardware don't support the same. We do want hugepage support with pmem even if the end-user disabled THP via sysfs file (/sys/kernel/mm/transparent_hugepage/enabled). Hence differentiate between hardware/firmware lacking support vs user-controlled disable of THP and prevent a huge fault if the hardware lacks hugepage support. Link: https://lkml.kernel.org/r/20210205023956.417587-1-aneesh.kumar@linux.ibm.com Signed-off-by: Aneesh Kumar K.V Reviewed-by: Dan Williams Cc: "Kirill A . Shutemov" Cc: Jan Kara Cc: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 6a19f35f836b..ba973efcd369 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -78,6 +78,7 @@ static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, } enum transparent_hugepage_flag { + TRANSPARENT_HUGEPAGE_NEVER_DAX, TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, @@ -123,6 +124,13 @@ extern unsigned long transparent_hugepage_flags; */ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) { + + /* + * If the hardware/firmware marked hugepage support disabled. + */ + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) + return false; + if (vma->vm_flags & VM_NOHUGEPAGE) return false; @@ -134,12 +142,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) return true; - /* - * For dax vmas, try to always use hugepage mappings. If the kernel does - * not support hugepages, fsdax mappings will fallback to PAGE_SIZE - * mappings, and device-dax namespaces, that try to guarantee a given - * mapping size, will fail to enable - */ + if (vma_is_dax(vma)) return true; -- cgit v1.2.3 From c2135f7c570bc274035834848d9bf46ea89ba763 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Wed, 24 Feb 2021 12:08:01 -0800 Subject: mm/vmscan: __isolate_lru_page_prepare() cleanup The function just returns 2 results, so using a 'switch' to deal with its result is unnecessary. Also simplify it to a bool func as Vlastimil suggested. Also remove 'goto' by reusing list_move(), and take Matthew Wilcox's suggestion to update comments in function. 
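A hedged sketch of the resulting caller in isolate_lru_pages() (simplified from the description above; src stands for the source LRU list being scanned):

	if (!__isolate_lru_page_prepare(page, mode)) {
		/* it is being freed elsewhere: put it back, no goto needed */
		list_move(&page->lru, src);
		continue;
	}
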
Link: https://lkml.kernel.org/r/728874d7-2d93-4049-68c1-dcc3b2d52ccd@linux.alibaba.com Signed-off-by: Alex Shi Reviewed-by: Andrew Morton Acked-by: Vlastimil Babka Cc: Matthew Wilcox Cc: Hugh Dickins Cc: Yu Zhao Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 0d64a2bc7e00..32f665b1ee85 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page, extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); -extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode); +extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, -- cgit v1.2.3 From f90d8191ac864df33b1898bc7edc54eaa24e22bc Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 24 Feb 2021 12:08:13 -0800 Subject: include/linux/mm_inline.h: shuffle lru list addition and deletion functions These functions will call page_lru() in the following patches. Move them below page_lru() to avoid the forward declaration. Link: https://lore.kernel.org/linux-mm/20201207220949.830352-3-yuzhao@google.com/ Link: https://lkml.kernel.org/r/20210122220600.906146-3-yuzhao@google.com Signed-off-by: Yu Zhao Acked-by: Vlastimil Babka Reviewed-by: Miaohe Lin Cc: Alex Shi Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Roman Gushchin Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 8fc71e9d7bb0..2889741f450a 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -45,27 +45,6 @@ static __always_inline void update_lru_size(struct lruvec *lruvec, #endif } -static __always_inline void add_page_to_lru_list(struct page *page, - struct lruvec *lruvec, enum lru_list lru) -{ - update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); - list_add(&page->lru, &lruvec->lists[lru]); -} - -static __always_inline void add_page_to_lru_list_tail(struct page *page, - struct lruvec *lruvec, enum lru_list lru) -{ - update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); - list_add_tail(&page->lru, &lruvec->lists[lru]); -} - -static __always_inline void del_page_from_lru_list(struct page *page, - struct lruvec *lruvec, enum lru_list lru) -{ - list_del(&page->lru); - update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page)); -} - /** * page_lru_base_type - which LRU list type should a page be on? 
* @page: the page to test @@ -125,4 +104,25 @@ static __always_inline enum lru_list page_lru(struct page *page) } return lru; } + +static __always_inline void add_page_to_lru_list(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); + list_add(&page->lru, &lruvec->lists[lru]); +} + +static __always_inline void add_page_to_lru_list_tail(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); + list_add_tail(&page->lru, &lruvec->lists[lru]); +} + +static __always_inline void del_page_from_lru_list(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + list_del(&page->lru); + update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page)); +} #endif -- cgit v1.2.3 From 3a9c9788a3149d9745b7eb2eae811e57ef3b127c Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 24 Feb 2021 12:08:17 -0800 Subject: mm: don't pass "enum lru_list" to lru list addition functions The "enum lru_list" parameter to add_page_to_lru_list() and add_page_to_lru_list_tail() is redundant in the sense that it can be extracted from the "struct page" parameter by page_lru(). A caveat is that we need to make sure PageActive() or PageUnevictable() is correctly set or cleared before calling these two functions. And they are indeed. Link: https://lore.kernel.org/linux-mm/20201207220949.830352-4-yuzhao@google.com/ Link: https://lkml.kernel.org/r/20210122220600.906146-4-yuzhao@google.com Signed-off-by: Yu Zhao Cc: Alex Shi Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Roman Gushchin Cc: Vladimir Davydov Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 2889741f450a..130ba3201d3f 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -106,15 +106,19 @@ static __always_inline enum lru_list page_lru(struct page *page) } static __always_inline void add_page_to_lru_list(struct page *page, - struct lruvec *lruvec, enum lru_list lru) + struct lruvec *lruvec) { + enum lru_list lru = page_lru(page); + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); } static __always_inline void add_page_to_lru_list_tail(struct page *page, - struct lruvec *lruvec, enum lru_list lru) + struct lruvec *lruvec) { + enum lru_list lru = page_lru(page); + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add_tail(&page->lru, &lruvec->lists[lru]); } -- cgit v1.2.3 From 46ae6b2cc2a47904a368d238425531ea91f3a2a5 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 24 Feb 2021 12:08:25 -0800 Subject: mm/swap.c: don't pass "enum lru_list" to del_page_from_lru_list() The parameter is redundant in the sense that it can be potentially extracted from the "struct page" parameter by page_lru(). We need to make sure that existing PageActive() or PageUnevictable() remains until the function returns. A few places don't conform, and simple reordering fixes them. This patch may have left page_off_lru() seemingly odd, and we'll take care of it in the next patch. 
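The reordering requirement can be illustrated with a hedged sketch of a deactivation path (simplified, roughly what mm/swap.c ends up doing): the page flags must still describe the current list when del_page_from_lru_list() reads them, and must already describe the target list before add_page_to_lru_list() runs.

	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec);	/* derives lru from flags */
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec);	/* now picks the inactive lru */
	}
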
Link: https://lore.kernel.org/linux-mm/20201207220949.830352-6-yuzhao@google.com/ Link: https://lkml.kernel.org/r/20210122220600.906146-6-yuzhao@google.com Signed-off-by: Yu Zhao Cc: Alex Shi Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Roman Gushchin Cc: Vladimir Davydov Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 130ba3201d3f..ffacc6273678 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -124,9 +124,10 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page, } static __always_inline void del_page_from_lru_list(struct page *page, - struct lruvec *lruvec, enum lru_list lru) + struct lruvec *lruvec) { list_del(&page->lru); - update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page)); + update_lru_size(lruvec, page_lru(page), page_zonenum(page), + -thp_nr_pages(page)); } #endif -- cgit v1.2.3 From 875601796267214f286d3581fe74f2805d060fe8 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 24 Feb 2021 12:08:28 -0800 Subject: mm: add __clear_page_lru_flags() to replace page_off_lru() Similar to page_off_lru(), the new function does non-atomic clearing of PageLRU() in addition to PageActive() and PageUnevictable(), on a page that has no references left. If PageActive() and PageUnevictable() are both set, refuse to clear either and leave them to bad_page(). This is a behavior change that is meant to help debug. Link: https://lore.kernel.org/linux-mm/20201207220949.830352-7-yuzhao@google.com/ Link: https://lkml.kernel.org/r/20210122220600.906146-7-yuzhao@google.com Signed-off-by: Yu Zhao Cc: Alex Shi Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Roman Gushchin Cc: Vladimir Davydov Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index ffacc6273678..ef3fd79222e5 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -61,27 +61,19 @@ static inline enum lru_list page_lru_base_type(struct page *page) } /** - * page_off_lru - which LRU list was page on? clearing its lru flags. - * @page: the page to test - * - * Returns the LRU list a page was on, as an index into the array of LRU - * lists; and clears its Unevictable or Active flags, ready for freeing. 
+ * __clear_page_lru_flags - clear page lru flags before releasing a page + * @page: the page that was on lru and now has a zero reference */ -static __always_inline enum lru_list page_off_lru(struct page *page) +static __always_inline void __clear_page_lru_flags(struct page *page) { - enum lru_list lru; + __ClearPageLRU(page); - if (PageUnevictable(page)) { - __ClearPageUnevictable(page); - lru = LRU_UNEVICTABLE; - } else { - lru = page_lru_base_type(page); - if (PageActive(page)) { - __ClearPageActive(page); - lru += LRU_ACTIVE; - } - } - return lru; + /* this shouldn't happen, so leave the flags to bad_page() */ + if (PageActive(page) && PageUnevictable(page)) + return; + + __ClearPageActive(page); + __ClearPageUnevictable(page); } /** -- cgit v1.2.3 From bc7112719e1e80e4208eef3fc9bd8d2b6c263e7d Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 24 Feb 2021 12:08:32 -0800 Subject: mm: VM_BUG_ON lru page flags Move scattered VM_BUG_ONs to two essential places that cover all lru list additions and deletions. Link: https://lore.kernel.org/linux-mm/20201207220949.830352-8-yuzhao@google.com/ Link: https://lkml.kernel.org/r/20210122220600.906146-8-yuzhao@google.com Signed-off-by: Yu Zhao Cc: Alex Shi Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Roman Gushchin Cc: Vladimir Davydov Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index ef3fd79222e5..6d907a4dd6ad 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -66,6 +66,8 @@ static inline enum lru_list page_lru_base_type(struct page *page) */ static __always_inline void __clear_page_lru_flags(struct page *page) { + VM_BUG_ON_PAGE(!PageLRU(page), page); + __ClearPageLRU(page); /* this shouldn't happen, so leave the flags to bad_page() */ @@ -87,6 +89,8 @@ static __always_inline enum lru_list page_lru(struct page *page) { enum lru_list lru; + VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); + if (PageUnevictable(page)) lru = LRU_UNEVICTABLE; else { -- cgit v1.2.3 From c1770e34f3e7640887d8129fc05d13fe17101301 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 24 Feb 2021 12:08:36 -0800 Subject: include/linux/mm_inline.h: fold page_lru_base_type() into its sole caller We've removed all other references to this function. Link: https://lore.kernel.org/linux-mm/20201207220949.830352-9-yuzhao@google.com/ Link: https://lkml.kernel.org/r/20210122220600.906146-9-yuzhao@google.com Signed-off-by: Yu Zhao Reviewed-by: Alex Shi Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Roman Gushchin Cc: Vladimir Davydov Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 27 ++++++--------------------- 1 file changed, 6 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 6d907a4dd6ad..7183c7a03f09 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -45,21 +45,6 @@ static __always_inline void update_lru_size(struct lruvec *lruvec, #endif } -/** - * page_lru_base_type - which LRU list type should a page be on? - * @page: the page to test - * - * Used for LRU list index arithmetic. - * - * Returns the base LRU type - file or anon - @page should be on. 
- */ -static inline enum lru_list page_lru_base_type(struct page *page) -{ - if (page_is_file_lru(page)) - return LRU_INACTIVE_FILE; - return LRU_INACTIVE_ANON; -} - /** * __clear_page_lru_flags - clear page lru flags before releasing a page * @page: the page that was on lru and now has a zero reference @@ -92,12 +77,12 @@ static __always_inline enum lru_list page_lru(struct page *page) VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); if (PageUnevictable(page)) - lru = LRU_UNEVICTABLE; - else { - lru = page_lru_base_type(page); - if (PageActive(page)) - lru += LRU_ACTIVE; - } + return LRU_UNEVICTABLE; + + lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON; + if (PageActive(page)) + lru += LRU_ACTIVE; + return lru; } -- cgit v1.2.3 From 289ccba18af436f2b65ec69b2be1b086ec9f24a4 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 24 Feb 2021 12:08:40 -0800 Subject: include/linux/mm_inline.h: fold __update_lru_size() into its sole caller All other references to the function were removed after commit a892cb6b977f ("mm/vmscan.c: use update_lru_size() in update_lru_sizes()"). Link: https://lore.kernel.org/linux-mm/20201207220949.830352-10-yuzhao@google.com/ Link: https://lkml.kernel.org/r/20210122220600.906146-10-yuzhao@google.com Signed-off-by: Yu Zhao Reviewed-by: Alex Shi Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Roman Gushchin Cc: Vladimir Davydov Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 7183c7a03f09..355ea1ee32bd 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -24,7 +24,7 @@ static inline int page_is_file_lru(struct page *page) return !PageSwapBacked(page); } -static __always_inline void __update_lru_size(struct lruvec *lruvec, +static __always_inline void update_lru_size(struct lruvec *lruvec, enum lru_list lru, enum zone_type zid, int nr_pages) { @@ -33,13 +33,6 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec, __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); __mod_zone_page_state(&pgdat->node_zones[zid], NR_ZONE_LRU_BASE + lru, nr_pages); -} - -static __always_inline void update_lru_size(struct lruvec *lruvec, - enum lru_list lru, enum zone_type zid, - int nr_pages) -{ - __update_lru_size(lruvec, lru, zid, nr_pages); #ifdef CONFIG_MEMCG mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); #endif -- cgit v1.2.3 From 2091339d59e7808e9b39a79f48e3d17ef7389b97 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 24 Feb 2021 12:08:44 -0800 Subject: mm/vmscan.c: make lruvec_lru_size() static All other references to the function were removed after commit b910718a948a ("mm: vmscan: detect file thrashing at the reclaim root"). 
Link: https://lore.kernel.org/linux-mm/20201207220949.830352-11-yuzhao@google.com/ Link: https://lkml.kernel.org/r/20210122220600.906146-11-yuzhao@google.com Signed-off-by: Yu Zhao Reviewed-by: Alex Shi Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Roman Gushchin Cc: Vladimir Davydov Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index fc99e9241846..9198b7ade85f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -892,8 +892,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) #endif } -extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); - #ifdef CONFIG_HAVE_MEMORYLESS_NODES int local_memory_node(int node_id); #else -- cgit v1.2.3 From d6995da311221a05c8aef3bda2629e5cb14c7302 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 24 Feb 2021 12:08:51 -0800 Subject: hugetlb: use page.private for hugetlb specific page flags Patch series "create hugetlb flags to consolidate state", v3. While discussing a series of hugetlb fixes in [1], it became evident that the hugetlb specific page state information is stored in a somewhat haphazard manner. Code dealing with state information would be easier to read, understand and maintain if this information was stored in a consistent manner. This series uses page.private of the hugetlb head page for storing a set of hugetlb specific page flags. Routines are provided to test, set and clear the flags. [1] https://lore.kernel.org/r/20210106084739.63318-1-songmuchun@bytedance.com This patch (of 4): As hugetlbfs evolved, state information about hugetlb pages was added. One 'convenient' way of doing this was to use available fields in tail pages. Over time, it has become difficult to know the meaning or contents of fields simply by looking at a small bit of code. Sometimes, the naming is just confusing. For example: The PagePrivate flag indicates a huge page reservation was consumed and needs to be restored if an error is encountered and the page is freed before it is instantiated. The page.private field contains the pointer to a subpool if the page is associated with one. In an effort to make the code more readable, use page.private to contain hugetlb specific page flags. These flags will have test, set and clear functions similar to those used for 'normal' page flags. More importantly, an enum of flag values will be created with names that actually reflect their purpose. In this patch, - Create infrastructure for hugetlb specific page flag functions - Move subpool pointer to page[1].private to make way for flags; create routines with meaningful names to modify the subpool field - Use new HPageRestoreReserve flag instead of PagePrivate Conversion of other state information will happen in subsequent patches.
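Illustrative usage sketch (not part of the patch; the reservation handling and the spool variable are assumptions): with the accessors added in the hunk below, call sites read roughly as:

	/* allocation path: record that a reservation was consumed */
	SetHPageRestoreReserve(page);

	/* error path, before the page is instantiated */
	if (HPageRestoreReserve(page)) {
		/* restore the reservation here, then clear the flag */
		ClearHPageRestoreReserve(page);
	}

	/* the subpool pointer now lives in page[1].private */
	hugetlb_set_page_subpool(page, spool);
	spool = hugetlb_page_subpool(page);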
Link: https://lkml.kernel.org/r/20210122195231.324857-1-mike.kravetz@oracle.com Link: https://lkml.kernel.org/r/20210122195231.324857-2-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Reviewed-by: Oscar Salvador Acked-by: Michal Hocko Cc: Naoya Horiguchi Cc: Muchun Song Cc: David Hildenbrand Cc: Matthew Wilcox Cc: Miaohe Lin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 68 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ac3cb239a7ec..249c65e1d8ca 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -472,6 +472,60 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long flags); #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ +/* + * hugetlb page specific state flags. These flags are located in page.private + * of the hugetlb head page. Functions created via the below macros should be + * used to manipulate these flags. + * + * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at + * allocation time. Cleared when page is fully instantiated. Free + * routine checks flag to restore a reservation on error paths. + */ +enum hugetlb_page_flags { + HPG_restore_reserve = 0, + __NR_HPAGEFLAGS, +}; + +/* + * Macros to create test, set and clear function definitions for + * hugetlb specific page flags. + */ +#ifdef CONFIG_HUGETLB_PAGE +#define TESTHPAGEFLAG(uname, flname) \ +static inline int HPage##uname(struct page *page) \ + { return test_bit(HPG_##flname, &(page->private)); } + +#define SETHPAGEFLAG(uname, flname) \ +static inline void SetHPage##uname(struct page *page) \ + { set_bit(HPG_##flname, &(page->private)); } + +#define CLEARHPAGEFLAG(uname, flname) \ +static inline void ClearHPage##uname(struct page *page) \ + { clear_bit(HPG_##flname, &(page->private)); } +#else +#define TESTHPAGEFLAG(uname, flname) \ +static inline int HPage##uname(struct page *page) \ + { return 0; } + +#define SETHPAGEFLAG(uname, flname) \ +static inline void SetHPage##uname(struct page *page) \ + { } + +#define CLEARHPAGEFLAG(uname, flname) \ +static inline void ClearHPage##uname(struct page *page) \ + { } +#endif + +#define HPAGEFLAG(uname, flname) \ + TESTHPAGEFLAG(uname, flname) \ + SETHPAGEFLAG(uname, flname) \ + CLEARHPAGEFLAG(uname, flname) \ + +/* + * Create functions associated with hugetlb page flags + */ +HPAGEFLAG(RestoreReserve, restore_reserve) + #ifdef CONFIG_HUGETLB_PAGE #define HSTATE_NAME_LEN 32 @@ -531,6 +585,20 @@ extern unsigned int default_hstate_idx; #define default_hstate (hstates[default_hstate_idx]) +/* + * hugetlb page subpool pointer located in hpage[1].private + */ +static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage) +{ + return (struct hugepage_subpool *)(hpage+1)->private; +} + +static inline void hugetlb_set_page_subpool(struct page *hpage, + struct hugepage_subpool *subpool) +{ + set_page_private(hpage+1, (unsigned long)subpool); +} + static inline struct hstate *hstate_file(struct file *f) { return hstate_inode(file_inode(f)); -- cgit v1.2.3 From 8f251a3d5ce3bdea73bd045ed35db64f32e0d0d9 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 24 Feb 2021 12:08:56 -0800 Subject: hugetlb: convert page_huge_active() HPageMigratable flag Use the new hugetlb page specific flag HPageMigratable to replace the page_huge_active interfaces. By its name, page_huge_active implied that a huge page was on the active list.
However, that is not really what code checking the flag wanted to know. It really wanted to determine if the huge page could be migrated. This happens when the page is actually added to the page cache and/or task page table. This is the reasoning behind the name change. The VM_BUG_ON_PAGE() calls in the *_huge_active() interfaces are not really necessary as we KNOW the page is a hugetlb page. Therefore, they are removed. The routine page_huge_active checked for PageHeadHuge before testing the active bit. This is unnecessary in the case where we hold a reference or lock and know it is a hugetlb head page. page_huge_active is also called without holding a reference or lock (scan_movable_pages), and can race with code freeing the page. The extra check in page_huge_active shortened the race window, but did not prevent the race. Offline code calling scan_movable_pages already deals with these races, so removing the check is acceptable. Add comment to racy code. [songmuchun@bytedance.com: remove set_page_huge_active() declaration from include/linux/hugetlb.h] Link: https://lkml.kernel.org/r/CAMZfGtUda+KoAZscU0718TN61cSFwp4zy=y2oZ=+6Z2TAZZwng@mail.gmail.com Link: https://lkml.kernel.org/r/20210122195231.324857-3-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Reviewed-by: Oscar Salvador Reviewed-by: Muchun Song Reviewed-by: Miaohe Lin Acked-by: Michal Hocko Cc: David Hildenbrand Cc: Matthew Wilcox Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 7 +++++-- include/linux/page-flags.h | 6 ------ 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 249c65e1d8ca..77f2a032fe32 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -480,9 +480,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at * allocation time. Cleared when page is fully instantiated. Free * routine checks flag to restore a reservation on error paths. + * HPG_migratable - Set after a newly allocated page is added to the page + * cache and/or page tables. Indicates the page is a candidate for + * migration. 
*/ enum hugetlb_page_flags { HPG_restore_reserve = 0, + HPG_migratable, __NR_HPAGEFLAGS, }; @@ -525,6 +529,7 @@ static inline void ClearHPage##uname(struct page *page) \ * Create functions associated with hugetlb page flags */ HPAGEFLAG(RestoreReserve, restore_reserve) +HPAGEFLAG(Migratable, migratable) #ifdef CONFIG_HUGETLB_PAGE @@ -838,8 +843,6 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, } #endif -void set_page_huge_active(struct page *page); - #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index ec5d0290e0ee..db914477057b 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -592,15 +592,9 @@ static inline void ClearPageCompound(struct page *page) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); int PageHeadHuge(struct page *page); -bool page_huge_active(struct page *page); #else TESTPAGEFLAG_FALSE(Huge) TESTPAGEFLAG_FALSE(HeadHuge) - -static inline bool page_huge_active(struct page *page) -{ - return 0; -} #endif -- cgit v1.2.3 From 9157c31186c358c5750dea50ac5705d61d7fc917 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 24 Feb 2021 12:09:00 -0800 Subject: hugetlb: convert PageHugeTemporary() to HPageTemporary flag Use new hugetlb specific HPageTemporary flag to replace the PageHugeTemporary() interfaces. PageHugeTemporary does contain a PageHuge() check. However, this interface is only used within hugetlb code where we know we are dealing with a hugetlb page. Therefore, the check can be eliminated. Link: https://lkml.kernel.org/r/20210122195231.324857-5-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Reviewed-by: Oscar Salvador Reviewed-by: Muchun Song Acked-by: Michal Hocko Cc: David Hildenbrand Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 77f2a032fe32..e97498a21010 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -483,10 +483,15 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * HPG_migratable - Set after a newly allocated page is added to the page * cache and/or page tables. Indicates the page is a candidate for * migration. + * HPG_temporary - - Set on a page that is temporarily allocated from the buddy + * allocator. Typically used for migration target pages when no pages + * are available in the pool. The hugetlb free page path will + * immediately free pages with this flag set to the buddy allocator. */ enum hugetlb_page_flags { HPG_restore_reserve = 0, HPG_migratable, + HPG_temporary, __NR_HPAGEFLAGS, }; @@ -530,6 +535,7 @@ static inline void ClearHPage##uname(struct page *page) \ */ HPAGEFLAG(RestoreReserve, restore_reserve) HPAGEFLAG(Migratable, migratable) +HPAGEFLAG(Temporary, temporary) #ifdef CONFIG_HUGETLB_PAGE -- cgit v1.2.3 From 6c037149014027d50175da5be4ae4531374dcbe0 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 24 Feb 2021 12:09:04 -0800 Subject: hugetlb: convert PageHugeFreed to HPageFreed flag Use new hugetlb specific HPageFreed flag to replace the PageHugeFreed interfaces. 
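For reference (not part of the patch): with CONFIG_HUGETLB_PAGE enabled, the HPAGEFLAG(Freed, freed) line added in the hunk below expands, via the macros introduced earlier in the series, to roughly:

	static inline int HPageFreed(struct page *page)
		{ return test_bit(HPG_freed, &(page->private)); }
	static inline void SetHPageFreed(struct page *page)
		{ set_bit(HPG_freed, &(page->private)); }
	static inline void ClearHPageFreed(struct page *page)
		{ clear_bit(HPG_freed, &(page->private)); }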
Link: https://lkml.kernel.org/r/20210122195231.324857-6-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Reviewed-by: Oscar Salvador Reviewed-by: Muchun Song Acked-by: Michal Hocko Cc: David Hildenbrand Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Michal Hocko Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e97498a21010..eb2682fd97b6 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -487,11 +487,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * allocator. Typically used for migration target pages when no pages * are available in the pool. The hugetlb free page path will * immediately free pages with this flag set to the buddy allocator. + * HPG_freed - Set when page is on the free lists. */ enum hugetlb_page_flags { HPG_restore_reserve = 0, HPG_migratable, HPG_temporary, + HPG_freed, __NR_HPAGEFLAGS, }; @@ -536,6 +538,7 @@ static inline void ClearHPage##uname(struct page *page) \ HPAGEFLAG(RestoreReserve, restore_reserve) HPAGEFLAG(Migratable, migratable) HPAGEFLAG(Temporary, temporary) +HPAGEFLAG(Freed, freed) #ifdef CONFIG_HUGETLB_PAGE -- cgit v1.2.3 From d95c0337774b1dc74d271e7475a96fe8838332ea Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 24 Feb 2021 12:09:08 -0800 Subject: include/linux/hugetlb.h: add synchronization information for new hugetlb specific flags Add comments, no functional change. Link: https://lkml.kernel.org/r/62a80585-2a73-10cc-4a2d-5721540d4ad2@oracle.com Signed-off-by: Mike Kravetz Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index eb2682fd97b6..3a541c6cf5c3 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -480,14 +480,24 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at * allocation time. Cleared when page is fully instantiated. Free * routine checks flag to restore a reservation on error paths. + * Synchronization: Examined or modified by code that knows it has + * the only reference to page. i.e. After allocation but before use + * or when the page is being freed. * HPG_migratable - Set after a newly allocated page is added to the page * cache and/or page tables. Indicates the page is a candidate for * migration. + * Synchronization: Initially set after new page allocation with no + * locking. When examined and modified during migration processing + * (isolate, migrate, putback) the hugetlb_lock is held. * HPG_temporary - - Set on a page that is temporarily allocated from the buddy * allocator. Typically used for migration target pages when no pages * are available in the pool. The hugetlb free page path will * immediately free pages with this flag set to the buddy allocator. + * Synchronization: Can be set after huge page allocation from buddy when + * code knows it has only reference. All other examinations and + * modifications require hugetlb_lock. * HPG_freed - Set when page is on the free lists. + * Synchronization: hugetlb_lock held for examination and modification. 
*/ enum hugetlb_page_flags { HPG_restore_reserve = 0, -- cgit v1.2.3 From 33b8f84a4ee78491a8f4f9e4c5520c9da4a10983 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 24 Feb 2021 12:09:54 -0800 Subject: mm/hugetlb: change hugetlb_reserve_pages() to type bool While reviewing a bug in hugetlb_reserve_pages, it was noticed that all callers ignore the return value. Any failure is considered an ENOMEM error by the callers. Change the function to be of type bool. The function will return true if the reservation was successful, false otherwise. Callers currently assume a zero return code indicates success. Change the callers to look for true to indicate success. No functional change, only code cleanup. Link: https://lkml.kernel.org/r/20201221192542.15732-1-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Reviewed-by: Matthew Wilcox (Oracle) Cc: David Hildenbrand Cc: Dan Carpenter Cc: Michal Hocko Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 3a541c6cf5c3..cccd1aab69dd 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -139,7 +139,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, unsigned long dst_addr, unsigned long src_addr, struct page **pagep); -int hugetlb_reserve_pages(struct inode *inode, long from, long to, +bool hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, -- cgit v1.2.3 From a553e3cd2053501b658feec2be9a3b662eb1b22b Mon Sep 17 00:00:00 2001 From: Chengyang Fan Date: Wed, 24 Feb 2021 12:10:28 -0800 Subject: mm/migrate: remove unneeded semicolons Remove superfluous semicolons after function definitions. Link: https://lkml.kernel.org/r/20210115110131.2359683-1-cy.fan@huawei.com Signed-off-by: Chengyang Fan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 4594838a0f7c..3a389633b68f 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -89,7 +89,7 @@ extern int PageMovable(struct page *page); extern void __SetPageMovable(struct page *page, struct address_space *mapping); extern void __ClearPageMovable(struct page *page); #else -static inline int PageMovable(struct page *page) { return 0; }; +static inline int PageMovable(struct page *page) { return 0; } static inline void __SetPageMovable(struct page *page, struct address_space *mapping) { -- cgit v1.2.3
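Illustrative caller sketch (not part of any patch; the out_err label is an assumption): the hugetlb_reserve_pages() conversion above changes the caller-side pattern roughly as follows:

	/* before: int return, zero meant success */
	if (hugetlb_reserve_pages(inode, from, to, vma, vm_flags))
		goto out_err;

	/* after: bool return, true means success */
	if (!hugetlb_reserve_pages(inode, from, to, vma, vm_flags))
		goto out_err;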