author     Linus Torvalds <torvalds@linux-foundation.org>  2021-12-11 08:46:52 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-12-11 08:46:52 -0800
commit     df442a4ec740864ff65cbfa7e71669603ddbe2af (patch)
tree       3691f48c9c6eeafff08f31123ce1ae27f8f63d0f /mm/memcontrol.c
parent     6f513529296fd4f696afb4354c46508abe646541 (diff)
parent     3c376dfafbf7a8ea0dea212d095ddd83e93280bb (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "21 patches.

  Subsystems affected by this patch series: MAINTAINERS, mailmap, and mm
  (mlock, pagecache, damon, slub, memcg, hugetlb, and pagecache)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  mm: bdi: initialize bdi_min_ratio when bdi is unregistered
  hugetlbfs: fix issue of preallocation of gigantic pages can't work
  mm/memcg: relocate mod_objcg_mlstate(), get_obj_stock() and put_obj_stock()
  mm/slub: fix endianness bug for alloc/free_traces attributes
  selftests/damon: split test cases
  selftests/damon: test debugfs file reads/writes with huge count
  selftests/damon: test wrong DAMOS condition ranges input
  selftests/damon: test DAMON enabling with empty target_ids case
  selftests/damon: skip test if DAMON is running
  mm/damon/vaddr-test: remove unnecessary variables
  mm/damon/vaddr-test: split a test function having >1024 bytes frame size
  mm/damon/vaddr: remove an unnecessary warning message
  mm/damon/core: remove unnecessary error messages
  mm/damon/dbgfs: remove an unnecessary error message
  mm/damon/core: use better timer mechanisms selection threshold
  mm/damon/core: fix fake load reports due to uninterruptible sleeps
  timers: implement usleep_idle_range()
  filemap: remove PageHWPoison check from next_uptodate_page()
  mailmap: update email address for Guo Ren
  MAINTAINERS: update kdump maintainers
  ...
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  106
1 file changed, 53 insertions, 53 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6863a834ed42..2ed5f2a0879d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
rcu_read_unlock();
}
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
- struct pglist_data *pgdat,
- enum node_stat_item idx, int nr)
-{
- struct mem_cgroup *memcg;
- struct lruvec *lruvec;
-
- rcu_read_lock();
- memcg = obj_cgroup_memcg(objcg);
- lruvec = mem_cgroup_lruvec(memcg, pgdat);
- mod_memcg_lruvec_state(lruvec, idx, nr);
- rcu_read_unlock();
-}
-
/**
* __count_memcg_events - account VM events in a cgroup
* @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
}
#endif
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
- struct memcg_stock_pcp *stock;
-
- if (likely(in_task())) {
- *pflags = 0UL;
- preempt_disable();
- stock = this_cpu_ptr(&memcg_stock);
- return &stock->task_obj;
- }
-
- local_irq_save(*pflags);
- stock = this_cpu_ptr(&memcg_stock);
- return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
- if (likely(in_task()))
- preempt_enable();
- else
- local_irq_restore(flags);
-}
-
/**
* consume_stock: Try to consume stocked charge on this cpu.
* @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
*/
#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+ struct memcg_stock_pcp *stock;
+
+ if (likely(in_task())) {
+ *pflags = 0UL;
+ preempt_disable();
+ stock = this_cpu_ptr(&memcg_stock);
+ return &stock->task_obj;
+ }
+
+ local_irq_save(*pflags);
+ stock = this_cpu_ptr(&memcg_stock);
+ return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+ if (likely(in_task()))
+ preempt_enable();
+ else
+ local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+ struct pglist_data *pgdat,
+ enum node_stat_item idx, int nr)
+{
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ mod_memcg_lruvec_state(lruvec, idx, nr);
+ rcu_read_unlock();
+}
+
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
gfp_t gfp, bool new_page)
{
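
As a usage illustration (not part of this commit's diff): callers in mm/memcontrol.c bracket every per-cpu object-stock update with the get_obj_stock()/put_obj_stock() pair relocated above, so the update runs with either preemption or interrupts disabled depending on the calling context. The sketch below is hypothetical — example_refill_obj_stock() and its elided flush step are not kernel code — but the pairing pattern and the cached_objcg/nr_bytes fields mirror the real refill_obj_stock() in this file at the time of this commit.

/*
 * Hypothetical caller sketch: pairing get_obj_stock() with
 * put_obj_stock() around a per-cpu object stock update. The
 * function name and the elided drain step are illustrative only.
 */
static void example_refill_obj_stock(struct obj_cgroup *objcg,
				     unsigned int nr_bytes)
{
	unsigned long flags;
	struct obj_stock *stock = get_obj_stock(&flags);

	/*
	 * Safe to touch the stock here: get_obj_stock() disabled
	 * preemption (task context) or irqs (interrupt context) and
	 * returned the obj_stock matching the current context.
	 */
	if (stock->cached_objcg != objcg) {
		/* ... drain the old objcg's stock, cache the new one ... */
	}
	stock->nr_bytes += nr_bytes;

	put_obj_stock(flags);	/* re-enable preemption or irqs */
}

On a non-preempt kernel the task-context path costs only a preempt-count increment and decrement, which is why the code keeps two stocks rather than unconditionally using local_irq_save().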