author	Alexei Starovoitov <ast@kernel.org>	2025-12-22 22:20:22 -0800
committer	Alexei Starovoitov <ast@kernel.org>	2025-12-22 22:20:22 -0800
commit	042d4c0642b35facee87628a61d81cc77debbc41 (patch)
tree	6cf27a817e33f436316232ce92007eacf09e5439 /include
parent	f785a31395d9cafb8b2c42c7358fad72a6463142 (diff)
parent	6e57cdde70c10f4654356cc45467ebce0a5c3f88 (diff)
Merge branch 'mm-bpf-kfuncs-to-access-memcg-data'
Roman Gushchin says:

====================
mm: bpf kfuncs to access memcg data

Introduce kfuncs to simplify the access to the memcg data. These kfuncs
can be used to accelerate monitoring use cases and for implementing
custom OOM policies once BPF OOM is landed.

This patchset was separated out from the BPF OOM patchset to simplify
the logistics and accelerate the landing of the part which is useful by
itself. No functional changes since BPF OOM v2.

v4:
  - refactored memcg vm event and stat item idx checks (by Alexei)
v3:
  - dropped redundant kfuncs flags (by Alexei)
  - fixed kdocs warnings (by Alexei)
  - merged memcg stats access patches into one (by Alexei)
  - restored root memcg usage reporting, added a comment
  - added checks for enum boundaries
  - added Shakeel and JP as co-maintainers (by Shakeel)
v2:
  - added mem_cgroup_disabled() checks (by Shakeel B.)
  - added special handling of the root memcg in bpf_mem_cgroup_usage()
    (by Shakeel B.)
  - minor fixes in the kselftest (by Shakeel B.)
  - added a MAINTAINERS entry (by Shakeel B.)
v1: https://lore.kernel.org/bpf/87ike29s5r.fsf@linux.dev/T/#t
====================

Link: https://patch.msgid.link/20251223044156.208250-1-roman.gushchin@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
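For context, a minimal sketch (kernel C, not code from this series) of the kind
of wrapper these exports enable: validate an index supplied by an untrusted BPF
caller, then read the counter through the helpers declared in the header diff
below. The wrapper names here are hypothetical; the series' actual kfuncs
(e.g. bpf_mem_cgroup_usage(), which also special-cases the root memcg) live in
the mm/BPF patches and are not part of this include/ diff.

/*
 * Illustrative sketch only; not the kfuncs added by this series.
 * It only shows how the newly exported memcontrol.h helpers compose.
 */
#include <linux/memcontrol.h>

static unsigned long memcg_events_checked(struct mem_cgroup *memcg, int event)
{
	/* Reject out-of-range event indices coming from a BPF program. */
	if (!memcg || !memcg_vm_event_item_valid(event))
		return 0;

	return memcg_events(memcg, event);
}

static unsigned long memcg_usage_checked(struct mem_cgroup *memcg, bool swap)
{
	/* Current memory (optionally including swap) charge of the cgroup. */
	if (!memcg)
		return 0;

	return mem_cgroup_usage(memcg, swap);
}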
Diffstat (limited to 'include')
-rw-r--r--	include/linux/memcontrol.h	20
1 file changed, 20 insertions, 0 deletions
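The second hunk below also adds !CONFIG_MEMCG inline stubs that return 0/false,
so callers of the new helpers need no #ifdef guards. A minimal sketch of such a
caller, with a hypothetical helper name; with CONFIG_MEMCG=n the stubbed
memcg_stat_item_valid() simply makes it report 0:

#include <linux/memcontrol.h>

/*
 * Hypothetical caller, for illustration only: read one memory.stat-style
 * item for a memcg.  Builds with CONFIG_MEMCG=y and =n alike, because the
 * header below provides inline stubs for the disabled case.
 */
static unsigned long read_memcg_stat_item(struct mem_cgroup *memcg, int item)
{
	if (!memcg || !memcg_stat_item_valid(item))
		return 0;

	/* Value in the units used by memory.stat output. */
	return memcg_page_state_output(memcg, item);
}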
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0651865a4564..6a5d65487b70 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -949,7 +949,12 @@ static inline void mod_memcg_page_state(struct page *page,
 	rcu_read_unlock();
 }
 
+unsigned long memcg_events(struct mem_cgroup *memcg, int event);
+unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
+bool memcg_stat_item_valid(int idx);
+bool memcg_vm_event_item_valid(enum vm_event_item idx);
 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 				      enum node_stat_item idx);
@@ -1373,6 +1378,21 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 	return 0;
 }
 
+static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
+{
+	return 0;
+}
+
+static inline bool memcg_stat_item_valid(int idx)
+{
+	return false;
+}
+
+static inline bool memcg_vm_event_item_valid(enum vm_event_item idx)
+{
+	return false;
+}
+
 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 					      enum node_stat_item idx)
 {