path: root/include/linux/mmzone.h
author	Jani Nikula <jani.nikula@intel.com>	2024-08-01 13:06:09 +0300
committer	Jani Nikula <jani.nikula@intel.com>	2024-08-01 13:06:09 +0300
commit	3663e2c4bc45fcdc71931fcbfcbfbf9b71f55c83 (patch)
tree	3927fa90c7faa89131fcf2e789ca9de97a65dc5f /include/linux/mmzone.h
parent	688c43dd6ca9e0cd0568868aefca5b041695c3f4 (diff)
parent	8400291e289ee6b2bf9779ff1c83a291501f017b (diff)
Merge drm/drm-next into drm-intel-next
Sync with v6.11-rc1 in general, and specifically get the new
BACKLIGHT_POWER_ constants for power states.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	| 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8f9c9590a42c..41458892bc8a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -220,6 +220,8 @@ enum node_stat_item {
 	PGDEMOTE_KSWAPD,
 	PGDEMOTE_DIRECT,
 	PGDEMOTE_KHUGEPAGED,
+	NR_MEMMAP, /* page metadata allocated through buddy allocator */
+	NR_MEMMAP_BOOT, /* page metadata allocated through boot allocator */
 	NR_VM_NODE_STAT_ITEMS
 };
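The two new entries extend enum node_stat_item, so they are accounted with the same per-node vmstat machinery as the existing items and exported through the usual node/vmstat interfaces. As a rough illustration (the account_memmap_pages() wrapper below is hypothetical; mod_node_page_state() is the generic per-node stat helper declared in include/linux/vmstat.h):

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/*
 * Hypothetical wrapper: charge nr_pages of page metadata (the struct page
 * array backing a memory section) to the owning node.  Pages obtained from
 * the buddy allocator are counted in NR_MEMMAP; pages carved out by the
 * boot (memblock) allocator would be counted in NR_MEMMAP_BOOT instead.
 */
static void account_memmap_pages(pg_data_t *pgdat, long nr_pages, bool boot)
{
	enum node_stat_item item = boot ? NR_MEMMAP_BOOT : NR_MEMMAP;

	mod_node_page_state(pgdat, item, nr_pages);
}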
@@ -654,13 +656,12 @@ enum zone_watermarks {
 };
 
 /*
- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
- * for THP which will usually be GFP_MOVABLE. Even if it is another type,
- * it should not contribute to serious fragmentation causing THP allocation
- * failures.
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
+ * are added for THP. One PCP list is used by GFP_MOVABLE, and the other PCP list
+ * is used by GFP_UNMOVABLE and GFP_RECLAIMABLE.
  */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define NR_PCP_THP 1
+#define NR_PCP_THP 2
 #else
 #define NR_PCP_THP 0
 #endif
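NR_PCP_THP only sizes the per-CPU page (PCP) list array: mmzone.h adds it on top of NR_LOWORDER_PCP_LISTS (one list per migratetype for each order up to PAGE_ALLOC_COSTLY_ORDER) to form NR_PCP_LISTS. The sketch below shows, in simplified form, how an (order, migratetype) pair could be mapped onto the two THP slots; the real mapping is done by order_to_pindex() in mm/page_alloc.c, and thp_pcp_index_sketch() is illustrative only.

/*
 * Simplified sketch: with NR_PCP_THP == 2, PMD-sized (THP) pages get two
 * dedicated PCP lists after the low-order lists, one for movable
 * allocations and one shared by unmovable/reclaimable allocations.
 */
static inline unsigned int thp_pcp_index_sketch(int migratetype, int order)
{
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		/* Last two slots: [0] non-movable THP, [1] movable THP. */
		bool movable = (migratetype == MIGRATE_MOVABLE);

		return NR_LOWORDER_PCP_LISTS + movable;
	}

	/* Low orders keep one list per (order, migratetype) pair. */
	return (order * MIGRATE_PCPTYPES) + migratetype;
}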
@@ -1980,8 +1981,9 @@ static inline int subsection_map_index(unsigned long pfn)
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 {
 	int idx = subsection_map_index(pfn);
+	struct mem_section_usage *usage = READ_ONCE(ms->usage);
 
-	return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+	return usage ? test_bit(idx, usage->subsection_map) : 0;
 }
 #else
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
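The last hunk is the correctness fix: ms->usage can be freed and cleared by a racing memory hot-remove, so the pointer is now loaded exactly once with READ_ONCE() and checked for NULL before being dereferenced; a vanished usage map simply makes the pfn report as invalid. A standalone sketch of the same load-once-then-test pattern (demo_section and demo_bit_valid are made-up names, not kernel API):

#include <linux/bitops.h>	/* test_bit() */
#include <linux/compiler.h>	/* READ_ONCE() */

struct demo_section {
	unsigned long *map;	/* may be freed and set to NULL concurrently */
};

static int demo_bit_valid(struct demo_section *s, int idx)
{
	/*
	 * Load the pointer once so the NULL check and the dereference
	 * operate on the same value, even if s->map is cleared in
	 * between by another CPU.
	 */
	unsigned long *map = READ_ONCE(s->map);

	return map ? test_bit(idx, map) : 0;
}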