summary refs log tree commit diff
path: root/include/linux/page-flags.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/page-flags.h')
-rw-r--r-- include/linux/page-flags.h | 163
1 file changed, 79 insertions, 84 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f7a0e4af0c73..0e03d816e8b9 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -198,97 +198,91 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
-
/*
- * Return the real head page struct iff the @page is a fake head page, otherwise
- * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
+ * For tail pages, if the size of struct page is power-of-2 ->compound_info
+ * encodes the mask that converts the address of the tail page to
+ * the head page address.
+ *
+ * Otherwise, ->compound_info has direct pointer to head pages.
*/
-static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
+static __always_inline bool compound_info_has_mask(void)
{
- if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
- return page;
-
/*
- * Only addresses aligned with PAGE_SIZE of struct page may be fake head
- * struct page. The alignment check aims to avoid access the fields (
- * e.g. compound_head) of the @page[1]. It can avoid touch a (possibly)
- * cold cacheline in some cases.
+ * Limit mask usage to HugeTLB vmemmap optimization (HVO) where it
+ * makes a difference.
+ *
+ * The approach with mask would work in the wider set of conditions,
+ * but it requires validating that struct pages are naturally aligned
+ * for all orders up to the MAX_FOLIO_ORDER, which can be tricky.
*/
- if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
- test_bit(PG_head, &page->flags.f)) {
- /*
- * We can safely access the field of the @page[1] with PG_head
- * because the @page is a compound page composed with at least
- * two contiguous pages.
- */
- unsigned long head = READ_ONCE(page[1].compound_head);
-
- if (likely(head & 1))
- return (const struct page *)(head - 1);
- }
- return page;
+ if (!IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP))
+ return false;
+
+ return is_power_of_2(sizeof(struct page));
}
-static __always_inline bool page_count_writable(const struct page *page, int u)
+static __always_inline unsigned long _compound_head(const struct page *page)
{
- if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
- return true;
+ unsigned long info = READ_ONCE(page->compound_info);
+ unsigned long mask;
+
+ if (!compound_info_has_mask()) {
+ /* Bit 0 encodes PageTail() */
+ if (info & 1)
+ return info - 1;
+
+ return (unsigned long)page;
+ }
/*
- * The refcount check is ordered before the fake-head check to prevent
- * the following race:
- * CPU 1 (HVO) CPU 2 (speculative PFN walker)
- *
- * page_ref_freeze()
- * synchronize_rcu()
- * rcu_read_lock()
- * page_is_fake_head() is false
- * vmemmap_remap_pte()
- * XXX: struct page[] becomes r/o
+ * If compound_info_has_mask() is true the rest of the info encodes
+ * the mask that converts the address of the tail page to the head page.
*
- * page_ref_unfreeze()
- * page_ref_count() is not zero
+ * No need to clear bit 0 in the mask as 'page' always has it clear.
*
- * atomic_add_unless(&page->_refcount)
- * XXX: try to modify r/o struct page[]
- *
- * The refcount check also prevents modification attempts to other (r/o)
- * tail pages that are not fake heads.
+ * Let's do it in a branchless manner.
*/
- if (atomic_read_acquire(&page->_refcount) == u)
- return false;
- return page_fixed_fake_head(page) == page;
-}
-#else
-static inline const struct page *page_fixed_fake_head(const struct page *page)
-{
- return page;
-}
+ /* Non-tail: -1UL, Tail: 0 */
+ mask = (info & 1) - 1;
-static inline bool page_count_writable(const struct page *page, int u)
-{
- return true;
-}
-#endif
+ /* Non-tail: -1UL, Tail: info */
+ mask |= info;
-static __always_inline int page_is_fake_head(const struct page *page)
-{
- return page_fixed_fake_head(page) != page;
+ return (unsigned long)page & mask;
}
-static __always_inline unsigned long _compound_head(const struct page *page)
+#define compound_head(page) ((typeof(page))_compound_head(page))
+
+static __always_inline void set_compound_head(struct page *tail,
+ const struct page *head, unsigned int order)
{
- unsigned long head = READ_ONCE(page->compound_head);
+ unsigned int shift;
+ unsigned long mask;
+
+ if (!compound_info_has_mask()) {
+ WRITE_ONCE(tail->compound_info, (unsigned long)head | 1);
+ return;
+ }
+
+ /*
+ * If the size of struct page is power-of-2, bits [shift:0] of the
+ * virtual address of compound head are zero.
+ *
+ * Calculate mask that can be applied to the virtual address of
+ * the tail page to get address of the head page.
+ */
+ shift = order + order_base_2(sizeof(struct page));
+ mask = GENMASK(BITS_PER_LONG - 1, shift);
- if (unlikely(head & 1))
- return head - 1;
- return (unsigned long)page_fixed_fake_head(page);
+ /* Bit 0 encodes PageTail() */
+ WRITE_ONCE(tail->compound_info, mask | 1);
}
-#define compound_head(page) ((typeof(page))_compound_head(page))
+static __always_inline void clear_compound_head(struct page *page)
+{
+ WRITE_ONCE(page->compound_info, 0);
+}
/**
* page_folio - Converts from page to folio.
@@ -320,13 +314,13 @@ static __always_inline unsigned long _compound_head(const struct page *page)
static __always_inline int PageTail(const struct page *page)
{
- return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
+ return READ_ONCE(page->compound_info) & 1;
}
static __always_inline int PageCompound(const struct page *page)
{
return test_bit(PG_head, &page->flags.f) ||
- READ_ONCE(page->compound_head) & 1;
+ READ_ONCE(page->compound_info) & 1;
}
#define PAGE_POISON_PATTERN -1l
@@ -348,7 +342,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
{
const struct page *page = &folio->page;
- VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ VM_BUG_ON_PGFLAGS(page->compound_info & 1, page);
VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
return &page[n].flags.f;
}
@@ -357,7 +351,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
struct page *page = &folio->page;
- VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ VM_BUG_ON_PGFLAGS(page->compound_info & 1, page);
VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
return &page[n].flags.f;
}
@@ -724,6 +718,11 @@ static __always_inline bool folio_test_anon(const struct folio *folio)
return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
}
+static __always_inline bool folio_test_lazyfree(const struct folio *folio)
+{
+ return folio_test_anon(folio) && !folio_test_swapbacked(folio);
+}
+
static __always_inline bool PageAnonNotKsm(const struct page *page)
{
unsigned long flags = (unsigned long)page_folio(page)->mapping;
@@ -847,7 +846,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
static __always_inline int PageHead(const struct page *page)
{
PF_POISONED_CHECK(page);
- return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
+ return test_bit(PG_head, &page->flags.f);
}
__SETPAGEFLAG(Head, head, PF_ANY)
@@ -865,16 +864,6 @@ static inline bool folio_test_large(const struct folio *folio)
return folio_test_head(folio);
}
-static __always_inline void set_compound_head(struct page *page, struct page *head)
-{
- WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
-}
-
-static __always_inline void clear_compound_head(struct page *page)
-{
- WRITE_ONCE(page->compound_head, 0);
-}
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
@@ -934,6 +923,7 @@ enum pagetype {
PGTY_zsmalloc = 0xf6,
PGTY_unaccepted = 0xf7,
PGTY_large_kmalloc = 0xf8,
+ PGTY_netpp = 0xf9,
PGTY_mapcount_underflow = 0xff
};
@@ -1066,6 +1056,11 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
+/*
+ * Marks page_pool allocated pages.
+ */
+PAGE_TYPE_OPS(Netpp, netpp, netpp)
+
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.