summaryrefslogtreecommitdiff
path: root/tools/testing/vma
diff options
context:
space:
mode:
Diffstat (limited to 'tools/testing/vma')
-rw-r--r--tools/testing/vma/include/custom.h26
-rw-r--r--tools/testing/vma/include/dup.h492
-rw-r--r--tools/testing/vma/include/stubs.h24
-rw-r--r--tools/testing/vma/main.c2
-rw-r--r--tools/testing/vma/shared.c8
-rw-r--r--tools/testing/vma/shared.h22
-rw-r--r--tools/testing/vma/tests/merge.c311
-rw-r--r--tools/testing/vma/tests/mmap.c18
-rw-r--r--tools/testing/vma/tests/vma.c395
-rw-r--r--tools/testing/vma/vma_internal.h6
10 files changed, 937 insertions, 367 deletions
diff --git a/tools/testing/vma/include/custom.h b/tools/testing/vma/include/custom.h
index 802a76317245..744fe874c168 100644
--- a/tools/testing/vma/include/custom.h
+++ b/tools/testing/vma/include/custom.h
@@ -15,15 +15,6 @@ extern unsigned long dac_mmap_min_addr;
#define dac_mmap_min_addr 0UL
#endif
-#define VM_WARN_ON(_expr) (WARN_ON(_expr))
-#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
-#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
-#define VM_BUG_ON(_expr) (BUG_ON(_expr))
-#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
-
-/* We hardcode this for now. */
-#define sysctl_max_map_count 0x1000000UL
-
#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)
/*
@@ -32,8 +23,6 @@ extern unsigned long dac_mmap_min_addr;
*/
#define pr_warn_once pr_err
-#define pgtable_supports_soft_dirty() 1
-
struct anon_vma {
struct anon_vma *root;
struct rb_root_cached rb_root;
@@ -102,18 +91,7 @@ static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
refcount_set(&vma->vm_refcnt, 0);
}
-static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits)
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
- vma_flags_t flags;
- int i;
-
- /*
- * For testing purposes: allow invalid bit specification so we can
- * easily test.
- */
- vma_flags_clear_all(&flags);
- for (i = 0; i < count; i++)
- if (bits[i] < NUM_VMA_FLAG_BITS)
- vma_flag_set(&flags, bits[i]);
- return flags;
+ return PAGE_SIZE;
}
diff --git a/tools/testing/vma/include/dup.h b/tools/testing/vma/include/dup.h
index 3078ff1487d3..b4864aad2db0 100644
--- a/tools/testing/vma/include/dup.h
+++ b/tools/testing/vma/include/dup.h
@@ -33,7 +33,10 @@ struct mm_struct {
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ union {
+ vm_flags_t def_flags;
+ vma_flags_t def_vma_flags;
+ };
mm_flags_t flags; /* Must use mm_flags_* helpers to access */
};
@@ -264,8 +267,10 @@ enum {
#endif /* CONFIG_ARCH_HAS_PKEYS */
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK VM_NONE
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO INIT_VM_FLAG(SAO)
@@ -311,36 +316,49 @@ enum {
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
-#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
+#define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
+ VM_EXEC_BIT : VM_READ_BIT)
/* Common data flag combinations */
-#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
- VM_MAYWRITE | VM_MAYEXEC)
-#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
-#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
+#define VMA_DATA_FLAGS_TSK_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
+ VMA_MAYEXEC_BIT)
+#define VMA_DATA_FLAGS_NON_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
+#define VMA_DATA_FLAGS_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
+ VMA_MAYEXEC_BIT)
+
+#ifndef VMA_DATA_DEFAULT_FLAGS /* arch can override this */
+#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_EXEC
#endif
-#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
-#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+#ifndef VMA_STACK_DEFAULT_FLAGS /* arch can override this */
+#define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS
#endif
-#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+#define VMA_STACK_FLAGS append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \
+ VMA_STACK_BIT, VMA_ACCOUNT_BIT)
+/* Temporary until VMA flags conversion complete. */
+#define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)
-#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
+#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)
/*
* Special vmas that are non-mergable, non-mlock()able.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
+ VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
+
+#define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT, \
+ VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
+
#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX DEFAULT_MAP_WINDOW
@@ -350,19 +368,20 @@ enum {
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
-#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
-
-#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VMA_LOCKED_MASK mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)
#define RLIMIT_STACK 3 /* max stack size */
#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
#define CAP_IPC_LOCK 14
-#define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD)
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
+#else
+#define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
+#endif
-#define VM_IGNORE_MERGE VM_STICKY
+#define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS
#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
@@ -419,11 +438,23 @@ struct vma_iterator {
#define EMPTY_VMA_FLAGS ((vma_flags_t){ })
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+static __always_inline bool vma_flags_empty(const vma_flags_t *flags)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS);
+}
+
/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
MMAP_NOTHING, /* Mapping is complete, no further action. */
MMAP_REMAP_PFN, /* Remap PFN range. */
MMAP_IO_REMAP_PFN, /* I/O remap PFN range. */
+ MMAP_SIMPLE_IO_REMAP, /* I/O remap with guardrails. */
+ MMAP_MAP_KERNEL_PAGES, /* Map kernel page range from an array. */
};
/*
@@ -432,13 +463,22 @@ enum mmap_action_type {
*/
struct mmap_action {
union {
- /* Remap range. */
struct {
unsigned long start;
unsigned long start_pfn;
unsigned long size;
pgprot_t pgprot;
} remap;
+ struct {
+ phys_addr_t start_phys_addr;
+ unsigned long size;
+ } simple_ioremap;
+ struct {
+ unsigned long start;
+ struct page **pages;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
+ } map_kernel;
};
enum mmap_action_type type;
@@ -486,18 +526,15 @@ enum vma_operation {
*/
struct vm_area_desc {
/* Immutable state. */
- const struct mm_struct *const mm;
- struct file *const file; /* May vary from vm_file in stacked callers. */
+ struct mm_struct *mm;
+ struct file *file; /* May vary from vm_file in stacked callers. */
unsigned long start;
unsigned long end;
/* Mutable fields. Populated with initial state. */
pgoff_t pgoff;
struct file *vm_file;
- union {
- vm_flags_t vm_flags;
- vma_flags_t vma_flags;
- };
+ vma_flags_t vma_flags;
pgprot_t page_prot;
/* Write-only fields. */
@@ -606,15 +643,37 @@ struct vm_area_struct {
} __randomize_layout;
struct vm_operations_struct {
- void (*open)(struct vm_area_struct * area);
+ /**
+ * @open: Called when a VMA is remapped, split or forked. Not called
+ * upon first mapping a VMA.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
+ void (*open)(struct vm_area_struct *vma);
/**
* @close: Called when the VMA is being removed from the MM.
* Context: User context. May sleep. Caller holds mmap_lock.
*/
- void (*close)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct *vma);
+ /**
+ * @mapped: Called when the VMA is first mapped in the MM. Not called if
+ * the new VMA is merged with an adjacent VMA.
+ *
+ * The @vm_private_data field is an output field allowing the user to
+ * modify vma->vm_private_data as necessary.
+ *
+ * ONLY valid if set from f_op->mmap_prepare. Will result in an error if
+ * set from f_op->mmap.
+ *
+ * Returns %0 on success, or an error otherwise. On error, the VMA will
+ * be unmapped.
+ *
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
+ int (*mapped)(unsigned long start, unsigned long end, pgoff_t pgoff,
+ const struct file *file, void **vm_private_data);
/* Called any time before splitting to check if it's allowed */
- int (*may_split)(struct vm_area_struct *area, unsigned long addr);
- int (*mremap)(struct vm_area_struct *area);
+ int (*may_split)(struct vm_area_struct *vma, unsigned long addr);
+ int (*mremap)(struct vm_area_struct *vma);
/*
* Called by mprotect() to make driver-specific permission
* checks before mprotect() is finalised. The VMA must not
@@ -626,7 +685,7 @@ struct vm_operations_struct {
vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
- unsigned long (*pagesize)(struct vm_area_struct * area);
+ unsigned long (*pagesize)(struct vm_area_struct *vma);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
@@ -746,9 +805,12 @@ static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
* IMPORTANT: This does not overwrite bytes past the first system word. The
* caller must account for this.
*/
-static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_overwrite_word(vma_flags_t *flags,
+ unsigned long value)
{
- *ACCESS_PRIVATE(flags, __vma_flags) = value;
+ unsigned long *bitmap = flags->__vma_flags;
+
+ bitmap[0] = value;
}
/*
@@ -757,35 +819,65 @@ static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long va
* IMPORTANT: This does not overwrite bytes past the first system word. The
* caller must account for this.
*/
-static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_overwrite_word_once(vma_flags_t *flags,
+ unsigned long value)
{
- unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+ unsigned long *bitmap = flags->__vma_flags;
WRITE_ONCE(*bitmap, value);
}
/* Update the first system word of VMA flags setting bits, non-atomically. */
-static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_set_word(vma_flags_t *flags,
+ unsigned long value)
{
- unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+ unsigned long *bitmap = flags->__vma_flags;
*bitmap |= value;
}
/* Update the first system word of VMA flags clearing bits, non-atomically. */
-static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_clear_word(vma_flags_t *flags,
+ unsigned long value)
{
- unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+ unsigned long *bitmap = flags->__vma_flags;
*bitmap &= ~value;
}
-static inline void vma_flags_clear_all(vma_flags_t *flags)
+static __always_inline void vma_flags_clear_all(vma_flags_t *flags)
{
bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
}
-static inline void vma_flag_set(vma_flags_t *flags, vma_flag_t bit)
+/*
+ * Helper function which converts a vma_flags_t value to a legacy vm_flags_t
+ * value. This is only valid if the input flags value can be expressed in a
+ * system word.
+ *
+ * Will be removed once the conversion to VMA flags is complete.
+ */
+static __always_inline vm_flags_t vma_flags_to_legacy(vma_flags_t flags)
+{
+ return (vm_flags_t)flags.__vma_flags[0];
+}
+
+/*
+ * Helper function which converts a legacy vm_flags_t value to a vma_flags_t
+ * value.
+ *
+ * Will be removed once the conversion to VMA flags is complete.
+ */
+static __always_inline vma_flags_t legacy_to_vma_flags(vm_flags_t flags)
+{
+ vma_flags_t ret = EMPTY_VMA_FLAGS;
+
+ vma_flags_overwrite_word(&ret, flags);
+ return ret;
+}
+
+static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
+ vma_flag_t bit)
{
unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
@@ -812,16 +904,20 @@ static inline void vm_flags_reset(struct vm_area_struct *vma,
vm_flags_init(vma, flags);
}
-static inline void vm_flags_reset_once(struct vm_area_struct *vma,
- vm_flags_t flags)
+static inline void vma_flags_reset_once(struct vm_area_struct *vma,
+ vma_flags_t *flags)
{
- vma_assert_write_locked(vma);
- /*
- * The user should only be interested in avoiding reordering of
- * assignment to the first word.
- */
- vma_flags_clear_all(&vma->flags);
- vma_flags_overwrite_word_once(&vma->flags, flags);
+ const unsigned long word = flags->__vma_flags[0];
+
+ /* It is assumed only the first system word must be written once. */
+ vma_flags_overwrite_word_once(&vma->flags, word);
+ /* The remainder can be copied normally. */
+ if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
+ unsigned long *dst = &vma->flags.__vma_flags[1];
+ const unsigned long *src = &flags->__vma_flags[1];
+
+ bitmap_copy(dst, src, NUM_VMA_FLAG_BITS - BITS_PER_LONG);
+ }
}
static inline void vm_flags_set(struct vm_area_struct *vma,
@@ -838,12 +934,53 @@ static inline void vm_flags_clear(struct vm_area_struct *vma,
vma_flags_clear_word(&vma->flags, flags);
}
-static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits);
+static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
+ size_t count, const vma_flag_t *bits)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ vma_flags_set_flag(&flags, bits[i]);
+ return flags;
+}
+
+#define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS, \
+ COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
-#define mk_vma_flags(...) __mk_vma_flags(COUNT_ARGS(__VA_ARGS__), \
- (const vma_flag_t []){__VA_ARGS__})
+#define append_vma_flags(flags, ...) __mk_vma_flags(flags, \
+ COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
-static __always_inline bool vma_flags_test_mask(const vma_flags_t *flags,
+static __always_inline int vma_flags_count(const vma_flags_t *flags)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return bitmap_weight(bitmap, NUM_VMA_FLAG_BITS);
+}
+
+static __always_inline bool vma_flags_test(const vma_flags_t *flags,
+ vma_flag_t bit)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return test_bit((__force int)bit, bitmap);
+}
+
+static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
+ vma_flags_t to_and)
+{
+ vma_flags_t dst;
+ unsigned long *bitmap_dst = dst.__vma_flags;
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_to_and = to_and.__vma_flags;
+
+ bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
+ return dst;
+}
+
+#define vma_flags_and(flags, ...) \
+ vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
vma_flags_t to_test)
{
const unsigned long *bitmap = flags->__vma_flags;
@@ -852,8 +989,8 @@ static __always_inline bool vma_flags_test_mask(const vma_flags_t *flags,
return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}
-#define vma_flags_test(flags, ...) \
- vma_flags_test_mask(flags, mk_vma_flags(__VA_ARGS__))
+#define vma_flags_test_any(flags, ...) \
+ vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))
static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
vma_flags_t to_test)
@@ -867,6 +1004,14 @@ static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
#define vma_flags_test_all(flags, ...) \
vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))
+static __always_inline bool vma_flags_test_single_mask(const vma_flags_t *flags,
+ vma_flags_t flagmask)
+{
+ VM_WARN_ON_ONCE(vma_flags_count(&flagmask) > 1);
+
+ return vma_flags_test_any_mask(flags, flagmask);
+}
+
static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
{
unsigned long *bitmap = flags->__vma_flags;
@@ -889,23 +1034,71 @@ static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t
#define vma_flags_clear(flags, ...) \
vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))
-static inline bool vma_test_all_flags_mask(const struct vm_area_struct *vma,
- vma_flags_t flags)
+static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
+ const vma_flags_t *flags_other)
+{
+ vma_flags_t dst;
+ const unsigned long *bitmap_other = flags_other->__vma_flags;
+ const unsigned long *bitmap = flags->__vma_flags;
+ unsigned long *bitmap_dst = dst.__vma_flags;
+
+ bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+ return dst;
+}
+
+static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
+ const vma_flags_t *flags_other)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_other = flags_other->__vma_flags;
+
+ return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+}
+
+static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
+ vma_flags_t flags_other)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_other = flags_other.__vma_flags;
+
+ return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+}
+
+#define vma_flags_same(flags, ...) \
+ vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline bool vma_test(const struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ return vma_flags_test(&vma->flags, bit);
+}
+
+static __always_inline bool vma_test_any_mask(const struct vm_area_struct *vma,
+ vma_flags_t flags)
+{
+ return vma_flags_test_any_mask(&vma->flags, flags);
+}
+
+#define vma_test_any(vma, ...) \
+ vma_test_any_mask(vma, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline bool vma_test_all_mask(const struct vm_area_struct *vma,
+ vma_flags_t flags)
{
return vma_flags_test_all_mask(&vma->flags, flags);
}
-#define vma_test_all_flags(vma, ...) \
- vma_test_all_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
+#define vma_test_all(vma, ...) \
+ vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))
-static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
+static __always_inline bool
+vma_test_single_mask(const struct vm_area_struct *vma, vma_flags_t flagmask)
{
- return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
- (VM_SHARED | VM_MAYWRITE);
+ return vma_flags_test_single_mask(&vma->flags, flagmask);
}
-static inline void vma_set_flags_mask(struct vm_area_struct *vma,
- vma_flags_t flags)
+static __always_inline void vma_set_flags_mask(struct vm_area_struct *vma,
+ vma_flags_t flags)
{
vma_flags_set_mask(&vma->flags, flags);
}
@@ -913,17 +1106,41 @@ static inline void vma_set_flags_mask(struct vm_area_struct *vma,
#define vma_set_flags(vma, ...) \
vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
-static inline bool vma_desc_test_flags_mask(const struct vm_area_desc *desc,
- vma_flags_t flags)
+static __always_inline void vma_clear_flags_mask(struct vm_area_struct *vma,
+ vma_flags_t flags)
{
- return vma_flags_test_mask(&desc->vma_flags, flags);
+ vma_flags_clear_mask(&vma->flags, flags);
}
-#define vma_desc_test_flags(desc, ...) \
- vma_desc_test_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
+#define vma_clear_flags(vma, ...) \
+ vma_clear_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
-static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
- vma_flags_t flags)
+static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
+ vma_flag_t bit)
+{
+ return vma_flags_test(&desc->vma_flags, bit);
+}
+
+static __always_inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
+ vma_flags_t flags)
+{
+ return vma_flags_test_any_mask(&desc->vma_flags, flags);
+}
+
+#define vma_desc_test_any(desc, ...) \
+ vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
+ vma_flags_t flags)
+{
+ return vma_flags_test_all_mask(&desc->vma_flags, flags);
+}
+
+#define vma_desc_test_all(desc, ...) \
+ vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
+ vma_flags_t flags)
{
vma_flags_set_mask(&desc->vma_flags, flags);
}
@@ -931,8 +1148,8 @@ static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
#define vma_desc_set_flags(desc, ...) \
vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
-static inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
- vma_flags_t flags)
+static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
+ vma_flags_t flags)
{
vma_flags_clear_mask(&desc->vma_flags, flags);
}
@@ -1068,42 +1285,71 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
}
/* Declared in vma.h. */
-static inline void set_vma_from_desc(struct vm_area_struct *vma,
+static inline void compat_set_vma_from_desc(struct vm_area_struct *vma,
struct vm_area_desc *desc);
-static inline int __compat_vma_mmap(const struct file_operations *f_op,
- struct file *file, struct vm_area_struct *vma)
+static inline void compat_set_desc_from_vma(struct vm_area_desc *desc,
+ const struct file *file,
+ const struct vm_area_struct *vma)
{
- struct vm_area_desc desc = {
- .mm = vma->vm_mm,
- .file = file,
- .start = vma->vm_start,
- .end = vma->vm_end,
+ memset(desc, 0, sizeof(*desc));
- .pgoff = vma->vm_pgoff,
- .vm_file = vma->vm_file,
- .vm_flags = vma->vm_flags,
- .page_prot = vma->vm_page_prot,
+ desc->mm = vma->vm_mm;
+ desc->file = (struct file *)file;
+ desc->start = vma->vm_start;
+ desc->end = vma->vm_end;
- .action.type = MMAP_NOTHING, /* Default */
- };
- int err;
+ desc->pgoff = vma->vm_pgoff;
+ desc->vm_file = vma->vm_file;
+ desc->vma_flags = vma->flags;
+ desc->page_prot = vma->vm_page_prot;
- err = f_op->mmap_prepare(&desc);
- if (err)
- return err;
+ /* Default. */
+ desc->action.type = MMAP_NOTHING;
+}
- mmap_action_prepare(&desc.action, &desc);
- set_vma_from_desc(vma, &desc);
- return mmap_action_complete(&desc.action, vma);
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
+{
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+}
+
+static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
+{
+ return file->f_op->mmap_prepare(desc);
}
-static inline int compat_vma_mmap(struct file *file,
+static inline int __compat_vma_mmap(struct vm_area_desc *desc,
struct vm_area_struct *vma)
{
- return __compat_vma_mmap(file->f_op, file, vma);
+ int err;
+
+ /* Perform any preparatory tasks for mmap action. */
+ err = mmap_action_prepare(desc);
+ if (err)
+ return err;
+ /* Update the VMA from the descriptor. */
+ compat_set_vma_from_desc(vma, desc);
+ /* Complete any specified mmap actions. */
+ return mmap_action_complete(vma, &desc->action);
}
+static inline int compat_vma_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vm_area_desc desc;
+ struct mmap_action *action;
+ int err;
+
+ compat_set_desc_from_vma(&desc, file, vma);
+ err = vfs_mmap_prepare(file, &desc);
+ if (err)
+ return err;
+ action = &desc.action;
+
+	/* Being invoked from .mmap means we don't have to enforce this. */
+ action->hide_from_rmap_until_complete = false;
+
+ return __compat_vma_mmap(&desc, vma);
+}
static inline void vma_iter_init(struct vma_iterator *vmi,
struct mm_struct *mm, unsigned long addr)
@@ -1111,11 +1357,6 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
mas_init(&vmi->mas, &mm->mm_mt, addr);
}
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
-{
- return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-}
-
static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr,
@@ -1256,27 +1497,6 @@ static inline bool mlock_future_ok(const struct mm_struct *mm,
return locked_pages <= limit_pages;
}
-static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
-{
- /* If MDWE is disabled, we have nothing to deny. */
- if (mm_flags_test(MMF_HAS_MDWE, current->mm))
- return false;
-
- /* If the new VMA is not executable, we have nothing to deny. */
- if (!(new & VM_EXEC))
- return false;
-
- /* Under MDWE we do not accept newly writably executable VMAs... */
- if (new & VM_WRITE)
- return true;
-
- /* ...nor previously non-executable VMAs becoming executable. */
- if (!(old & VM_EXEC))
- return true;
-
- return false;
-}
-
static inline int mapping_map_writable(struct address_space *mapping)
{
return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
@@ -1306,11 +1526,6 @@ static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
return file->f_op->mmap(file, vma);
}
-static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
-{
- return file->f_op->mmap_prepare(desc);
-}
-
static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
/* Changing an anonymous vma with this is illegal */
@@ -1318,3 +1533,20 @@ static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
swap(vma->vm_file, file);
fput(file);
}
+
+extern int sysctl_max_map_count;
+static inline int get_sysctl_max_map_count(void)
+{
+ return READ_ONCE(sysctl_max_map_count);
+}
+
+#ifndef pgtable_supports_soft_dirty
+#define pgtable_supports_soft_dirty() IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
+#endif
+
+static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
+{
+ const vm_flags_t vm_flags = vma_flags_to_legacy(vma_flags);
+
+ return vm_get_page_prot(vm_flags);
+}
diff --git a/tools/testing/vma/include/stubs.h b/tools/testing/vma/include/stubs.h
index 947a3a0c2566..a30b8bc84955 100644
--- a/tools/testing/vma/include/stubs.h
+++ b/tools/testing/vma/include/stubs.h
@@ -81,13 +81,13 @@ static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}
-static inline void mmap_action_prepare(struct mmap_action *action,
- struct vm_area_desc *desc)
+static inline int mmap_action_prepare(struct vm_area_desc *desc)
{
+ return 0;
}
-static inline int mmap_action_complete(struct mmap_action *action,
- struct vm_area_struct *vma)
+static inline int mmap_action_complete(struct vm_area_struct *vma,
+ struct mmap_action *action)
{
return 0;
}
@@ -101,10 +101,10 @@ static inline bool shmem_file(struct file *file)
return false;
}
-static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
- const struct file *file, vm_flags_t vm_flags)
+static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
+ const struct file *file, vma_flags_t vma_flags)
{
- return vm_flags;
+ return vma_flags;
}
static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
@@ -229,7 +229,7 @@ static inline bool signal_pending(void *p)
return false;
}
-static inline bool is_file_hugepages(struct file *file)
+static inline bool is_file_hugepages(const struct file *file)
{
return false;
}
@@ -239,7 +239,8 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
return 0;
}
-static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
+static inline bool may_expand_vm(struct mm_struct *mm,
+ const vma_flags_t *vma_flags,
unsigned long npages)
{
return true;
@@ -426,3 +427,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
}
static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
+
+static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
+{
+ return false;
+}
diff --git a/tools/testing/vma/main.c b/tools/testing/vma/main.c
index 49b09e97a51f..18338f5d29e0 100644
--- a/tools/testing/vma/main.c
+++ b/tools/testing/vma/main.c
@@ -14,6 +14,8 @@
#include "tests/mmap.c"
#include "tests/vma.c"
+int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
+
/* Helper functions which utilise static kernel functions. */
struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
diff --git a/tools/testing/vma/shared.c b/tools/testing/vma/shared.c
index bda578cc3304..2565a5aecb80 100644
--- a/tools/testing/vma/shared.c
+++ b/tools/testing/vma/shared.c
@@ -14,7 +14,7 @@ struct task_struct __current;
struct vm_area_struct *alloc_vma(struct mm_struct *mm,
unsigned long start, unsigned long end,
- pgoff_t pgoff, vm_flags_t vm_flags)
+ pgoff_t pgoff, vma_flags_t vma_flags)
{
struct vm_area_struct *vma = vm_area_alloc(mm);
@@ -24,7 +24,7 @@ struct vm_area_struct *alloc_vma(struct mm_struct *mm,
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
- vm_flags_reset(vma, vm_flags);
+ vma->flags = vma_flags;
vma_assert_detached(vma);
return vma;
@@ -38,9 +38,9 @@ void detach_free_vma(struct vm_area_struct *vma)
struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
unsigned long start, unsigned long end,
- pgoff_t pgoff, vm_flags_t vm_flags)
+ pgoff_t pgoff, vma_flags_t vma_flags)
{
- struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags);
+ struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vma_flags);
if (vma == NULL)
return NULL;
diff --git a/tools/testing/vma/shared.h b/tools/testing/vma/shared.h
index 6c64211cfa22..8b9e3b11c3cb 100644
--- a/tools/testing/vma/shared.h
+++ b/tools/testing/vma/shared.h
@@ -35,6 +35,24 @@
#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
+#define ASSERT_FLAGS_SAME_MASK(_flags, _flags_other) \
+ ASSERT_TRUE(vma_flags_same_mask((_flags), (_flags_other)))
+
+#define ASSERT_FLAGS_NOT_SAME_MASK(_flags, _flags_other) \
+ ASSERT_FALSE(vma_flags_same_mask((_flags), (_flags_other)))
+
+#define ASSERT_FLAGS_SAME(_flags, ...) \
+ ASSERT_TRUE(vma_flags_same(_flags, __VA_ARGS__))
+
+#define ASSERT_FLAGS_NOT_SAME(_flags, ...) \
+ ASSERT_FALSE(vma_flags_same(_flags, __VA_ARGS__))
+
+#define ASSERT_FLAGS_EMPTY(_flags) \
+ ASSERT_TRUE(vma_flags_empty(_flags))
+
+#define ASSERT_FLAGS_NONEMPTY(_flags) \
+ ASSERT_FALSE(vma_flags_empty(_flags))
+
#define IS_SET(_val, _flags) ((_val & _flags) == _flags)
extern bool fail_prealloc;
@@ -76,7 +94,7 @@ static inline void dummy_close(struct vm_area_struct *)
/* Helper function to simply allocate a VMA. */
struct vm_area_struct *alloc_vma(struct mm_struct *mm,
unsigned long start, unsigned long end,
- pgoff_t pgoff, vm_flags_t vm_flags);
+ pgoff_t pgoff, vma_flags_t vma_flags);
/* Helper function to detach and free a VMA. */
void detach_free_vma(struct vm_area_struct *vma);
@@ -84,7 +102,7 @@ void detach_free_vma(struct vm_area_struct *vma);
/* Helper function to allocate a VMA and link it to the tree. */
struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
unsigned long start, unsigned long end,
- pgoff_t pgoff, vm_flags_t vm_flags);
+ pgoff_t pgoff, vma_flags_t vma_flags);
/*
* Helper function to reset the dummy anon_vma to indicate it has not been
diff --git a/tools/testing/vma/tests/merge.c b/tools/testing/vma/tests/merge.c
index 3708dc6945b0..03b6f9820e0a 100644
--- a/tools/testing/vma/tests/merge.c
+++ b/tools/testing/vma/tests/merge.c
@@ -33,7 +33,7 @@ static int expand_existing(struct vma_merge_struct *vmg)
* specified new range.
*/
void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags)
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags)
{
vma_iter_set(vmg->vmi, start);
@@ -45,7 +45,7 @@ void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
vmg->start = start;
vmg->end = end;
vmg->pgoff = pgoff;
- vmg->vm_flags = vm_flags;
+ vmg->vma_flags = vma_flags;
vmg->just_expand = false;
vmg->__remove_middle = false;
@@ -56,10 +56,10 @@ void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
/* Helper function to set both the VMG range and its anon_vma. */
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
struct anon_vma *anon_vma)
{
- vmg_set_range(vmg, start, end, pgoff, vm_flags);
+ vmg_set_range(vmg, start, end, pgoff, vma_flags);
vmg->anon_vma = anon_vma;
}
@@ -71,12 +71,12 @@ static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long s
*/
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
bool *was_merged)
{
struct vm_area_struct *merged;
- vmg_set_range(vmg, start, end, pgoff, vm_flags);
+ vmg_set_range(vmg, start, end, pgoff, vma_flags);
merged = merge_new(vmg);
if (merged) {
@@ -89,23 +89,24 @@ static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
- return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
+ return alloc_and_link_vma(mm, start, end, pgoff, vma_flags);
}
static bool test_simple_merge(void)
{
struct vm_area_struct *vma;
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
- struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
+ struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
+ struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vma_flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
.start = 0x1000,
.end = 0x2000,
- .vm_flags = vm_flags,
+ .vma_flags = vma_flags,
.pgoff = 1,
};
@@ -118,7 +119,7 @@ static bool test_simple_merge(void)
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 0);
- ASSERT_EQ(vma->vm_flags, vm_flags);
+ ASSERT_FLAGS_SAME_MASK(&vma->flags, vma_flags);
detach_free_vma(vma);
mtree_destroy(&mm.mm_mt);
@@ -129,11 +130,11 @@ static bool test_simple_merge(void)
static bool test_simple_modify(void)
{
struct vm_area_struct *vma;
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
+ struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
- vm_flags_t flags = VM_READ | VM_MAYREAD;
ASSERT_FALSE(attach_vma(&mm, init_vma));
@@ -142,7 +143,7 @@ static bool test_simple_modify(void)
* performs the merge/split only.
*/
vma = vma_modify_flags(&vmi, init_vma, init_vma,
- 0x1000, 0x2000, &flags);
+ 0x1000, 0x2000, &vma_flags);
ASSERT_NE(vma, NULL);
/* We modify the provided VMA, and on split allocate new VMAs. */
ASSERT_EQ(vma, init_vma);
@@ -189,9 +190,10 @@ static bool test_simple_modify(void)
static bool test_simple_expand(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
+ struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.vmi = &vmi,
@@ -217,9 +219,10 @@ static bool test_simple_expand(void)
static bool test_simple_shrink(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
+ struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0);
ASSERT_FALSE(attach_vma(&mm, vma));
@@ -238,7 +241,8 @@ static bool test_simple_shrink(void)
static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
@@ -265,31 +269,31 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
bool merged;
if (is_sticky)
- vm_flags |= VM_STICKY;
+ vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
/*
* 0123456789abc
* AA B CC
*/
- vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
+ vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
ASSERT_NE(vma_a, NULL);
if (a_is_sticky)
- vm_flags_set(vma_a, VM_STICKY);
+ vma_flags_set_mask(&vma_a->flags, VMA_STICKY_FLAGS);
/* We give each VMA a single avc so we can test anon_vma duplication. */
INIT_LIST_HEAD(&vma_a->anon_vma_chain);
list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
- vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
ASSERT_NE(vma_b, NULL);
if (b_is_sticky)
- vm_flags_set(vma_b, VM_STICKY);
+ vma_flags_set_mask(&vma_b->flags, VMA_STICKY_FLAGS);
INIT_LIST_HEAD(&vma_b->anon_vma_chain);
list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
- vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags);
+ vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vma_flags);
ASSERT_NE(vma_c, NULL);
if (c_is_sticky)
- vm_flags_set(vma_c, VM_STICKY);
+ vma_flags_set_mask(&vma_c->flags, VMA_STICKY_FLAGS);
INIT_LIST_HEAD(&vma_c->anon_vma_chain);
list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
@@ -299,7 +303,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* 0123456789abc
* AA B ** CC
*/
- vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged);
+ vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vma_flags, &merged);
ASSERT_NE(vma_d, NULL);
INIT_LIST_HEAD(&vma_d->anon_vma_chain);
list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
@@ -314,7 +318,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
*/
vma_a->vm_ops = &vm_ops; /* This should have no impact. */
vma_b->anon_vma = &dummy_anon_vma;
- vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Merge with A, delete B. */
ASSERT_TRUE(merged);
@@ -325,7 +329,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky || a_is_sticky || b_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to PREVIOUS VMA.
@@ -333,7 +337,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* 0123456789abc
* AAAA* DD CC
*/
- vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Extend A. */
ASSERT_TRUE(merged);
@@ -344,7 +348,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky || a_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to NEXT VMA.
@@ -354,7 +358,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
*/
vma_d->anon_vma = &dummy_anon_vma;
vma_d->vm_ops = &vm_ops; /* This should have no impact. */
- vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vma_flags, &merged);
ASSERT_EQ(vma, vma_d);
/* Prepend. */
ASSERT_TRUE(merged);
@@ -365,7 +369,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky) /* D uses is_sticky. */
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge BOTH sides.
@@ -374,7 +378,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* AAAAA*DDD CC
*/
vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
- vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Merge with A, delete D. */
ASSERT_TRUE(merged);
@@ -385,7 +389,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (is_sticky || a_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to NEXT VMA.
@@ -394,7 +398,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* AAAAAAAAA *CC
*/
vma_c->anon_vma = &dummy_anon_vma;
- vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vma_flags, &merged);
ASSERT_EQ(vma, vma_c);
/* Prepend C. */
ASSERT_TRUE(merged);
@@ -405,7 +409,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (is_sticky || c_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge BOTH sides.
@@ -413,7 +417,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* 0123456789abc
* AAAAAAAAA*CCC
*/
- vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Extend A and delete C. */
ASSERT_TRUE(merged);
@@ -424,7 +428,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 1);
if (is_sticky || a_is_sticky || c_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Final state.
@@ -469,29 +473,30 @@ static bool test_merge_new(void)
static bool test_vma_merge_special_flags(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
- vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
- vm_flags_t all_special_flags = 0;
+ vma_flag_t special_flags[] = { VMA_IO_BIT, VMA_DONTEXPAND_BIT,
+ VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT };
+ vma_flags_t all_special_flags = EMPTY_VMA_FLAGS;
int i;
struct vm_area_struct *vma_left, *vma;
/* Make sure there aren't new VM_SPECIAL flags. */
- for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- all_special_flags |= special_flags[i];
- }
- ASSERT_EQ(all_special_flags, VM_SPECIAL);
+ for (i = 0; i < ARRAY_SIZE(special_flags); i++)
+ vma_flags_set(&all_special_flags, special_flags[i]);
+ ASSERT_FLAGS_SAME_MASK(&all_special_flags, VMA_SPECIAL_FLAGS);
/*
* 01234
* AAA
*/
- vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+ vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
ASSERT_NE(vma_left, NULL);
/* 1. Set up new VMA with special flag that would otherwise merge. */
@@ -502,12 +507,14 @@ static bool test_vma_merge_special_flags(void)
*
* This should merge if not for the VM_SPECIAL flag.
*/
- vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x4000, 3, vma_flags);
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- vm_flags_t special_flag = special_flags[i];
+ vma_flag_t special_flag = special_flags[i];
+ vma_flags_t flags = vma_flags;
- vm_flags_reset(vma_left, vm_flags | special_flag);
- vmg.vm_flags = vm_flags | special_flag;
+ vma_flags_set(&flags, special_flag);
+ vma_left->flags = flags;
+ vmg.vma_flags = flags;
vma = merge_new(&vmg);
ASSERT_EQ(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -521,15 +528,17 @@ static bool test_vma_merge_special_flags(void)
*
* Create a VMA to modify.
*/
- vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
ASSERT_NE(vma, NULL);
vmg.middle = vma;
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- vm_flags_t special_flag = special_flags[i];
+ vma_flag_t special_flag = special_flags[i];
+ vma_flags_t flags = vma_flags;
- vm_flags_reset(vma_left, vm_flags | special_flag);
- vmg.vm_flags = vm_flags | special_flag;
+ vma_flags_set(&flags, special_flag);
+ vma_left->flags = flags;
+ vmg.vma_flags = flags;
vma = merge_existing(&vmg);
ASSERT_EQ(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -541,7 +550,8 @@ static bool test_vma_merge_special_flags(void)
static bool test_vma_merge_with_close(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
@@ -621,11 +631,11 @@ static bool test_vma_merge_with_close(void)
* PPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
ASSERT_EQ(merge_new(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
@@ -646,11 +656,11 @@ static bool test_vma_merge_with_close(void)
* proceed.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -674,11 +684,11 @@ static bool test_vma_merge_with_close(void)
* proceed.
*/
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
/*
@@ -702,12 +712,12 @@ static bool test_vma_merge_with_close(void)
* PPPVVNNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -728,12 +738,12 @@ static bool test_vma_merge_with_close(void)
* PPPPPNNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -750,15 +760,16 @@ static bool test_vma_merge_with_close(void)
static bool test_vma_merge_new_with_close(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
- struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
- struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags);
+ struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
+ struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vma_flags);
const struct vm_operations_struct vm_ops = {
.close = dummy_close,
};
@@ -788,7 +799,7 @@ static bool test_vma_merge_new_with_close(void)
vma_prev->vm_ops = &vm_ops;
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags);
+ vmg_set_range(&vmg, 0x2000, 0x5000, 2, vma_flags);
vma = merge_new(&vmg);
ASSERT_NE(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -805,9 +816,10 @@ static bool test_vma_merge_new_with_close(void)
static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
- vm_flags_t prev_flags = vm_flags;
- vm_flags_t next_flags = vm_flags;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
+ vma_flags_t prev_flags = vma_flags;
+ vma_flags_t next_flags = vma_flags;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_prev, *vma_next;
@@ -821,11 +833,11 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
struct anon_vma_chain avc = {};
if (prev_is_sticky)
- prev_flags |= VM_STICKY;
+ vma_flags_set_mask(&prev_flags, VMA_STICKY_FLAGS);
if (middle_is_sticky)
- vm_flags |= VM_STICKY;
+ vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
if (next_is_sticky)
- next_flags |= VM_STICKY;
+ vma_flags_set_mask(&next_flags, VMA_STICKY_FLAGS);
/*
* Merge right case - partial span.
@@ -837,11 +849,11 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
* 0123456789
* VNNNNNN
*/
- vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
vma->vm_ops = &vm_ops; /* This should have no impact. */
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
vmg.middle = vma;
vmg.prev = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -858,7 +870,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma_next));
ASSERT_EQ(mm.map_count, 2);
if (middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
@@ -873,10 +885,10 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
* 0123456789
* NNNNNNN
*/
- vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vma_flags, &dummy_anon_vma);
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_EQ(merge_existing(&vmg), vma_next);
@@ -888,7 +900,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma_next));
ASSERT_EQ(mm.map_count, 1);
if (middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted vma. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
@@ -905,9 +917,9 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
vma->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -924,7 +936,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (prev_is_sticky || middle_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
@@ -941,8 +953,8 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -955,7 +967,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_EQ(mm.map_count, 1);
if (prev_is_sticky || middle_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted vma. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
@@ -972,9 +984,9 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -987,7 +999,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_EQ(mm.map_count, 1);
if (prev_is_sticky || middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted prev and next. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
@@ -1008,40 +1020,40 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);
- vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x5000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
+ vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags);
+ vmg_set_range(&vmg, 0x6000, 0x7000, 6, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x7000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x6000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
+ vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
@@ -1067,7 +1079,8 @@ static bool test_merge_existing(void)
static bool test_anon_vma_non_mergeable(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_prev, *vma_next;
@@ -1091,9 +1104,9 @@ static bool test_anon_vma_non_mergeable(void)
* 0123456789
* PPPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);
/*
* Give both prev and next single anon_vma_chain fields, so they will
@@ -1101,7 +1114,7 @@ static bool test_anon_vma_non_mergeable(void)
*
* However, when prev is compared to next, the merge should fail.
*/
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
@@ -1129,10 +1142,10 @@ static bool test_anon_vma_non_mergeable(void)
* 0123456789
* PPPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
vmg.prev = vma_prev;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
@@ -1154,7 +1167,8 @@ static bool test_anon_vma_non_mergeable(void)
static bool test_dup_anon_vma(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
@@ -1175,11 +1189,11 @@ static bool test_dup_anon_vma(void)
* This covers new VMA merging, as these operations amount to a VMA
* expand.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma_next->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags);
+ vmg_set_range(&vmg, 0, 0x5000, 0, vma_flags);
vmg.target = vma_prev;
vmg.next = vma_next;
@@ -1201,16 +1215,16 @@ static bool test_dup_anon_vma(void)
* extend delete delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
/* Initialise avc so mergeability check passes. */
INIT_LIST_HEAD(&vma_next->anon_vma_chain);
list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
vma_next->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -1234,12 +1248,12 @@ static bool test_dup_anon_vma(void)
* extend delete delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
vmg.anon_vma = &dummy_anon_vma;
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -1263,11 +1277,11 @@ static bool test_dup_anon_vma(void)
* extend shrink/delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -1291,11 +1305,11 @@ static bool test_dup_anon_vma(void)
* shrink/delete extend
*/
- vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
@@ -1314,7 +1328,8 @@ static bool test_dup_anon_vma(void)
static bool test_vmi_prealloc_fail(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
@@ -1330,11 +1345,11 @@ static bool test_vmi_prealloc_fail(void)
* the duplicated anon_vma is unlinked.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->anon_vma = &dummy_anon_vma;
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -1358,11 +1373,11 @@ static bool test_vmi_prealloc_fail(void)
* performed in this case too.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0, 0x5000, 3, vma_flags);
vmg.target = vma_prev;
vmg.next = vma;
@@ -1380,13 +1395,14 @@ static bool test_vmi_prealloc_fail(void)
static bool test_merge_extend(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0x1000);
struct vm_area_struct *vma;
- vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags);
- alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vma_flags);
+ alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
/*
* Extend a VMA into the gap between itself and the following VMA.
@@ -1410,11 +1426,12 @@ static bool test_merge_extend(void)
static bool test_expand_only_mode(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma_prev, *vma;
- VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5);
+ VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vma_flags, 5);
/*
* Place a VMA prior to the one we're expanding so we assert that we do
@@ -1422,14 +1439,14 @@ static bool test_expand_only_mode(void)
* have, through the use of the just_expand flag, indicated we do not
* need to do so.
*/
- alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
+ alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
/*
* We will be positioned at the prev VMA, but looking to expand to
* 0x9000.
*/
vma_iter_set(&vmi, 0x3000);
- vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.just_expand = true;
diff --git a/tools/testing/vma/tests/mmap.c b/tools/testing/vma/tests/mmap.c
index bded4ecbe5db..c85bc000d1cb 100644
--- a/tools/testing/vma/tests/mmap.c
+++ b/tools/testing/vma/tests/mmap.c
@@ -2,6 +2,8 @@
static bool test_mmap_region_basic(void)
{
+ const vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
unsigned long addr;
struct vm_area_struct *vma;
@@ -10,27 +12,19 @@ static bool test_mmap_region_basic(void)
current->mm = &mm;
/* Map at 0x300000, length 0x3000. */
- addr = __mmap_region(NULL, 0x300000, 0x3000,
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
- 0x300, NULL);
+ addr = __mmap_region(NULL, 0x300000, 0x3000, vma_flags, 0x300, NULL);
ASSERT_EQ(addr, 0x300000);
/* Map at 0x250000, length 0x3000. */
- addr = __mmap_region(NULL, 0x250000, 0x3000,
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
- 0x250, NULL);
+ addr = __mmap_region(NULL, 0x250000, 0x3000, vma_flags, 0x250, NULL);
ASSERT_EQ(addr, 0x250000);
/* Map at 0x303000, merging to 0x300000 of length 0x6000. */
- addr = __mmap_region(NULL, 0x303000, 0x3000,
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
- 0x303, NULL);
+ addr = __mmap_region(NULL, 0x303000, 0x3000, vma_flags, 0x303, NULL);
ASSERT_EQ(addr, 0x303000);
/* Map at 0x24d000, merging to 0x250000 of length 0x6000. */
- addr = __mmap_region(NULL, 0x24d000, 0x3000,
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
- 0x24d, NULL);
+ addr = __mmap_region(NULL, 0x24d000, 0x3000, vma_flags, 0x24d, NULL);
ASSERT_EQ(addr, 0x24d000);
ASSERT_EQ(mm.map_count, 2);
diff --git a/tools/testing/vma/tests/vma.c b/tools/testing/vma/tests/vma.c
index c54ffc954f11..754a2da06321 100644
--- a/tools/testing/vma/tests/vma.c
+++ b/tools/testing/vma/tests/vma.c
@@ -5,11 +5,12 @@ static bool compare_legacy_flags(vm_flags_t legacy_flags, vma_flags_t flags)
const unsigned long legacy_val = legacy_flags;
/* The lower word should contain the precise same value. */
const unsigned long flags_lower = flags.__vma_flags[0];
-#if NUM_VMA_FLAGS > BITS_PER_LONG
+ vma_flags_t converted_flags;
+#if NUM_VMA_FLAG_BITS > BITS_PER_LONG
int i;
/* All bits in higher flag values should be zero. */
- for (i = 1; i < NUM_VMA_FLAGS / BITS_PER_LONG; i++) {
+ for (i = 1; i < NUM_VMA_FLAG_BITS / BITS_PER_LONG; i++) {
if (flags.__vma_flags[i] != 0)
return false;
}
@@ -17,12 +18,18 @@ static bool compare_legacy_flags(vm_flags_t legacy_flags, vma_flags_t flags)
static_assert(sizeof(legacy_flags) == sizeof(unsigned long));
+ /* Assert that legacy flag helpers work correctly. */
+ converted_flags = legacy_to_vma_flags(legacy_flags);
+ ASSERT_FLAGS_SAME_MASK(&converted_flags, flags);
+ ASSERT_EQ(vma_flags_to_legacy(flags), legacy_flags);
+
return legacy_val == flags_lower;
}
static bool test_copy_vma(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
bool need_locks = false;
VMA_ITERATOR(vmi, &mm, 0);
@@ -30,7 +37,7 @@ static bool test_copy_vma(void)
/* Move backwards and do not merge. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
ASSERT_NE(vma_new, vma);
ASSERT_EQ(vma_new->vm_start, 0);
@@ -42,8 +49,8 @@ static bool test_copy_vma(void)
/* Move a VMA into position next to another and merge the two. */
- vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vma_flags);
vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
vma_assert_attached(vma_new);
@@ -61,7 +68,6 @@ static bool test_vma_flags_unchanged(void)
struct vm_area_struct vma;
struct vm_area_desc desc;
-
vma.flags = EMPTY_VMA_FLAGS;
desc.vma_flags = EMPTY_VMA_FLAGS;
@@ -116,6 +122,7 @@ static bool test_vma_flags_cleared(void)
return true;
}
+#if NUM_VMA_FLAG_BITS > 64
/*
* Assert that VMA flag functions that operate at the system word level function
* correctly.
@@ -124,10 +131,14 @@ static bool test_vma_flags_word(void)
{
vma_flags_t flags = EMPTY_VMA_FLAGS;
const vma_flags_t comparison =
- mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, 64, 65);
+ mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT
+
+ , 64, 65
+ );
/* Set some custom high flags. */
vma_flags_set(&flags, 64, 65);
+
/* Now overwrite the first word. */
vma_flags_overwrite_word(&flags, VM_READ | VM_WRITE);
/* Ensure they are equal. */
@@ -158,29 +169,93 @@ static bool test_vma_flags_word(void)
return true;
}
+#endif /* NUM_VMA_FLAG_BITS > 64 */
/* Ensure that vma_flags_test() and friends works correctly. */
static bool test_vma_flags_test(void)
{
- const vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
- VMA_EXEC_BIT, 64, 65);
- struct vm_area_struct vma;
- struct vm_area_desc desc;
+ vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+ struct vm_area_desc desc = {
+ .vma_flags = flags,
+ };
+ struct vm_area_struct vma = {
+ .flags = flags,
+ };
+
+#define do_test(_flag) \
+ ASSERT_TRUE(vma_flags_test(&flags, _flag)); \
+ ASSERT_TRUE(vma_flags_test_single_mask(&flags, mk_vma_flags(_flag))); \
+ ASSERT_TRUE(vma_test(&vma, _flag)); \
+ ASSERT_TRUE(vma_test_single_mask(&vma, mk_vma_flags(_flag))); \
+ ASSERT_TRUE(vma_desc_test(&desc, _flag))
+
+#define do_test_false(_flag) \
+ ASSERT_FALSE(vma_flags_test(&flags, _flag)); \
+ ASSERT_FALSE(vma_flags_test_single_mask(&flags, mk_vma_flags(_flag))); \
+ ASSERT_FALSE(vma_test(&vma, _flag)); \
+ ASSERT_FALSE(vma_test_single_mask(&vma, mk_vma_flags(_flag))); \
+ ASSERT_FALSE(vma_desc_test(&desc, _flag))
- vma.flags = flags;
- desc.vma_flags = flags;
+ do_test(VMA_READ_BIT);
+ do_test(VMA_WRITE_BIT);
+ do_test(VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
+ do_test(64);
+ do_test(65);
+#endif
+ do_test_false(VMA_MAYWRITE_BIT);
+#if NUM_VMA_FLAG_BITS > 64
+ do_test_false(66);
+#endif
+
+#undef do_test
+#undef do_test_false
+
+ /* We define the _single_mask() variants to return false if empty. */
+ ASSERT_FALSE(vma_flags_test_single_mask(&flags, EMPTY_VMA_FLAGS));
+ ASSERT_FALSE(vma_test_single_mask(&vma, EMPTY_VMA_FLAGS));
+ /* Even when both flags and tested flag mask are empty! */
+ flags = EMPTY_VMA_FLAGS;
+ vma.flags = EMPTY_VMA_FLAGS;
+ ASSERT_FALSE(vma_flags_test_single_mask(&flags, EMPTY_VMA_FLAGS));
+ ASSERT_FALSE(vma_test_single_mask(&vma, EMPTY_VMA_FLAGS));
+
+ return true;
+}
+
+/* Ensure that vma_flags_test_any() and friends work correctly. */
+static bool test_vma_flags_test_any(void)
+{
+ const vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+ struct vm_area_struct vma = {
+ .flags = flags,
+ };
+ struct vm_area_desc desc = {
+ .vma_flags = flags,
+ };
#define do_test(...) \
- ASSERT_TRUE(vma_flags_test(&flags, __VA_ARGS__)); \
- ASSERT_TRUE(vma_desc_test_flags(&desc, __VA_ARGS__))
+ ASSERT_TRUE(vma_flags_test_any(&flags, __VA_ARGS__)); \
+ ASSERT_TRUE(vma_desc_test_any(&desc, __VA_ARGS__)); \
+ ASSERT_TRUE(vma_test_any(&vma, __VA_ARGS__));
#define do_test_all_true(...) \
ASSERT_TRUE(vma_flags_test_all(&flags, __VA_ARGS__)); \
- ASSERT_TRUE(vma_test_all_flags(&vma, __VA_ARGS__))
+ ASSERT_TRUE(vma_test_all(&vma, __VA_ARGS__))
#define do_test_all_false(...) \
ASSERT_FALSE(vma_flags_test_all(&flags, __VA_ARGS__)); \
- ASSERT_FALSE(vma_test_all_flags(&vma, __VA_ARGS__))
+ ASSERT_FALSE(vma_test_all(&vma, __VA_ARGS__))
/*
* Testing for some flags that are present, some that are not - should
@@ -189,10 +264,12 @@ static bool test_vma_flags_test(void)
do_test(VMA_READ_BIT, VMA_MAYREAD_BIT, VMA_SEQ_READ_BIT);
/* However, the ...test_all() variant should NOT pass. */
do_test_all_false(VMA_READ_BIT, VMA_MAYREAD_BIT, VMA_SEQ_READ_BIT);
+#if NUM_VMA_FLAG_BITS > 64
/* But should pass for flags present. */
do_test_all_true(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64, 65);
/* Also subsets... */
do_test_all_true(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64);
+#endif
do_test_all_true(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
do_test_all_true(VMA_READ_BIT, VMA_WRITE_BIT);
do_test_all_true(VMA_READ_BIT);
@@ -200,7 +277,7 @@ static bool test_vma_flags_test(void)
* Check _mask variant. We don't need to test extensively as macro
* helper is the equivalent.
*/
- ASSERT_TRUE(vma_flags_test_mask(&flags, flags));
+ ASSERT_TRUE(vma_flags_test_any_mask(&flags, flags));
ASSERT_TRUE(vma_flags_test_all_mask(&flags, flags));
/* Single bits. */
@@ -245,6 +322,10 @@ static bool test_vma_flags_test(void)
do_test(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64, 65);
#endif
+ /* Testing all flags against none trivially succeeds. */
+ ASSERT_TRUE(vma_flags_test_all_mask(&flags, EMPTY_VMA_FLAGS));
+ ASSERT_TRUE(vma_test_all_mask(&vma, EMPTY_VMA_FLAGS));
+
#undef do_test
#undef do_test_all_true
#undef do_test_all_false
@@ -256,59 +337,77 @@ static bool test_vma_flags_test(void)
static bool test_vma_flags_clear(void)
{
vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
- VMA_EXEC_BIT, 64, 65);
- vma_flags_t mask = mk_vma_flags(VMA_EXEC_BIT, 64);
- struct vm_area_struct vma;
- struct vm_area_desc desc;
-
- vma.flags = flags;
- desc.vma_flags = flags;
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+ vma_flags_t mask = mk_vma_flags(VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64
+#endif
+ );
+ struct vm_area_struct vma = {
+ .flags = flags,
+ };
+ struct vm_area_desc desc = {
+ .vma_flags = flags,
+ };
/* Cursory check of _mask() variant, as the helper macros imply. */
vma_flags_clear_mask(&flags, mask);
- vma_flags_clear_mask(&vma.flags, mask);
+ vma_clear_flags_mask(&vma, mask);
vma_desc_clear_flags_mask(&desc, mask);
- ASSERT_FALSE(vma_flags_test(&flags, VMA_EXEC_BIT, 64));
- ASSERT_FALSE(vma_flags_test(&vma.flags, VMA_EXEC_BIT, 64));
- ASSERT_FALSE(vma_desc_test_flags(&desc, VMA_EXEC_BIT, 64));
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FALSE(vma_flags_test_any(&flags, VMA_EXEC_BIT, 64));
+ ASSERT_FALSE(vma_test_any(&vma, VMA_EXEC_BIT, 64));
+ ASSERT_FALSE(vma_desc_test_any(&desc, VMA_EXEC_BIT, 64));
/* Reset. */
vma_flags_set(&flags, VMA_EXEC_BIT, 64);
vma_set_flags(&vma, VMA_EXEC_BIT, 64);
vma_desc_set_flags(&desc, VMA_EXEC_BIT, 64);
+#endif
/*
* Clear the flags and assert clear worked, then reset flags back to
* include specified flags.
*/
-#define do_test_and_reset(...) \
- vma_flags_clear(&flags, __VA_ARGS__); \
- vma_flags_clear(&vma.flags, __VA_ARGS__); \
- vma_desc_clear_flags(&desc, __VA_ARGS__); \
- ASSERT_FALSE(vma_flags_test(&flags, __VA_ARGS__)); \
- ASSERT_FALSE(vma_flags_test(&vma.flags, __VA_ARGS__)); \
- ASSERT_FALSE(vma_desc_test_flags(&desc, __VA_ARGS__)); \
- vma_flags_set(&flags, __VA_ARGS__); \
- vma_set_flags(&vma, __VA_ARGS__); \
+#define do_test_and_reset(...) \
+ vma_flags_clear(&flags, __VA_ARGS__); \
+ vma_clear_flags(&vma, __VA_ARGS__); \
+ vma_desc_clear_flags(&desc, __VA_ARGS__); \
+ ASSERT_FALSE(vma_flags_test_any(&flags, __VA_ARGS__)); \
+ ASSERT_FALSE(vma_test_any(&vma, __VA_ARGS__)); \
+ ASSERT_FALSE(vma_desc_test_any(&desc, __VA_ARGS__)); \
+ vma_flags_set(&flags, __VA_ARGS__); \
+ vma_set_flags(&vma, __VA_ARGS__); \
vma_desc_set_flags(&desc, __VA_ARGS__)
/* Single flags. */
do_test_and_reset(VMA_READ_BIT);
do_test_and_reset(VMA_WRITE_BIT);
do_test_and_reset(VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
do_test_and_reset(64);
do_test_and_reset(65);
+#endif
/* Two flags, in different orders. */
do_test_and_reset(VMA_READ_BIT, VMA_WRITE_BIT);
do_test_and_reset(VMA_READ_BIT, VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
do_test_and_reset(VMA_READ_BIT, 64);
do_test_and_reset(VMA_READ_BIT, 65);
+#endif
do_test_and_reset(VMA_WRITE_BIT, VMA_READ_BIT);
do_test_and_reset(VMA_WRITE_BIT, VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
do_test_and_reset(VMA_WRITE_BIT, 64);
do_test_and_reset(VMA_WRITE_BIT, 65);
+#endif
do_test_and_reset(VMA_EXEC_BIT, VMA_READ_BIT);
do_test_and_reset(VMA_EXEC_BIT, VMA_WRITE_BIT);
+#if NUM_VMA_FLAG_BITS > 64
do_test_and_reset(VMA_EXEC_BIT, 64);
do_test_and_reset(VMA_EXEC_BIT, 65);
do_test_and_reset(64, VMA_READ_BIT);
@@ -319,6 +418,7 @@ static bool test_vma_flags_clear(void)
do_test_and_reset(65, VMA_WRITE_BIT);
do_test_and_reset(65, VMA_EXEC_BIT);
do_test_and_reset(65, 64);
+#endif
/* Three flags. */
@@ -328,12 +428,229 @@ static bool test_vma_flags_clear(void)
return true;
}
+/* Ensure that vma_flags_empty() works correctly. */
+static bool test_vma_flags_empty(void)
+{
+ vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+ ASSERT_FLAGS_NONEMPTY(&flags);
+ vma_flags_clear(&flags, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_NONEMPTY(&flags);
+ vma_flags_clear(&flags, 64, 65);
+ ASSERT_FLAGS_EMPTY(&flags);
+#else
+ ASSERT_FLAGS_EMPTY(&flags);
+#endif
+
+ return true;
+}
+
+/* Ensure that vma_flags_diff_pair() works correctly. */
+static bool test_vma_flags_diff(void)
+{
+ vma_flags_t flags1 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+ vma_flags_t flags2 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT, VMA_MAYWRITE_BIT,
+ VMA_MAYEXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65, 66, 67
+#endif
+ );
+ vma_flags_t diff = vma_flags_diff_pair(&flags1, &flags2);
+
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_SAME(&diff, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT, 66, 67);
+#else
+ ASSERT_FLAGS_SAME(&diff, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT);
+#endif
+ /* Should be the same even if re-ordered. */
+ diff = vma_flags_diff_pair(&flags2, &flags1);
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_SAME(&diff, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT, 66, 67);
+#else
+ ASSERT_FLAGS_SAME(&diff, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT);
+#endif
+
+ /* Should be no difference when applied against themselves. */
+ diff = vma_flags_diff_pair(&flags1, &flags1);
+ ASSERT_FLAGS_EMPTY(&diff);
+ diff = vma_flags_diff_pair(&flags2, &flags2);
+ ASSERT_FLAGS_EMPTY(&diff);
+
+ /* One set of flags against an empty one should equal the original. */
+ flags2 = EMPTY_VMA_FLAGS;
+ diff = vma_flags_diff_pair(&flags1, &flags2);
+ ASSERT_FLAGS_SAME_MASK(&diff, flags1);
+
+ /* A subset should work too. */
+ flags2 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT);
+ diff = vma_flags_diff_pair(&flags1, &flags2);
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_SAME(&diff, VMA_EXEC_BIT, 64, 65);
+#else
+ ASSERT_FLAGS_SAME(&diff, VMA_EXEC_BIT);
+#endif
+
+ return true;
+}
+
+/* Ensure that vma_flags_and() and friends work correctly. */
+static bool test_vma_flags_and(void)
+{
+ vma_flags_t flags1 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+ vma_flags_t flags2 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT, VMA_MAYWRITE_BIT,
+ VMA_MAYEXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65, 66, 67
+#endif
+ );
+ vma_flags_t flags3 = mk_vma_flags(VMA_IO_BIT, VMA_MAYBE_GUARD_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 68, 69
+#endif
+ );
+ vma_flags_t and = vma_flags_and_mask(&flags1, flags2);
+
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ 64, 65);
+#else
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+#endif
+
+ and = vma_flags_and_mask(&flags1, flags1);
+ ASSERT_FLAGS_SAME_MASK(&and, flags1);
+
+ and = vma_flags_and_mask(&flags2, flags2);
+ ASSERT_FLAGS_SAME_MASK(&and, flags2);
+
+ and = vma_flags_and_mask(&flags1, flags3);
+ ASSERT_FLAGS_EMPTY(&and);
+ and = vma_flags_and_mask(&flags2, flags3);
+ ASSERT_FLAGS_EMPTY(&and);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+
+#if NUM_VMA_FLAG_BITS > 64
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ 64);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ 64, 65);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64,
+ 65);
+#endif
+
+ /* And against some missing values. */
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ VMA_IO_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ VMA_IO_BIT, VMA_RAND_READ_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+
+#if NUM_VMA_FLAG_BITS > 64
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ VMA_IO_BIT, VMA_RAND_READ_BIT, 69);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+#endif
+
+ return true;
+}
+
+/* Ensure append_vma_flags() acts as expected. */
+static bool test_append_vma_flags(void)
+{
+ vma_flags_t flags = append_vma_flags(VMA_REMAP_FLAGS, VMA_READ_BIT,
+ VMA_WRITE_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+ ASSERT_FLAGS_SAME(&flags, VMA_IO_BIT, VMA_PFNMAP_BIT,
+ VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT, VMA_READ_BIT,
+ VMA_WRITE_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+ flags = append_vma_flags(EMPTY_VMA_FLAGS, VMA_READ_BIT, VMA_WRITE_BIT);
+ ASSERT_FLAGS_SAME(&flags, VMA_READ_BIT, VMA_WRITE_BIT);
+
+ return true;
+}
+
+/* Assert that vma_flags_count() behaves as expected. */
+static bool test_vma_flags_count(void)
+{
+ vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_EQ(vma_flags_count(&flags), 5);
+ vma_flags_clear(&flags, 64);
+ ASSERT_EQ(vma_flags_count(&flags), 4);
+ vma_flags_clear(&flags, 65);
+#endif
+ ASSERT_EQ(vma_flags_count(&flags), 3);
+ vma_flags_clear(&flags, VMA_EXEC_BIT);
+ ASSERT_EQ(vma_flags_count(&flags), 2);
+ vma_flags_clear(&flags, VMA_WRITE_BIT);
+ ASSERT_EQ(vma_flags_count(&flags), 1);
+ vma_flags_clear(&flags, VMA_READ_BIT);
+ ASSERT_EQ(vma_flags_count(&flags), 0);
+
+ return true;
+}
+
static void run_vma_tests(int *num_tests, int *num_fail)
{
TEST(copy_vma);
TEST(vma_flags_unchanged);
TEST(vma_flags_cleared);
+#if NUM_VMA_FLAG_BITS > 64
TEST(vma_flags_word);
+#endif
TEST(vma_flags_test);
+ TEST(vma_flags_test_any);
TEST(vma_flags_clear);
+ TEST(vma_flags_empty);
+ TEST(vma_flags_diff);
+ TEST(vma_flags_and);
+ TEST(append_vma_flags);
+ TEST(vma_flags_count);
}
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 0e1121e2ef23..e12ab2c80f95 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -51,6 +51,12 @@ typedef unsigned long pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef __bitwise unsigned int vm_fault_t;
+#define VM_WARN_ON(_expr) (WARN_ON(_expr))
+#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
+#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
+#define VM_BUG_ON(_expr) (BUG_ON(_expr))
+#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
+
#include "include/stubs.h"
#include "include/dup.h"
#include "include/custom.h"