Diffstat (limited to 'tools')
-rw-r--r--  tools/include/linux/bitmap.h                             |  24
-rw-r--r--  tools/lib/bitmap.c                                       |  10
-rw-r--r--  tools/testing/radix-tree/maple.c                         | 308
-rw-r--r--  tools/testing/selftests/cgroup/test_memcontrol.c         |  11
-rw-r--r--  tools/testing/selftests/cgroup/test_zswap.c              | 136
-rw-r--r--  tools/testing/selftests/damon/_damon_sysfs.py            |  12
-rw-r--r--  tools/testing/selftests/damon/config                     |   1
-rwxr-xr-x  tools/testing/selftests/damon/drgn_dump_damon_status.py  |   1
-rwxr-xr-x  tools/testing/selftests/damon/sysfs.py                   |   7
-rw-r--r--  tools/testing/selftests/kho/init.c                       |  20
-rw-r--r--  tools/testing/selftests/mm/.gitignore                    |   1
-rw-r--r--  tools/testing/selftests/mm/Makefile                      |   1
-rw-r--r--  tools/testing/selftests/mm/folio_split_race_test.c       | 297
-rw-r--r--  tools/testing/selftests/mm/memory-failure.c              |   1
-rw-r--r--  tools/testing/selftests/mm/migration.c                   |   3
-rw-r--r--  tools/testing/selftests/mm/pagemap_ioctl.c               |  20
-rwxr-xr-x  tools/testing/selftests/mm/run_vmtests.sh                |   2
-rw-r--r--  tools/testing/vma/include/custom.h                       |  26
-rw-r--r--  tools/testing/vma/include/dup.h                          | 492
-rw-r--r--  tools/testing/vma/include/stubs.h                        |  24
-rw-r--r--  tools/testing/vma/main.c                                 |   2
-rw-r--r--  tools/testing/vma/shared.c                               |   8
-rw-r--r--  tools/testing/vma/shared.h                               |  22
-rw-r--r--  tools/testing/vma/tests/merge.c                          | 311
-rw-r--r--  tools/testing/vma/tests/mmap.c                           |  18
-rw-r--r--  tools/testing/vma/tests/vma.c                            | 395
-rw-r--r--  tools/testing/vma/vma_internal.h                         |   6
27 files changed, 1747 insertions(+), 412 deletions(-)
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 250883090a5d..5cb4f3942fd3 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -28,6 +28,8 @@ bool __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -53,6 +55,17 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
+static __always_inline
+void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
+{
+ unsigned int len = bitmap_size(nbits);
+
+ if (small_const_nbits(nbits))
+ *dst = *src;
+ else
+ memcpy(dst, src, len);
+}
+
static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -209,4 +222,15 @@ static inline void bitmap_clear(unsigned long *map, unsigned int start,
else
__bitmap_clear(map, start, nbits);
}
+
+static __always_inline
+void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = *src1 ^ *src2;
+ else
+ __bitmap_xor(dst, src1, src2, nbits);
+}
+
#endif /* _TOOLS_LINUX_BITMAP_H */
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index aa83d22c45e3..fedc9070f0e4 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -169,3 +169,13 @@ bool __bitmap_subset(const unsigned long *bitmap1,
return false;
return true;
}
+
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] ^ bitmap2[k];
+}
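
The tools copy of __bitmap_xor() mirrors the kernel's: the XOR is computed a
whole word at a time over BITS_TO_LONGS(bits) longs, so bits beyond nbits in
the final word may be written as well. A minimal standalone sketch of the
bitmap_xor() semantics follows (plain C, not part of the patch; all names
here are illustrative):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Word-at-a-time XOR, equivalent to the __bitmap_xor() added above. */
    static void bitmap_xor_demo(unsigned long *dst, const unsigned long *a,
                                const unsigned long *b, unsigned int bits)
    {
            unsigned int k, nr = BITS_TO_LONGS(bits);

            for (k = 0; k < nr; k++)
                    dst[k] = a[k] ^ b[k];
    }

    int main(void)
    {
            unsigned long a[2] = { 0x0fUL, 0 };     /* bits 0-3 set */
            unsigned long b[2] = { 0x3cUL, 0 };     /* bits 2-5 set */
            unsigned long diff[2];

            bitmap_xor_demo(diff, a, b, 128);
            printf("%#lx\n", diff[0]);      /* 0x33: bits 0, 1, 4, 5 */
            return 0;
    }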
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 5c1b18e3ed21..feedd5ab7058 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -38,6 +38,7 @@ struct rcu_test_struct2 {
unsigned long index[RCU_RANGE_COUNT];
unsigned long last[RCU_RANGE_COUNT];
+ pthread_mutex_t dump;
};
struct rcu_test_struct3 {
@@ -33997,8 +33998,25 @@ static void *rcu_reader_fwd(void *ptr)
}
}
- RCU_MT_BUG_ON(test, mas.index != r_start);
- RCU_MT_BUG_ON(test, mas.last != r_end);
+ if (mas.index != r_start) {
+ if (pthread_mutex_trylock(&test->dump) != 0) {
+ rcu_read_unlock();
+ goto quit;
+ }
+ printk("start is wrong: %lx (%lu) vs expected %lx (%lu)\n",
+ mas.index, mas.index, r_start, r_start);
+ RCU_MT_BUG_ON(test, mas.index != r_start);
+ }
+
+ if (mas.last != r_end) {
+ if (pthread_mutex_trylock(&test->dump) != 0) {
+ rcu_read_unlock();
+ goto quit;
+ }
+ printk("last is wrong: %lx (%lu) vs expected %lx (%lu)\n",
+ mas.last, mas.last, r_end, r_end);
+ RCU_MT_BUG_ON(test, mas.last != r_end);
+ }
if (i == reader->flip) {
alt = xa_mk_value(index + i + RCU_RANGE_COUNT);
@@ -34014,7 +34032,8 @@ static void *rcu_reader_fwd(void *ptr)
else if (entry == alt)
toggled = true;
else {
- printk("!!%lu-%lu -> %p not %p or %p\n", mas.index, mas.last, entry, expected, alt);
+ printk("!!%lu-%lu -> %p not %p or %p\n",
+ mas.index, mas.last, entry, expected, alt);
RCU_MT_BUG_ON(test, 1);
}
@@ -34047,9 +34066,11 @@ static void *rcu_reader_fwd(void *ptr)
usleep(test->pause);
}
+quit:
rcu_unregister_thread();
return NULL;
}
+
/* RCU reader in decreasing index */
static void *rcu_reader_rev(void *ptr)
{
@@ -34119,13 +34140,17 @@ static void *rcu_reader_rev(void *ptr)
line = __LINE__;
if (mas.index != r_start) {
+ if (pthread_mutex_trylock(&test->dump) != 0) {
+ rcu_read_unlock();
+ goto quit;
+ }
+
alt = xa_mk_value(index + i * 2 + 1 +
RCU_RANGE_COUNT);
mt_dump(test->mt, mt_dump_dec);
- printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n",
- mas.index, mas.last, entry,
- r_start, r_end, expected, alt,
- line, i);
+ printk("Error: %p %lu-%lu %p != %lu-%lu %p %p line %d i %d\n",
+ mas.node, mas.index, mas.last, entry,
+ r_start, r_end, expected, alt, line, i);
}
RCU_MT_BUG_ON(test, mas.index != r_start);
RCU_MT_BUG_ON(test, mas.last != r_end);
@@ -34180,6 +34205,7 @@ static void *rcu_reader_rev(void *ptr)
usleep(test->pause);
}
+quit:
rcu_unregister_thread();
return NULL;
}
@@ -34329,6 +34355,7 @@ static void rcu_stress(struct maple_tree *mt, bool forward)
test.seen_modified = 0;
test.thread_count = 0;
test.start = test.stop = false;
+ pthread_mutex_init(&test.dump, NULL);
seed = time(NULL);
srand(seed);
for (i = 0; i < RCU_RANGE_COUNT; i++) {
@@ -34414,6 +34441,7 @@ struct rcu_test_struct {
unsigned long removed; /* The index of the removed entry */
unsigned long added; /* The index of the added entry */
unsigned long toggle; /* The index of the toggled entry */
+ pthread_mutex_t dump;
};
static inline
@@ -34506,7 +34534,9 @@ static void *rcu_loop(void *ptr)
/* Out of the interesting range */
if (mas.index < test->index || mas.index > test->last) {
if (entry != expected) {
- printk("%lx - %lx = %p not %p\n",
+ if (pthread_mutex_trylock(&test->dump) != 0)
+ break;
+ printk("\nERROR: %lx - %lx = %p not %p\n",
mas.index, mas.last, entry, expected);
}
MT_BUG_ON(test->mt, entry != expected);
@@ -34854,6 +34884,7 @@ static noinline void __init check_rcu_threaded(struct maple_tree *mt)
vals.range_end = ULONG_MAX;
vals.seen_entry2 = 0;
vals.seen_entry3 = 0;
+ pthread_mutex_init(&vals.dump, NULL);
run_check_rcu(mt, &vals);
mtree_destroy(mt);
@@ -35250,6 +35281,8 @@ static noinline void __init check_spanning_write(struct maple_tree *mt)
{
unsigned long i, max = 5000;
MA_STATE(mas, mt, 1200, 2380);
+ struct maple_enode *enode;
+ struct maple_node *pnode;
for (i = 0; i <= max; i++)
mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
@@ -35373,7 +35406,18 @@ static noinline void __init check_spanning_write(struct maple_tree *mt)
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
for (i = 0; i <= max; i++)
mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
mtree_lock(mt);
+ if (MAPLE_32BIT) {
+ i = 47811;
+ do {
+ mas_set(&mas, i);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ i++;
+ mas_ascend(&mas);
+ } while (mas_data_end(&mas) < mt_slot_count(mas.node) - 1);
+ }
+
mas_set(&mas, 47606);
mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
mas_set(&mas, 47607);
@@ -35410,6 +35454,128 @@ static noinline void __init check_spanning_write(struct maple_tree *mt)
mas_set_range(&mas, 76, 875);
mas_store_gfp(&mas, NULL, GFP_KERNEL);
mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ if (MAPLE_32BIT)
+ i = 49750; /* 0xC25B */
+ else
+ i = 49835; /* 0xC2AB */
+
+ mtree_lock(mt);
+ /* Store a null across a boundary that ends in a null */
+ mas_set(&mas, i); /* 0xC2AB */
+ MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+ MT_BUG_ON(mt, mas.end != mas.offset);
+ MT_BUG_ON(mt, mas_next_range(&mas, ULONG_MAX) != NULL);
+ mas_set_range(&mas, i, mas.last - 1);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mt_validate(mt);
+
+ /* Store a null across a boundary that starts and ends in a null */
+ mas_set(&mas, 49849);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ MT_BUG_ON(mt, mas.index != 49846);
+ mas_set(&mas, 49876);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ MT_BUG_ON(mt, mas.last != 49879);
+ mas_set_range(&mas, 49849, 49876);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ /* Results in 49846-49879: (nil) */
+ MT_BUG_ON(mt, mas.index != 49846);
+ MT_BUG_ON(mt, mas.last != 49879);
+ mt_validate(mt);
+
+ /* Store a null across a boundary that starts and ends next to nulls */
+ mas_set(&mas, 49800);
+ MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+ MT_BUG_ON(mt, mas.index != 49800);
+ mas_set(&mas, 49815);
+ MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+ MT_BUG_ON(mt, mas.last != 49815);
+ mas_set_range(&mas, 49800, 49815);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ /* Results in 49796-49819: (nil) */
+ MT_BUG_ON(mt, mas.index != 49796);
+ MT_BUG_ON(mt, mas.last != 49819);
+ mt_validate(mt);
+
+ /* Store a value across a boundary that starts and ends in a null */
+ mas_set(&mas, 49907);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ MT_BUG_ON(mt, mas.index != 49906);
+ mas_set(&mas, 49928);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ MT_BUG_ON(mt, mas.last != 49929);
+ mas_set_range(&mas, 49907, 49928);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ MT_BUG_ON(mt, mas.index != 49907);
+ MT_BUG_ON(mt, mas.last != 49928);
+ mt_validate(mt);
+
+ /* Store a value across a node boundary that causes a 3 way split */
+
+ if (MAPLE_32BIT)
+ i = 49430; /* 0xc116 */
+ else
+ i = 49670; /* 0xC206 */
+
+ mas_set(&mas, i);
+ MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+ MT_BUG_ON(mt, mas.index != i);
+ MT_BUG_ON(mt, mas.end != mt_slot_count(mas.node) - 1);
+ enode = mas.node;
+ MT_BUG_ON(mt, mas_next_range(&mas, ULONG_MAX) != NULL);
+ MT_BUG_ON(mt, mas.index != i + 6);
+ MT_BUG_ON(mt, mas.end != mt_slot_count(mas.node) - 1);
+ MT_BUG_ON(mt, enode == mas.node);
+ mas_set_range(&mas, i + 2, i + 7);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ MT_BUG_ON(mt, mas.index != i + 2);
+ MT_BUG_ON(mt, mas.last != i + 7);
+ mt_validate(mt);
+
+ /* 2 levels of basically the same testing */
+
+ if (MAPLE_32BIT) {
+ /*
+ * 32bit needs a bit more work to fill the nodes.
+ * The two parent nodes need to be filled (they have one space
+ * vacant) without causing a split at the store locations (or
+ * the siblings).
+ */
+ i = 44426;
+ mas_set(&mas, i);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ i = 45126;
+ mas_set(&mas, i);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ i = 44790;
+ } else {
+ /* 48950 - 48955 => ptr, 48956 - 48959 => NULL */
+ i = 48950;
+
+ }
+ mas_set(&mas, i);
+ MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+ MT_BUG_ON(mt, mas.index != i);
+ MT_BUG_ON(mt, mas.end != mt_slot_count(mas.node) - 1);
+ enode = mas.node;
+ pnode = mte_parent(enode);
+ MT_BUG_ON(mt, mas_next_range(&mas, ULONG_MAX) != NULL);
+ MT_BUG_ON(mt, mas.index != i + 6);
+ MT_BUG_ON(mt, mas.end != mt_slot_count(mas.node) - 1);
+ MT_BUG_ON(mt, enode == mas.node);
+ MT_BUG_ON(mt, pnode == mte_parent(mas.node));
+ mas_set_range(&mas, i + 2, i + 8);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mt_validate(mt);
+
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+ rcu_barrier();
}
/* End of spanning write testing */
@@ -35733,6 +35899,127 @@ unlock:
return ret;
}
+static noinline void __init check_erase_rebalance(struct maple_tree *mt)
+{
+ unsigned long val;
+ void *enode;
+ int ret;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ /*
+ * During removal of a big node, the rebalance started going too high,
+ * which resulted in too many nodes being used.
+ *
+ * Create a rebalance which results in an exactly full parent (0-9) that
+ * does not need to be rebalanced. This requires two full levels,
+ * followed by an insufficient level that will be rebalanced into two
+ * nodes, and finally leaves that need to be rebalanced into one node.
+ *
+ * The bug's tree:                Label
+ *         root 4                   R
+ *          / \                    / \
+ *         9   X                  F
+ *        / \ / \                /
+ *       9   X                  E
+ *      / \ / \                / \
+ *     4   8                  C   D
+ *    / \ / \
+ *   6   9                   A   B
+ *   ^ becomes 5 with the write.
+ *
+ * Below, the reconstruction leaves the root with 2 entries; the setup
+ * uses the letter labels above.
+ */
+
+ ret = build_full_tree(mt, MT_FLAGS_ALLOC_RANGE, 4);
+ MT_BUG_ON(mt, ret);
+
+ /* Cheap expansion to 5 levels */
+ mtree_store(mt, ULONG_MAX, xa_mk_value(0), GFP_KERNEL);
+ /* RCU mode forces replacement nodes to be used rather than reused in place */
+ mt_set_in_rcu(mt);
+ mas_lock(&mas);
+
+ /* Node A had 6 entries */
+ mas_walk(&mas);
+ MAS_BUG_ON(&mas, mas_data_end(&mas) < 6);
+ while (mas_data_end(&mas) > 6) {
+ mas_erase(&mas);
+ mas_next(&mas, ULONG_MAX);
+ }
+
+ /* Move to Node B */
+ enode = (void *)mas.node;
+ while (mas.node == enode)
+ mas_next(&mas, ULONG_MAX);
+
+ /* Node B had 9 entries */
+ MAS_BUG_ON(&mas, mas_data_end(&mas) < 9);
+ while (mas_data_end(&mas) > 9) {
+ mas_erase(&mas);
+ mas_next(&mas, ULONG_MAX);
+ }
+
+ /* Move to Node C */
+ mas_ascend(&mas);
+ val = mas.max;
+ /* Adjust entries to be 4 */
+ while (mas_data_end(&mas) > 4) {
+ mas_set(&mas, val);
+ mas_erase(&mas);
+ mas_prev(&mas, 0);
+ val = mas.index;
+ mas_ascend(&mas);
+ }
+
+ /* Move to Node D */
+ mas_ascend(&mas);
+ mas.offset = 1;
+ mas_descend(&mas);
+ val = mas.max;
+ /* Adjust entries to be 8 */
+ while (mas_data_end(&mas) < 8) {
+ mas_set(&mas, val--);
+ mas_store_gfp(&mas, &mas, GFP_KERNEL);
+ mas_ascend(&mas);
+ }
+
+ /* Move to Node E */
+ mas_ascend(&mas);
+ val = mas.max;
+ MAS_BUG_ON(&mas, mas_data_end(&mas) > 9);
+ /* Adjust Node E to 9 entries */
+ while (mas_data_end(&mas) < 9) {
+ mas_set(&mas, val--);
+ mas_store_gfp(&mas, &mas, GFP_KERNEL);
+ mas_ascend(&mas);
+ mas_ascend(&mas);
+ }
+
+ /* Move to Node F */
+ mas_ascend(&mas);
+ val = mas.max;
+ MAS_BUG_ON(&mas, mas_data_end(&mas) > 9);
+ /* Adjust Node F to 9 entries */
+ while (mas_data_end(&mas) < 9) {
+ mas_set(&mas, val--);
+ mas_store_gfp(&mas, &mas, GFP_KERNEL);
+ mas_ascend(&mas);
+ mas_ascend(&mas);
+ mas_ascend(&mas);
+ }
+
+ /* Test is set up, walk to first entry */
+ mas_set(&mas, 0);
+ mas_next(&mas, ULONG_MAX);
+ /* Overwrite the entry to cause a rebalance; the node count was 1 too few */
+ mas_set_range(&mas, 0, mas.last);
+ mas_preallocate(&mas, NULL, GFP_KERNEL);
+ mas_store_prealloc(&mas, NULL);
+ mas_unlock(&mas);
+}
+
static noinline void __init check_mtree_dup(struct maple_tree *mt)
{
DEFINE_MTREE(new);
@@ -36029,7 +36316,6 @@ static inline int check_vma_modification(struct maple_tree *mt)
return 0;
}
-
void farmer_tests(void)
{
struct maple_node *node;
@@ -36095,6 +36381,10 @@ void farmer_tests(void)
check_mtree_dup(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_erase_rebalance(&tree);
+ mtree_destroy(&tree);
+
/* RCU testing */
mt_init_flags(&tree, 0);
check_erase_testset(&tree);
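
Every reader-side change in this file follows the same pattern: a shared dump
mutex is taken with pthread_mutex_trylock() so that only the first thread to
observe a mismatch dumps diagnostics, while every later failing reader drops
its RCU read lock and quits instead of interleaving its output with the dump.
A minimal sketch of that pattern (standalone; the names are illustrative, not
the test's):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t dump = PTHREAD_MUTEX_INITIALIZER;

    /*
     * The first failing thread wins the mutex and reports; later failures
     * see trylock fail and exit quietly. The mutex is deliberately never
     * released, since the test aborts once the dump completes.
     */
    static void fail_once_or_quit(const char *what, unsigned long got,
                                  unsigned long want)
    {
            if (pthread_mutex_trylock(&dump) != 0)
                    pthread_exit(NULL); /* someone else is already dumping */

            fprintf(stderr, "%s is wrong: %lx vs expected %lx\n",
                    what, got, want);
            abort();
    }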
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index ea05a2524d0f..b43da9bc20c4 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -1281,8 +1281,11 @@ static int tcp_server(const char *cgroup, void *arg)
saddr.sin6_port = htons(srv_args->port);
sk = socket(AF_INET6, SOCK_STREAM, 0);
- if (sk < 0)
+ if (sk < 0) {
+ /* Pass back errno to the ctl_fd */
+ write(ctl_fd, &errno, sizeof(errno));
return ret;
+ }
if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0)
goto cleanup;
@@ -1413,6 +1416,12 @@ static int test_memcg_sock(const char *root)
goto cleanup;
close(args.ctl[0]);
+ /* Skip if address family not supported by protocol */
+ if (err == EAFNOSUPPORT) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
if (!err)
break;
if (err != EADDRINUSE)
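
The fix above passes errno from the forked TCP server back through the
existing control pipe, which lets the parent translate EAFNOSUPPORT (no IPv6
support) into a skip rather than a failure. A reduced sketch of that
hand-off, with hypothetical names standing in for the selftest harness:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Child: report the socket() errno (0 on success) through the pipe. */
    static void child_side(int wr_fd)
    {
            int err = 0;

            if (socket(AF_INET6, SOCK_STREAM, 0) < 0)
                    err = errno;
            write(wr_fd, &err, sizeof(err));
    }

    /* Parent: read the child's errno and decide pass, skip or fail. */
    static int parent_side(int rd_fd)
    {
            int err;

            if (read(rd_fd, &err, sizeof(err)) != sizeof(err))
                    return -1;
            if (err == EAFNOSUPPORT) {
                    printf("skip: IPv6 not supported\n");
                    return 0;
            }
            return err ? -1 : 0;
    }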
diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
index 64ebc3f3f203..a7bdcdd09d62 100644
--- a/tools/testing/selftests/cgroup/test_zswap.c
+++ b/tools/testing/selftests/cgroup/test_zswap.c
@@ -5,6 +5,8 @@
#include <unistd.h>
#include <stdio.h>
#include <signal.h>
+#include <errno.h>
+#include <fcntl.h>
#include <sys/sysinfo.h>
#include <string.h>
#include <sys/wait.h>
@@ -574,6 +576,139 @@ out:
return ret;
}
+struct incomp_child_args {
+ size_t size;
+ int pipefd[2];
+ int madvise_ret;
+ int madvise_errno;
+};
+
+static int allocate_random_and_wait(const char *cgroup, void *arg)
+{
+ struct incomp_child_args *values = arg;
+ size_t size = values->size;
+ char *mem;
+ int fd;
+ ssize_t n;
+
+ close(values->pipefd[0]);
+
+ mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mem == MAP_FAILED)
+ return -1;
+
+ /* Fill with random data from /dev/urandom - incompressible */
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd < 0) {
+ munmap(mem, size);
+ return -1;
+ }
+
+ for (size_t i = 0; i < size; ) {
+ n = read(fd, mem + i, size - i);
+ if (n <= 0)
+ break;
+ i += n;
+ }
+ close(fd);
+
+ /* Touch all pages to ensure they're faulted in */
+ for (size_t i = 0; i < size; i += PAGE_SIZE)
+ mem[i] = mem[i];
+
+ /* Use MADV_PAGEOUT to push pages into zswap */
+ values->madvise_ret = madvise(mem, size, MADV_PAGEOUT);
+ values->madvise_errno = errno;
+
+ /* Notify parent that allocation and pageout are done */
+ write(values->pipefd[1], "x", 1);
+ close(values->pipefd[1]);
+
+ /* Keep memory alive for parent to check stats */
+ pause();
+ munmap(mem, size);
+ return 0;
+}
+
+static long get_zswap_incomp(const char *cgroup)
+{
+ return cg_read_key_long(cgroup, "memory.stat", "zswap_incomp ");
+}
+
+/*
+ * Test that incompressible pages (random data) are tracked by zswap_incomp.
+ *
+ * The child process allocates random data within memory.max, then uses
+ * MADV_PAGEOUT to push pages into zswap. The parent waits on a pipe for
+ * the child to finish, then checks the zswap_incomp stat before the child
+ * exits (zswap_incomp is a gauge that decreases on free).
+ */
+static int test_zswap_incompressible(const char *root)
+{
+ int ret = KSFT_FAIL;
+ struct incomp_child_args *values;
+ char *test_group;
+ long zswap_incomp;
+ pid_t child_pid;
+ int child_status;
+ char buf;
+
+ values = mmap(0, sizeof(struct incomp_child_args), PROT_READ |
+ PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (values == MAP_FAILED)
+ return KSFT_FAIL;
+
+ if (pipe(values->pipefd)) {
+ munmap(values, sizeof(struct incomp_child_args));
+ return KSFT_FAIL;
+ }
+
+ test_group = cg_name(root, "zswap_incompressible_test");
+ if (!test_group)
+ goto out;
+ if (cg_create(test_group))
+ goto out;
+ if (cg_write(test_group, "memory.max", "32M"))
+ goto out;
+
+ values->size = MB(4);
+ child_pid = cg_run_nowait(test_group, allocate_random_and_wait, values);
+ if (child_pid < 0)
+ goto out;
+
+ close(values->pipefd[1]);
+
+ /* Wait for child to finish allocating and pageout */
+ read(values->pipefd[0], &buf, 1);
+ close(values->pipefd[0]);
+
+ zswap_incomp = get_zswap_incomp(test_group);
+ if (zswap_incomp <= 0) {
+ long zswpout = get_zswpout(test_group);
+ long zswapped = cg_read_key_long(test_group, "memory.stat", "zswapped ");
+ long zswap_b = cg_read_key_long(test_group, "memory.stat", "zswap ");
+
+ ksft_print_msg("zswap_incomp not increased: %ld\n", zswap_incomp);
+ ksft_print_msg("debug: zswpout=%ld zswapped=%ld zswap_b=%ld\n",
+ zswpout, zswapped, zswap_b);
+ ksft_print_msg("debug: madvise ret=%d errno=%d\n",
+ values->madvise_ret, values->madvise_errno);
+ goto out_kill;
+ }
+
+ ret = KSFT_PASS;
+
+out_kill:
+ kill(child_pid, SIGTERM);
+ waitpid(child_pid, &child_status, 0);
+out:
+ cg_destroy(test_group);
+ free(test_group);
+ munmap(values, sizeof(struct incomp_child_args));
+ return ret;
+}
+
#define T(x) { x, #x }
struct zswap_test {
int (*fn)(const char *root);
@@ -586,6 +721,7 @@ struct zswap_test {
T(test_zswap_writeback_disabled),
T(test_no_kmem_bypass),
T(test_no_invasive_cgroup_shrink),
+ T(test_zswap_incompressible),
};
#undef T
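
The new test reads zswap_incomp as a keyed line from memory.stat via the
harness helper cg_read_key_long(). A standalone sketch of that kind of keyed
stat lookup (a hypothetical helper, not the harness code):

    #include <stdio.h>
    #include <string.h>

    /* Return the value after `key` in a memory.stat-style file, or -1. */
    static long read_stat_key(const char *path, const char *key)
    {
            char line[256];
            long val = -1;
            FILE *f = fopen(path, "r");

            if (!f)
                    return -1;
            while (fgets(line, sizeof(line), f)) {
                    /* Lines look like "zswap_incomp 12345". */
                    if (!strncmp(line, key, strlen(key))) {
                            sscanf(line + strlen(key), "%ld", &val);
                            break;
                    }
            }
            fclose(f);
            return val;
    }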
diff --git a/tools/testing/selftests/damon/_damon_sysfs.py b/tools/testing/selftests/damon/_damon_sysfs.py
index 748778b563cd..2b4df655d9fd 100644
--- a/tools/testing/selftests/damon/_damon_sysfs.py
+++ b/tools/testing/selftests/damon/_damon_sysfs.py
@@ -130,15 +130,16 @@ class DamosQuota:
sz = None # size quota, in bytes
ms = None # time quota
goals = None # quota goals
+ goal_tuner = None # quota goal tuner
reset_interval_ms = None # quota reset interval
weight_sz_permil = None
weight_nr_accesses_permil = None
weight_age_permil = None
scheme = None # owner scheme
- def __init__(self, sz=0, ms=0, goals=None, reset_interval_ms=0,
- weight_sz_permil=0, weight_nr_accesses_permil=0,
- weight_age_permil=0):
+ def __init__(self, sz=0, ms=0, goals=None, goal_tuner='consist',
+ reset_interval_ms=0, weight_sz_permil=0,
+ weight_nr_accesses_permil=0, weight_age_permil=0):
self.sz = sz
self.ms = ms
self.reset_interval_ms = reset_interval_ms
@@ -146,6 +147,7 @@ class DamosQuota:
self.weight_nr_accesses_permil = weight_nr_accesses_permil
self.weight_age_permil = weight_age_permil
self.goals = goals if goals is not None else []
+ self.goal_tuner = goal_tuner
for idx, goal in enumerate(self.goals):
goal.idx = idx
goal.quota = self
@@ -191,6 +193,10 @@ class DamosQuota:
err = goal.stage()
if err is not None:
return err
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'goal_tuner'), self.goal_tuner)
+ if err is not None:
+ return err
return None
class DamosWatermarks:
diff --git a/tools/testing/selftests/damon/config b/tools/testing/selftests/damon/config
index a68a9fead5dc..6304adacb741 100644
--- a/tools/testing/selftests/damon/config
+++ b/tools/testing/selftests/damon/config
@@ -4,3 +4,4 @@ CONFIG_DAMON_PADDR=y
CONFIG_DAMON_VADDR=y
CONFIG_DAMON_RECLAIM=y
CONFIG_DAMON_LRU_SORT=y
+CONFIG_DAMON_DEBUG_SANITY=y
diff --git a/tools/testing/selftests/damon/drgn_dump_damon_status.py b/tools/testing/selftests/damon/drgn_dump_damon_status.py
index 5374d18d1fa8..af99b07a4f56 100755
--- a/tools/testing/selftests/damon/drgn_dump_damon_status.py
+++ b/tools/testing/selftests/damon/drgn_dump_damon_status.py
@@ -110,6 +110,7 @@ def damos_quota_to_dict(quota):
['reset_interval', int],
['ms', int], ['sz', int],
['goals', damos_quota_goals_to_list],
+ ['goal_tuner', int],
['esz', int],
['weight_sz', int],
['weight_nr_accesses', int],
diff --git a/tools/testing/selftests/damon/sysfs.py b/tools/testing/selftests/damon/sysfs.py
index 9cca71eb0325..3aa5c91548a5 100755
--- a/tools/testing/selftests/damon/sysfs.py
+++ b/tools/testing/selftests/damon/sysfs.py
@@ -67,6 +67,12 @@ def assert_quota_committed(quota, dump):
assert_true(dump['sz'] == quota.sz, 'sz', dump)
for idx, qgoal in enumerate(quota.goals):
assert_quota_goal_committed(qgoal, dump['goals'][idx])
+ tuner_val = {
+ 'consist': 0,
+ 'temporal': 1,
+ }
+ assert_true(dump['goal_tuner'] == tuner_val[quota.goal_tuner],
+ 'goal_tuner', dump)
assert_true(dump['weight_sz'] == quota.weight_sz_permil, 'weight_sz', dump)
assert_true(dump['weight_nr_accesses'] == quota.weight_nr_accesses_permil,
'weight_nr_accesses', dump)
@@ -231,6 +237,7 @@ def main():
metric='node_mem_used_bp',
target_value=9950,
nid=1)],
+ goal_tuner='temporal',
reset_interval_ms=1500,
weight_sz_permil=20,
weight_nr_accesses_permil=200,
diff --git a/tools/testing/selftests/kho/init.c b/tools/testing/selftests/kho/init.c
index 6d9e91d55d68..88a41b6eba95 100644
--- a/tools/testing/selftests/kho/init.c
+++ b/tools/testing/selftests/kho/init.c
@@ -11,7 +11,6 @@
/* from arch/x86/include/asm/setup.h */
#define COMMAND_LINE_SIZE 2048
-#define KHO_FINALIZE "/debugfs/kho/out/finalize"
#define KERNEL_IMAGE "/kernel"
static int mount_filesystems(void)
@@ -22,22 +21,6 @@ static int mount_filesystems(void)
return mount("proc", "/proc", "proc", 0, NULL);
}
-static int kho_enable(void)
-{
- const char enable[] = "1";
- int fd;
-
- fd = open(KHO_FINALIZE, O_RDWR);
- if (fd < 0)
- return -1;
-
- if (write(fd, enable, sizeof(enable)) != sizeof(enable))
- return 1;
-
- close(fd);
- return 0;
-}
-
static long kexec_file_load(int kernel_fd, int initrd_fd,
unsigned long cmdline_len, const char *cmdline,
unsigned long flags)
@@ -78,9 +61,6 @@ int main(int argc, char *argv[])
if (mount_filesystems())
goto err_reboot;
- if (kho_enable())
- goto err_reboot;
-
if (kexec_load())
goto err_reboot;
diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore
index 83ad9454dd9d..b0c30c5ee9e3 100644
--- a/tools/testing/selftests/mm/.gitignore
+++ b/tools/testing/selftests/mm/.gitignore
@@ -61,3 +61,4 @@ guard-regions
merge
prctl_thp_disable
rmap
+folio_split_race_test
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index 7a5de4e9bf52..cd24596cdd27 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -105,6 +105,7 @@ TEST_GEN_FILES += droppable
TEST_GEN_FILES += guard-regions
TEST_GEN_FILES += merge
TEST_GEN_FILES += rmap
+TEST_GEN_FILES += folio_split_race_test
ifneq ($(ARCH),arm64)
TEST_GEN_FILES += soft-dirty
diff --git a/tools/testing/selftests/mm/folio_split_race_test.c b/tools/testing/selftests/mm/folio_split_race_test.c
new file mode 100644
index 000000000000..ff026f183ac7
--- /dev/null
+++ b/tools/testing/selftests/mm/folio_split_race_test.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The test creates shmem PMD huge pages, fills all pages with known patterns,
+ * then continuously verifies non-punched pages with 16 threads. Meanwhile, the
+ * main thread punches holes via MADV_REMOVE on the shmem.
+ *
+ * It tests the race condition between folio_split() and filemap_get_entry():
+ * the hole punches on the shmem lead to folio_split(), while reading the
+ * shmem leads to filemap_get_entry().
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/mman.h>
+#include <pthread.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <signal.h>
+#include <unistd.h>
+#include "vm_util.h"
+#include "kselftest.h"
+#include "thp_settings.h"
+
+uint64_t page_size;
+uint64_t pmd_pagesize;
+#define NR_PMD_PAGE 5
+#define FILE_SIZE (pmd_pagesize * NR_PMD_PAGE)
+#define TOTAL_PAGES (FILE_SIZE / page_size)
+
+/*
+ * For each k, pages [k*N, k*N + M) are punched; the punched ranges are not
+ * aligned to huge page boundaries.
+ */
+#define PUNCH_INTERVAL 50 /* N */
+#define PUNCH_SIZE_FACTOR 3 /* M */
+
+#define NUM_READER_THREADS 16
+#define FILL_BYTE 0xAF
+#define NUM_ITERATIONS 100
+
+/* Shared control block: control reading threads and record stats */
+struct shared_ctl {
+ atomic_uint_fast32_t stop;
+ atomic_uint_fast64_t reader_failures;
+ atomic_uint_fast64_t reader_verified;
+ pthread_barrier_t barrier;
+};
+
+static void fill_page(unsigned char *base, size_t page_idx)
+{
+ unsigned char *page_ptr = base + page_idx * page_size;
+ uint64_t idx = (uint64_t)page_idx;
+
+ memset(page_ptr, FILL_BYTE, page_size);
+ memcpy(page_ptr, &idx, sizeof(idx));
+}
+
+/* Returns true if valid, false if corrupted. */
+static bool check_page(unsigned char *base, uint64_t page_idx)
+{
+ unsigned char *page_ptr = base + page_idx * page_size;
+ uint64_t expected_idx = (uint64_t)page_idx;
+ uint64_t got_idx;
+
+ memcpy(&got_idx, page_ptr, 8);
+
+ if (got_idx != expected_idx) {
+ uint64_t off;
+ int all_zero = 1;
+
+ for (off = 0; off < page_size; off++) {
+ if (page_ptr[off] != 0) {
+ all_zero = 0;
+ break;
+ }
+ }
+ if (all_zero) {
+ ksft_print_msg("CORRUPTED: page %" PRIu64
+ " (huge page %" PRIu64
+ ") is ALL ZEROS\n",
+ page_idx,
+ (page_idx * page_size) / pmd_pagesize);
+ } else {
+ ksft_print_msg("CORRUPTED: page %" PRIu64
+ " (huge page %" PRIu64
+ "): expected idx %" PRIu64
+ ", got %" PRIu64 "\n",
+ page_idx,
+ (page_idx * page_size) / pmd_pagesize,
+ page_idx, got_idx);
+ }
+ return false;
+ }
+ return true;
+}
+
+struct reader_arg {
+ unsigned char *base;
+ struct shared_ctl *ctl;
+ int tid;
+ atomic_uint_fast64_t *failures;
+ atomic_uint_fast64_t *verified;
+};
+
+static void *reader_thread(void *arg)
+{
+ struct reader_arg *ra = (struct reader_arg *)arg;
+ unsigned char *base = ra->base;
+ struct shared_ctl *ctl = ra->ctl;
+ int tid = ra->tid;
+ atomic_uint_fast64_t *failures = ra->failures;
+ atomic_uint_fast64_t *verified = ra->verified;
+ uint64_t page_idx;
+
+ pthread_barrier_wait(&ctl->barrier);
+
+ while (atomic_load_explicit(&ctl->stop, memory_order_acquire) == 0) {
+ for (page_idx = (size_t)tid; page_idx < TOTAL_PAGES;
+ page_idx += NUM_READER_THREADS) {
+ /*
+ * page_idx % PUNCH_INTERVAL is in [0, PUNCH_INTERVAL),
+ * skip [0, PUNCH_SIZE_FACTOR)
+ */
+ if (page_idx % PUNCH_INTERVAL < PUNCH_SIZE_FACTOR)
+ continue;
+ if (check_page(base, page_idx))
+ atomic_fetch_add_explicit(verified, 1,
+ memory_order_relaxed);
+ else
+ atomic_fetch_add_explicit(failures, 1,
+ memory_order_relaxed);
+ }
+ if (atomic_load_explicit(failures, memory_order_relaxed) > 0)
+ break;
+ }
+
+ return NULL;
+}
+
+static void create_readers(pthread_t *threads, struct reader_arg *args,
+ unsigned char *base, struct shared_ctl *ctl)
+{
+ int i;
+
+ for (i = 0; i < NUM_READER_THREADS; i++) {
+ args[i].base = base;
+ args[i].ctl = ctl;
+ args[i].tid = i;
+ args[i].failures = &ctl->reader_failures;
+ args[i].verified = &ctl->reader_verified;
+ if (pthread_create(&threads[i], NULL, reader_thread,
+ &args[i]) != 0)
+ ksft_exit_fail_msg("pthread_create failed\n");
+ }
+}
+
+/* Run a single iteration. Returns total number of corrupted pages. */
+static uint64_t run_iteration(void)
+{
+ uint64_t reader_failures, reader_verified;
+ struct reader_arg args[NUM_READER_THREADS];
+ pthread_t threads[NUM_READER_THREADS];
+ unsigned char *mmap_base;
+ struct shared_ctl ctl;
+ uint64_t i;
+
+ memset(&ctl, 0, sizeof(struct shared_ctl));
+
+ mmap_base = mmap(NULL, FILE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+
+ if (mmap_base == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed: %d\n", errno);
+
+ if (madvise(mmap_base, FILE_SIZE, MADV_HUGEPAGE) != 0)
+ ksft_exit_fail_msg("madvise(MADV_HUGEPAGE) failed: %d\n",
+ errno);
+
+ for (i = 0; i < TOTAL_PAGES; i++)
+ fill_page(mmap_base, i);
+
+ if (!check_huge_shmem(mmap_base, NR_PMD_PAGE, pmd_pagesize))
+ ksft_exit_fail_msg("No shmem THP is allocated\n");
+
+ if (pthread_barrier_init(&ctl.barrier, NULL, NUM_READER_THREADS + 1) != 0)
+ ksft_exit_fail_msg("pthread_barrier_init failed\n");
+
+ create_readers(threads, args, mmap_base, &ctl);
+
+ /* Wait for all reader threads to be ready before punching holes. */
+ pthread_barrier_wait(&ctl.barrier);
+
+ for (i = 0; i < TOTAL_PAGES; i++) {
+ if (i % PUNCH_INTERVAL != 0)
+ continue;
+ if (madvise(mmap_base + i * page_size,
+ PUNCH_SIZE_FACTOR * page_size, MADV_REMOVE) != 0) {
+ ksft_exit_fail_msg(
+ "madvise(MADV_REMOVE) failed on page %" PRIu64 ": %d\n",
+ i, errno);
+ }
+
+ i += PUNCH_SIZE_FACTOR - 1;
+ }
+
+ atomic_store_explicit(&ctl.stop, 1, memory_order_release);
+
+ for (i = 0; i < NUM_READER_THREADS; i++)
+ pthread_join(threads[i], NULL);
+
+ pthread_barrier_destroy(&ctl.barrier);
+
+ reader_failures = atomic_load_explicit(&ctl.reader_failures,
+ memory_order_acquire);
+ reader_verified = atomic_load_explicit(&ctl.reader_verified,
+ memory_order_acquire);
+ if (reader_failures)
+ ksft_print_msg("Child: %" PRIu64 " pages verified, %" PRIu64 " failures\n",
+ reader_verified, reader_failures);
+
+ munmap(mmap_base, FILE_SIZE);
+
+ return reader_failures;
+}
+
+static void thp_cleanup_handler(int signum)
+{
+ thp_restore_settings();
+ /*
+ * Restore default handler and re-raise the signal to exit.
+ * This is to ensure the test process exits with the correct
+ * status code corresponding to the signal.
+ */
+ signal(signum, SIG_DFL);
+ raise(signum);
+}
+
+static void thp_settings_cleanup(void)
+{
+ thp_restore_settings();
+}
+
+int main(void)
+{
+ struct thp_settings current_settings;
+ uint64_t corrupted_pages;
+ uint64_t iter;
+
+ ksft_print_header();
+
+ page_size = getpagesize();
+ pmd_pagesize = read_pmd_pagesize();
+
+ if (!thp_available() || !pmd_pagesize)
+ ksft_exit_skip("Transparent Hugepages not available\n");
+
+ if (geteuid() != 0)
+ ksft_exit_skip("Please run the test as root\n");
+
+ thp_save_settings();
+ /* make sure thp settings are restored */
+ if (atexit(thp_settings_cleanup) != 0)
+ ksft_exit_fail_msg("atexit failed\n");
+
+ signal(SIGINT, thp_cleanup_handler);
+ signal(SIGTERM, thp_cleanup_handler);
+
+ thp_read_settings(&current_settings);
+ current_settings.shmem_enabled = SHMEM_ADVISE;
+ thp_write_settings(&current_settings);
+
+ ksft_set_plan(1);
+
+ ksft_print_msg("folio split race test\n");
+
+ for (iter = 0; iter < NUM_ITERATIONS; iter++) {
+ corrupted_pages = run_iteration();
+ if (corrupted_pages > 0)
+ break;
+ }
+
+ if (iter < NUM_ITERATIONS)
+ ksft_test_result_fail("FAILED on iteration %" PRIu64
+ ": %" PRIu64
+ " pages corrupted by MADV_REMOVE!\n",
+ iter, corrupted_pages);
+ else
+ ksft_test_result_pass("All %d iterations passed\n",
+ NUM_ITERATIONS);
+
+ ksft_exit(iter == NUM_ITERATIONS);
+
+ return 0;
+}
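
With PUNCH_INTERVAL = 50 and PUNCH_SIZE_FACTOR = 3, the punch loop above
removes pages [50k, 50k + 3) for every k, and the readers' skip test
(page_idx % PUNCH_INTERVAL < PUNCH_SIZE_FACTOR) matches exactly those
indices. A one-function restatement of that predicate (illustrative only):

    #include <stdbool.h>

    /*
     * True for every page index the test punches with MADV_REMOVE:
     * 0-2, 50-52, 100-102, ... (not aligned to huge page boundaries).
     */
    static bool page_is_punched(unsigned long page_idx)
    {
            return (page_idx % 50) < 3;
    }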
diff --git a/tools/testing/selftests/mm/memory-failure.c b/tools/testing/selftests/mm/memory-failure.c
index 3d9e0b9ffb41..032ed952057c 100644
--- a/tools/testing/selftests/mm/memory-failure.c
+++ b/tools/testing/selftests/mm/memory-failure.c
@@ -13,7 +13,6 @@
#include <unistd.h>
#include <signal.h>
#include <setjmp.h>
-#include <unistd.h>
#include <fcntl.h>
#include <sys/vfs.h>
#include <linux/magic.h>
diff --git a/tools/testing/selftests/mm/migration.c b/tools/testing/selftests/mm/migration.c
index ee24b88c2b24..60e78bbfc0e3 100644
--- a/tools/testing/selftests/mm/migration.c
+++ b/tools/testing/selftests/mm/migration.c
@@ -36,7 +36,8 @@ FIXTURE_SETUP(migration)
{
int n;
- ASSERT_EQ(numa_available(), 0);
+ if (numa_available() < 0)
+ SKIP(return, "NUMA not available");
self->nthreads = numa_num_task_cpus() - 1;
self->n1 = -1;
self->n2 = -1;
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index 2ca8a7e3c27e..7f9428d6062c 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -113,13 +113,13 @@ int init_uffd(void)
return 0;
}
-int wp_init(void *lpBaseAddress, long dwRegionSize)
+int wp_init(void *addr, long size)
{
struct uffdio_register uffdio_register;
struct uffdio_writeprotect wp;
- uffdio_register.range.start = (unsigned long)lpBaseAddress;
- uffdio_register.range.len = dwRegionSize;
+ uffdio_register.range.start = (unsigned long)addr;
+ uffdio_register.range.len = size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
ksft_exit_fail_msg("ioctl(UFFDIO_REGISTER) %d %s\n", errno, strerror(errno));
@@ -127,8 +127,8 @@ int wp_init(void *lpBaseAddress, long dwRegionSize)
if (!(uffdio_register.ioctls & UFFDIO_WRITEPROTECT))
ksft_exit_fail_msg("ioctl set is incorrect\n");
- wp.range.start = (unsigned long)lpBaseAddress;
- wp.range.len = dwRegionSize;
+ wp.range.start = (unsigned long)addr;
+ wp.range.len = size;
wp.mode = UFFDIO_WRITEPROTECT_MODE_WP;
if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
@@ -137,21 +137,21 @@ int wp_init(void *lpBaseAddress, long dwRegionSize)
return 0;
}
-int wp_free(void *lpBaseAddress, long dwRegionSize)
+int wp_free(void *addr, long size)
{
struct uffdio_register uffdio_register;
- uffdio_register.range.start = (unsigned long)lpBaseAddress;
- uffdio_register.range.len = dwRegionSize;
+ uffdio_register.range.start = (unsigned long)addr;
+ uffdio_register.range.len = size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range))
ksft_exit_fail_msg("ioctl unregister failure\n");
return 0;
}
-int wp_addr_range(void *lpBaseAddress, int dwRegionSize)
+int wp_addr_range(void *addr, int size)
{
- if (pagemap_ioctl(lpBaseAddress, dwRegionSize, NULL, 0,
+ if (pagemap_ioctl(addr, size, NULL, 0,
PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0)
ksft_exit_fail_msg("error %d %d %s\n", 1, errno, strerror(errno));
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index afdcfd0d7cef..d8468451b3a3 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -515,6 +515,8 @@ if [ -n "${MOUNTED_XFS}" ]; then
rm -f ${XFS_IMG}
fi
+CATEGORY="thp" run_test ./folio_split_race_test
+
CATEGORY="migration" run_test ./migration
CATEGORY="mkdirty" run_test ./mkdirty
diff --git a/tools/testing/vma/include/custom.h b/tools/testing/vma/include/custom.h
index 802a76317245..744fe874c168 100644
--- a/tools/testing/vma/include/custom.h
+++ b/tools/testing/vma/include/custom.h
@@ -15,15 +15,6 @@ extern unsigned long dac_mmap_min_addr;
#define dac_mmap_min_addr 0UL
#endif
-#define VM_WARN_ON(_expr) (WARN_ON(_expr))
-#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
-#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
-#define VM_BUG_ON(_expr) (BUG_ON(_expr))
-#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
-
-/* We hardcode this for now. */
-#define sysctl_max_map_count 0x1000000UL
-
#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)
/*
@@ -32,8 +23,6 @@ extern unsigned long dac_mmap_min_addr;
*/
#define pr_warn_once pr_err
-#define pgtable_supports_soft_dirty() 1
-
struct anon_vma {
struct anon_vma *root;
struct rb_root_cached rb_root;
@@ -102,18 +91,7 @@ static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
refcount_set(&vma->vm_refcnt, 0);
}
-static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits)
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
- vma_flags_t flags;
- int i;
-
- /*
- * For testing purposes: allow invalid bit specification so we can
- * easily test.
- */
- vma_flags_clear_all(&flags);
- for (i = 0; i < count; i++)
- if (bits[i] < NUM_VMA_FLAG_BITS)
- vma_flag_set(&flags, bits[i]);
- return flags;
+ return PAGE_SIZE;
}
diff --git a/tools/testing/vma/include/dup.h b/tools/testing/vma/include/dup.h
index 3078ff1487d3..b4864aad2db0 100644
--- a/tools/testing/vma/include/dup.h
+++ b/tools/testing/vma/include/dup.h
@@ -33,7 +33,10 @@ struct mm_struct {
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ union {
+ vm_flags_t def_flags;
+ vma_flags_t def_vma_flags;
+ };
mm_flags_t flags; /* Must use mm_flags_* helpers to access */
};
@@ -264,8 +267,10 @@ enum {
#endif /* CONFIG_ARCH_HAS_PKEYS */
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK VM_NONE
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO INIT_VM_FLAG(SAO)
@@ -311,36 +316,49 @@ enum {
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
-#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
+#define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
+ VM_EXEC_BIT : VM_READ_BIT)
/* Common data flag combinations */
-#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
- VM_MAYWRITE | VM_MAYEXEC)
-#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
-#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
+#define VMA_DATA_FLAGS_TSK_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
+ VMA_MAYEXEC_BIT)
+#define VMA_DATA_FLAGS_NON_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
+#define VMA_DATA_FLAGS_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
+ VMA_MAYEXEC_BIT)
+
+#ifndef VMA_DATA_DEFAULT_FLAGS /* arch can override this */
+#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_EXEC
#endif
-#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
-#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+#ifndef VMA_STACK_DEFAULT_FLAGS /* arch can override this */
+#define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS
#endif
-#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+#define VMA_STACK_FLAGS append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \
+ VMA_STACK_BIT, VMA_ACCOUNT_BIT)
+/* Temporary until VMA flags conversion complete. */
+#define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)
-#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
+#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)
/*
* Special vmas that are non-mergable, non-mlock()able.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
+ VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
+
+#define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT, \
+ VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
+
#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX DEFAULT_MAP_WINDOW
@@ -350,19 +368,20 @@ enum {
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
-#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
-
-#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VMA_LOCKED_MASK mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)
#define RLIMIT_STACK 3 /* max stack size */
#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
#define CAP_IPC_LOCK 14
-#define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD)
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
+#else
+#define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
+#endif
-#define VM_IGNORE_MERGE VM_STICKY
+#define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS
#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
@@ -419,11 +438,23 @@ struct vma_iterator {
#define EMPTY_VMA_FLAGS ((vma_flags_t){ })
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+static __always_inline bool vma_flags_empty(const vma_flags_t *flags)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS);
+}
+
/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
MMAP_NOTHING, /* Mapping is complete, no further action. */
MMAP_REMAP_PFN, /* Remap PFN range. */
MMAP_IO_REMAP_PFN, /* I/O remap PFN range. */
+ MMAP_SIMPLE_IO_REMAP, /* I/O remap with guardrails. */
+ MMAP_MAP_KERNEL_PAGES, /* Map kernel page range from an array. */
};
/*
@@ -432,13 +463,22 @@ enum mmap_action_type {
*/
struct mmap_action {
union {
- /* Remap range. */
struct {
unsigned long start;
unsigned long start_pfn;
unsigned long size;
pgprot_t pgprot;
} remap;
+ struct {
+ phys_addr_t start_phys_addr;
+ unsigned long size;
+ } simple_ioremap;
+ struct {
+ unsigned long start;
+ struct page **pages;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
+ } map_kernel;
};
enum mmap_action_type type;
@@ -486,18 +526,15 @@ enum vma_operation {
*/
struct vm_area_desc {
/* Immutable state. */
- const struct mm_struct *const mm;
- struct file *const file; /* May vary from vm_file in stacked callers. */
+ struct mm_struct *mm;
+ struct file *file; /* May vary from vm_file in stacked callers. */
unsigned long start;
unsigned long end;
/* Mutable fields. Populated with initial state. */
pgoff_t pgoff;
struct file *vm_file;
- union {
- vm_flags_t vm_flags;
- vma_flags_t vma_flags;
- };
+ vma_flags_t vma_flags;
pgprot_t page_prot;
/* Write-only fields. */
@@ -606,15 +643,37 @@ struct vm_area_struct {
} __randomize_layout;
struct vm_operations_struct {
- void (*open)(struct vm_area_struct * area);
+ /**
+ * @open: Called when a VMA is remapped, split or forked. Not called
+ * upon first mapping a VMA.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
+ void (*open)(struct vm_area_struct *vma);
/**
* @close: Called when the VMA is being removed from the MM.
* Context: User context. May sleep. Caller holds mmap_lock.
*/
- void (*close)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct *vma);
+ /**
+ * @mapped: Called when the VMA is first mapped in the MM. Not called if
+ * the new VMA is merged with an adjacent VMA.
+ *
+ * The @vm_private_data field is an output field allowing the user to
+ * modify vma->vm_private_data as necessary.
+ *
+ * ONLY valid if set from f_op->mmap_prepare. Will result in an error if
+ * set from f_op->mmap.
+ *
+ * Returns %0 on success, or an error otherwise. On error, the VMA will
+ * be unmapped.
+ *
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
+ int (*mapped)(unsigned long start, unsigned long end, pgoff_t pgoff,
+ const struct file *file, void **vm_private_data);
/* Called any time before splitting to check if it's allowed */
- int (*may_split)(struct vm_area_struct *area, unsigned long addr);
- int (*mremap)(struct vm_area_struct *area);
+ int (*may_split)(struct vm_area_struct *vma, unsigned long addr);
+ int (*mremap)(struct vm_area_struct *vma);
/*
* Called by mprotect() to make driver-specific permission
* checks before mprotect() is finalised. The VMA must not
@@ -626,7 +685,7 @@ struct vm_operations_struct {
vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
- unsigned long (*pagesize)(struct vm_area_struct * area);
+ unsigned long (*pagesize)(struct vm_area_struct *vma);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
@@ -746,9 +805,12 @@ static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
* IMPORTANT: This does not overwrite bytes past the first system word. The
* caller must account for this.
*/
-static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_overwrite_word(vma_flags_t *flags,
+ unsigned long value)
{
- *ACCESS_PRIVATE(flags, __vma_flags) = value;
+ unsigned long *bitmap = flags->__vma_flags;
+
+ bitmap[0] = value;
}
/*
@@ -757,35 +819,65 @@ static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long va
* IMPORTANT: This does not overwrite bytes past the first system word. The
* caller must account for this.
*/
-static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_overwrite_word_once(vma_flags_t *flags,
+ unsigned long value)
{
- unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+ unsigned long *bitmap = flags->__vma_flags;
WRITE_ONCE(*bitmap, value);
}
/* Update the first system word of VMA flags setting bits, non-atomically. */
-static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_set_word(vma_flags_t *flags,
+ unsigned long value)
{
- unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+ unsigned long *bitmap = flags->__vma_flags;
*bitmap |= value;
}
/* Update the first system word of VMA flags clearing bits, non-atomically. */
-static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_clear_word(vma_flags_t *flags,
+ unsigned long value)
{
- unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+ unsigned long *bitmap = flags->__vma_flags;
*bitmap &= ~value;
}
-static inline void vma_flags_clear_all(vma_flags_t *flags)
+static __always_inline void vma_flags_clear_all(vma_flags_t *flags)
{
bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
}
-static inline void vma_flag_set(vma_flags_t *flags, vma_flag_t bit)
+/*
+ * Helper function which converts a vma_flags_t value to a legacy vm_flags_t
+ * value. This is only valid if the input flags value can be expressed in a
+ * system word.
+ *
+ * Will be removed once the conversion to VMA flags is complete.
+ */
+static __always_inline vm_flags_t vma_flags_to_legacy(vma_flags_t flags)
+{
+ return (vm_flags_t)flags.__vma_flags[0];
+}
+
+/*
+ * Helper function which converts a legacy vm_flags_t value to a vma_flags_t
+ * value.
+ *
+ * Will be removed once the conversion to VMA flags is complete.
+ */
+static __always_inline vma_flags_t legacy_to_vma_flags(vm_flags_t flags)
+{
+ vma_flags_t ret = EMPTY_VMA_FLAGS;
+
+ vma_flags_overwrite_word(&ret, flags);
+ return ret;
+}
+
+static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
+ vma_flag_t bit)
{
unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
@@ -812,16 +904,20 @@ static inline void vm_flags_reset(struct vm_area_struct *vma,
vm_flags_init(vma, flags);
}
-static inline void vm_flags_reset_once(struct vm_area_struct *vma,
- vm_flags_t flags)
+static inline void vma_flags_reset_once(struct vm_area_struct *vma,
+ vma_flags_t *flags)
{
- vma_assert_write_locked(vma);
- /*
- * The user should only be interested in avoiding reordering of
- * assignment to the first word.
- */
- vma_flags_clear_all(&vma->flags);
- vma_flags_overwrite_word_once(&vma->flags, flags);
+ const unsigned long word = flags->__vma_flags[0];
+
+ /* It is assumed only the first system word must be written once. */
+ vma_flags_overwrite_word_once(&vma->flags, word);
+ /* The remainder can be copied normally. */
+ if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
+ unsigned long *dst = &vma->flags.__vma_flags[1];
+ const unsigned long *src = &flags->__vma_flags[1];
+
+ bitmap_copy(dst, src, NUM_VMA_FLAG_BITS - BITS_PER_LONG);
+ }
}
static inline void vm_flags_set(struct vm_area_struct *vma,
@@ -838,12 +934,53 @@ static inline void vm_flags_clear(struct vm_area_struct *vma,
vma_flags_clear_word(&vma->flags, flags);
}
-static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits);
+static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
+ size_t count, const vma_flag_t *bits)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ vma_flags_set_flag(&flags, bits[i]);
+ return flags;
+}
+
+#define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS, \
+ COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
-#define mk_vma_flags(...) __mk_vma_flags(COUNT_ARGS(__VA_ARGS__), \
- (const vma_flag_t []){__VA_ARGS__})
+#define append_vma_flags(flags, ...) __mk_vma_flags(flags, \
+ COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
-static __always_inline bool vma_flags_test_mask(const vma_flags_t *flags,
+static __always_inline int vma_flags_count(const vma_flags_t *flags)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return bitmap_weight(bitmap, NUM_VMA_FLAG_BITS);
+}
+
+static __always_inline bool vma_flags_test(const vma_flags_t *flags,
+ vma_flag_t bit)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return test_bit((__force int)bit, bitmap);
+}
+
+static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
+ vma_flags_t to_and)
+{
+ vma_flags_t dst;
+ unsigned long *bitmap_dst = dst.__vma_flags;
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_to_and = to_and.__vma_flags;
+
+ bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
+ return dst;
+}
+
+#define vma_flags_and(flags, ...) \
+ vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
vma_flags_t to_test)
{
const unsigned long *bitmap = flags->__vma_flags;
@@ -852,8 +989,8 @@ static __always_inline bool vma_flags_test_mask(const vma_flags_t *flags,
return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}
-#define vma_flags_test(flags, ...) \
- vma_flags_test_mask(flags, mk_vma_flags(__VA_ARGS__))
+#define vma_flags_test_any(flags, ...) \
+ vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))
static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
vma_flags_t to_test)
@@ -867,6 +1004,14 @@ static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
#define vma_flags_test_all(flags, ...) \
vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))
+static __always_inline bool vma_flags_test_single_mask(const vma_flags_t *flags,
+ vma_flags_t flagmask)
+{
+ VM_WARN_ON_ONCE(vma_flags_count(&flagmask) > 1);
+
+ return vma_flags_test_any_mask(flags, flagmask);
+}
+
static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
{
unsigned long *bitmap = flags->__vma_flags;
@@ -889,23 +1034,71 @@ static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t
#define vma_flags_clear(flags, ...) \
vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))
-static inline bool vma_test_all_flags_mask(const struct vm_area_struct *vma,
- vma_flags_t flags)
+static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
+ const vma_flags_t *flags_other)
+{
+ vma_flags_t dst;
+ const unsigned long *bitmap_other = flags_other->__vma_flags;
+ const unsigned long *bitmap = flags->__vma_flags;
+ unsigned long *bitmap_dst = dst.__vma_flags;
+
+ bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+ return dst;
+}
+
+static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
+ const vma_flags_t *flags_other)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_other = flags_other->__vma_flags;
+
+ return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+}
+
+static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
+ vma_flags_t flags_other)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_other = flags_other.__vma_flags;
+
+ return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+}
+
+#define vma_flags_same(flags, ...) \
+ vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline bool vma_test(const struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ return vma_flags_test(&vma->flags, bit);
+}
+
+static __always_inline bool vma_test_any_mask(const struct vm_area_struct *vma,
+ vma_flags_t flags)
+{
+ return vma_flags_test_any_mask(&vma->flags, flags);
+}
+
+#define vma_test_any(vma, ...) \
+ vma_test_any_mask(vma, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline bool vma_test_all_mask(const struct vm_area_struct *vma,
+ vma_flags_t flags)
{
return vma_flags_test_all_mask(&vma->flags, flags);
}
-#define vma_test_all_flags(vma, ...) \
- vma_test_all_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
+#define vma_test_all(vma, ...) \
+ vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))
-static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
+static __always_inline bool
+vma_test_single_mask(const struct vm_area_struct *vma, vma_flags_t flagmask)
{
- return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
- (VM_SHARED | VM_MAYWRITE);
+ return vma_flags_test_single_mask(&vma->flags, flagmask);
}
-static inline void vma_set_flags_mask(struct vm_area_struct *vma,
- vma_flags_t flags)
+static __always_inline void vma_set_flags_mask(struct vm_area_struct *vma,
+ vma_flags_t flags)
{
vma_flags_set_mask(&vma->flags, flags);
}
@@ -913,17 +1106,41 @@ static inline void vma_set_flags_mask(struct vm_area_struct *vma,
#define vma_set_flags(vma, ...) \
vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
-static inline bool vma_desc_test_flags_mask(const struct vm_area_desc *desc,
- vma_flags_t flags)
+static __always_inline void vma_clear_flags_mask(struct vm_area_struct *vma,
+ vma_flags_t flags)
{
- return vma_flags_test_mask(&desc->vma_flags, flags);
+ vma_flags_clear_mask(&vma->flags, flags);
}
-#define vma_desc_test_flags(desc, ...) \
- vma_desc_test_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
+#define vma_clear_flags(vma, ...) \
+ vma_clear_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
-static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
- vma_flags_t flags)
+static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
+ vma_flag_t bit)
+{
+ return vma_flags_test(&desc->vma_flags, bit);
+}
+
+static __always_inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
+ vma_flags_t flags)
+{
+ return vma_flags_test_any_mask(&desc->vma_flags, flags);
+}
+
+#define vma_desc_test_any(desc, ...) \
+ vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
+ vma_flags_t flags)
+{
+ return vma_flags_test_all_mask(&desc->vma_flags, flags);
+}
+
+#define vma_desc_test_all(desc, ...) \
+ vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))
+
+static __always_inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
+ vma_flags_t flags)
{
vma_flags_set_mask(&desc->vma_flags, flags);
}
@@ -931,8 +1148,8 @@ static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
#define vma_desc_set_flags(desc, ...) \
vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
-static inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
- vma_flags_t flags)
+static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
+ vma_flags_t flags)
{
vma_flags_clear_mask(&desc->vma_flags, flags);
}
@@ -1068,42 +1285,71 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
}
/* Declared in vma.h. */
-static inline void set_vma_from_desc(struct vm_area_struct *vma,
+static inline void compat_set_vma_from_desc(struct vm_area_struct *vma,
struct vm_area_desc *desc);
-static inline int __compat_vma_mmap(const struct file_operations *f_op,
- struct file *file, struct vm_area_struct *vma)
+static inline void compat_set_desc_from_vma(struct vm_area_desc *desc,
+ const struct file *file,
+ const struct vm_area_struct *vma)
{
- struct vm_area_desc desc = {
- .mm = vma->vm_mm,
- .file = file,
- .start = vma->vm_start,
- .end = vma->vm_end,
+ memset(desc, 0, sizeof(*desc));
- .pgoff = vma->vm_pgoff,
- .vm_file = vma->vm_file,
- .vm_flags = vma->vm_flags,
- .page_prot = vma->vm_page_prot,
+ desc->mm = vma->vm_mm;
+ desc->file = (struct file *)file;
+ desc->start = vma->vm_start;
+ desc->end = vma->vm_end;
- .action.type = MMAP_NOTHING, /* Default */
- };
- int err;
+ desc->pgoff = vma->vm_pgoff;
+ desc->vm_file = vma->vm_file;
+ desc->vma_flags = vma->flags;
+ desc->page_prot = vma->vm_page_prot;
- err = f_op->mmap_prepare(&desc);
- if (err)
- return err;
+ /* Default. */
+ desc->action.type = MMAP_NOTHING;
+}
- mmap_action_prepare(&desc.action, &desc);
- set_vma_from_desc(vma, &desc);
- return mmap_action_complete(&desc.action, vma);
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
+{
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+}
+
+static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
+{
+ return file->f_op->mmap_prepare(desc);
}
-static inline int compat_vma_mmap(struct file *file,
+static inline int __compat_vma_mmap(struct vm_area_desc *desc,
struct vm_area_struct *vma)
{
- return __compat_vma_mmap(file->f_op, file, vma);
+ int err;
+
+ /* Perform any preparatory tasks for mmap action. */
+ err = mmap_action_prepare(desc);
+ if (err)
+ return err;
+ /* Update the VMA from the descriptor. */
+ compat_set_vma_from_desc(vma, desc);
+ /* Complete any specified mmap actions. */
+ return mmap_action_complete(vma, &desc->action);
}
+static inline int compat_vma_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vm_area_desc desc;
+ struct mmap_action *action;
+ int err;
+
+ compat_set_desc_from_vma(&desc, file, vma);
+ err = vfs_mmap_prepare(file, &desc);
+ if (err)
+ return err;
+ action = &desc.action;
+
+ /* Being invoked from .mmap means we don't have to enforce this. */
+ action->hide_from_rmap_until_complete = false;
+
+ return __compat_vma_mmap(&desc, vma);
+}
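Editor's note: the compat path is now a three-step pipeline, split so a caller that already holds a populated descriptor can enter at __compat_vma_mmap(). A sketch of the call order (helper names are from the patch; the wrapper and its error handling are hypothetical):

	static int toy_compat_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct vm_area_desc desc;
		int err;

		compat_set_desc_from_vma(&desc, file, vma);	/* 1. seed desc from VMA */
		err = vfs_mmap_prepare(file, &desc);		/* 2. driver's mmap_prepare hook */
		if (err)
			return err;
		/* (compat_vma_mmap() also clears hide_from_rmap_until_complete here.) */
		return __compat_vma_mmap(&desc, vma);		/* 3. prepare, apply, complete */
	}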
static inline void vma_iter_init(struct vma_iterator *vmi,
struct mm_struct *mm, unsigned long addr)
@@ -1111,11 +1357,6 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
mas_init(&vmi->mas, &mm->mm_mt, addr);
}
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
-{
- return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-}
-
static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr,
@@ -1256,27 +1497,6 @@ static inline bool mlock_future_ok(const struct mm_struct *mm,
return locked_pages <= limit_pages;
}
-static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
-{
- /* If MDWE is disabled, we have nothing to deny. */
- if (mm_flags_test(MMF_HAS_MDWE, current->mm))
- return false;
-
- /* If the new VMA is not executable, we have nothing to deny. */
- if (!(new & VM_EXEC))
- return false;
-
- /* Under MDWE we do not accept newly writably executable VMAs... */
- if (new & VM_WRITE)
- return true;
-
- /* ...nor previously non-executable VMAs becoming executable. */
- if (!(old & VM_EXEC))
- return true;
-
- return false;
-}
-
static inline int mapping_map_writable(struct address_space *mapping)
{
return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
@@ -1306,11 +1526,6 @@ static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
return file->f_op->mmap(file, vma);
}
-static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
-{
- return file->f_op->mmap_prepare(desc);
-}
-
static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
/* Changing an anonymous vma with this is illegal */
@@ -1318,3 +1533,20 @@ static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
swap(vma->vm_file, file);
fput(file);
}
+
+extern int sysctl_max_map_count;
+static inline int get_sysctl_max_map_count(void)
+{
+ return READ_ONCE(sysctl_max_map_count);
+}
+
+#ifndef pgtable_supports_soft_dirty
+#define pgtable_supports_soft_dirty() IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
+#endif
+
+static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
+{
+ const vm_flags_t vm_flags = vma_flags_to_legacy(vma_flags);
+
+ return vm_get_page_prot(vm_flags);
+}
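Editor's note: vma_get_page_prot() bridges the wide flag type back to a legacy vm_flags_t so the existing vm_get_page_prot() lookup keeps working. Assuming the legacy bits live in word 0 of the bitmap (which compare_legacy_flags() in tests/vma.c below verifies), the conversion is effectively:

	typedef struct { unsigned long __vma_flags[1]; } toy_flags_t;	/* hypothetical toy type */

	static unsigned long toy_flags_to_legacy(toy_flags_t f)
	{
		return f.__vma_flags[0];	/* lower word carries the legacy bits */
	}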
diff --git a/tools/testing/vma/include/stubs.h b/tools/testing/vma/include/stubs.h
index 947a3a0c2566..a30b8bc84955 100644
--- a/tools/testing/vma/include/stubs.h
+++ b/tools/testing/vma/include/stubs.h
@@ -81,13 +81,13 @@ static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}
-static inline void mmap_action_prepare(struct mmap_action *action,
- struct vm_area_desc *desc)
+static inline int mmap_action_prepare(struct vm_area_desc *desc)
{
+ return 0;
}
-static inline int mmap_action_complete(struct mmap_action *action,
- struct vm_area_struct *vma)
+static inline int mmap_action_complete(struct vm_area_struct *vma,
+ struct mmap_action *action)
{
return 0;
}
@@ -101,10 +101,10 @@ static inline bool shmem_file(struct file *file)
return false;
}
-static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
- const struct file *file, vm_flags_t vm_flags)
+static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
+ const struct file *file, vma_flags_t vma_flags)
{
- return vm_flags;
+ return vma_flags;
}
static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
@@ -229,7 +229,7 @@ static inline bool signal_pending(void *p)
return false;
}
-static inline bool is_file_hugepages(struct file *file)
+static inline bool is_file_hugepages(const struct file *file)
{
return false;
}
@@ -239,7 +239,8 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
return 0;
}
-static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
+static inline bool may_expand_vm(struct mm_struct *mm,
+ const vma_flags_t *vma_flags,
unsigned long npages)
{
return true;
@@ -426,3 +427,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
}
static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
+
+static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
+{
+ return false;
+}
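Editor's note: the stub recipe this header follows, paraphrased:

	/*
	 * - predicates return the most conservative constant
	 *   (e.g. vma_supports_mlock() -> false);
	 * - void hooks become empty bodies;
	 * - value-transforming hooks pass their input through
	 *   (e.g. ksm_vma_flags() returns vma_flags unchanged).
	 */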
diff --git a/tools/testing/vma/main.c b/tools/testing/vma/main.c
index 49b09e97a51f..18338f5d29e0 100644
--- a/tools/testing/vma/main.c
+++ b/tools/testing/vma/main.c
@@ -14,6 +14,8 @@
#include "tests/mmap.c"
#include "tests/vma.c"
+int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
+
/* Helper functions which utilise static kernel functions. */
struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
diff --git a/tools/testing/vma/shared.c b/tools/testing/vma/shared.c
index bda578cc3304..2565a5aecb80 100644
--- a/tools/testing/vma/shared.c
+++ b/tools/testing/vma/shared.c
@@ -14,7 +14,7 @@ struct task_struct __current;
struct vm_area_struct *alloc_vma(struct mm_struct *mm,
unsigned long start, unsigned long end,
- pgoff_t pgoff, vm_flags_t vm_flags)
+ pgoff_t pgoff, vma_flags_t vma_flags)
{
struct vm_area_struct *vma = vm_area_alloc(mm);
@@ -24,7 +24,7 @@ struct vm_area_struct *alloc_vma(struct mm_struct *mm,
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
- vm_flags_reset(vma, vm_flags);
+ vma->flags = vma_flags;
vma_assert_detached(vma);
return vma;
@@ -38,9 +38,9 @@ void detach_free_vma(struct vm_area_struct *vma)
struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
unsigned long start, unsigned long end,
- pgoff_t pgoff, vm_flags_t vm_flags)
+ pgoff_t pgoff, vma_flags_t vma_flags)
{
- struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags);
+ struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vma_flags);
if (vma == NULL)
return NULL;
diff --git a/tools/testing/vma/shared.h b/tools/testing/vma/shared.h
index 6c64211cfa22..8b9e3b11c3cb 100644
--- a/tools/testing/vma/shared.h
+++ b/tools/testing/vma/shared.h
@@ -35,6 +35,24 @@
#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
+#define ASSERT_FLAGS_SAME_MASK(_flags, _flags_other) \
+ ASSERT_TRUE(vma_flags_same_mask((_flags), (_flags_other)))
+
+#define ASSERT_FLAGS_NOT_SAME_MASK(_flags, _flags_other) \
+ ASSERT_FALSE(vma_flags_same_mask((_flags), (_flags_other)))
+
+#define ASSERT_FLAGS_SAME(_flags, ...) \
+ ASSERT_TRUE(vma_flags_same(_flags, __VA_ARGS__))
+
+#define ASSERT_FLAGS_NOT_SAME(_flags, ...) \
+ ASSERT_FALSE(vma_flags_same(_flags, __VA_ARGS__))
+
+#define ASSERT_FLAGS_EMPTY(_flags) \
+ ASSERT_TRUE(vma_flags_empty(_flags))
+
+#define ASSERT_FLAGS_NONEMPTY(_flags) \
+ ASSERT_FALSE(vma_flags_empty(_flags))
+
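Editor's note: a hypothetical usage sketch of the new assertion helpers inside a bool-returning test body (the ASSERT_* macros bail out of the test on failure):

	vma_flags_t a = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT);
	vma_flags_t empty = EMPTY_VMA_FLAGS;

	ASSERT_FLAGS_SAME(&a, VMA_READ_BIT, VMA_WRITE_BIT);
	ASSERT_FLAGS_NOT_SAME_MASK(&a, empty);
	ASSERT_FLAGS_EMPTY(&empty);
	ASSERT_FLAGS_NONEMPTY(&a);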
#define IS_SET(_val, _flags) ((_val & _flags) == _flags)
extern bool fail_prealloc;
@@ -76,7 +94,7 @@ static inline void dummy_close(struct vm_area_struct *)
/* Helper function to simply allocate a VMA. */
struct vm_area_struct *alloc_vma(struct mm_struct *mm,
unsigned long start, unsigned long end,
- pgoff_t pgoff, vm_flags_t vm_flags);
+ pgoff_t pgoff, vma_flags_t vma_flags);
/* Helper function to detach and free a VMA. */
void detach_free_vma(struct vm_area_struct *vma);
@@ -84,7 +102,7 @@ void detach_free_vma(struct vm_area_struct *vma);
/* Helper function to allocate a VMA and link it to the tree. */
struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
unsigned long start, unsigned long end,
- pgoff_t pgoff, vm_flags_t vm_flags);
+ pgoff_t pgoff, vma_flags_t vma_flags);
/*
* Helper function to reset the dummy anon_vma to indicate it has not been
diff --git a/tools/testing/vma/tests/merge.c b/tools/testing/vma/tests/merge.c
index 3708dc6945b0..03b6f9820e0a 100644
--- a/tools/testing/vma/tests/merge.c
+++ b/tools/testing/vma/tests/merge.c
@@ -33,7 +33,7 @@ static int expand_existing(struct vma_merge_struct *vmg)
* specified new range.
*/
void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags)
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags)
{
vma_iter_set(vmg->vmi, start);
@@ -45,7 +45,7 @@ void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
vmg->start = start;
vmg->end = end;
vmg->pgoff = pgoff;
- vmg->vm_flags = vm_flags;
+ vmg->vma_flags = vma_flags;
vmg->just_expand = false;
vmg->__remove_middle = false;
@@ -56,10 +56,10 @@ void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
/* Helper function to set both the VMG range and its anon_vma. */
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
struct anon_vma *anon_vma)
{
- vmg_set_range(vmg, start, end, pgoff, vm_flags);
+ vmg_set_range(vmg, start, end, pgoff, vma_flags);
vmg->anon_vma = anon_vma;
}
@@ -71,12 +71,12 @@ static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long s
*/
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
bool *was_merged)
{
struct vm_area_struct *merged;
- vmg_set_range(vmg, start, end, pgoff, vm_flags);
+ vmg_set_range(vmg, start, end, pgoff, vma_flags);
merged = merge_new(vmg);
if (merged) {
@@ -89,23 +89,24 @@ static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
- return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
+ return alloc_and_link_vma(mm, start, end, pgoff, vma_flags);
}
static bool test_simple_merge(void)
{
struct vm_area_struct *vma;
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
- struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
+ struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
+ struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vma_flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
.start = 0x1000,
.end = 0x2000,
- .vm_flags = vm_flags,
+ .vma_flags = vma_flags,
.pgoff = 1,
};
@@ -118,7 +119,7 @@ static bool test_simple_merge(void)
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 0);
- ASSERT_EQ(vma->vm_flags, vm_flags);
+ ASSERT_FLAGS_SAME_MASK(&vma->flags, vma_flags);
detach_free_vma(vma);
mtree_destroy(&mm.mm_mt);
@@ -129,11 +130,11 @@ static bool test_simple_merge(void)
static bool test_simple_modify(void)
{
struct vm_area_struct *vma;
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
+ struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
- vm_flags_t flags = VM_READ | VM_MAYREAD;
ASSERT_FALSE(attach_vma(&mm, init_vma));
@@ -142,7 +143,7 @@ static bool test_simple_modify(void)
* performs the merge/split only.
*/
vma = vma_modify_flags(&vmi, init_vma, init_vma,
- 0x1000, 0x2000, &flags);
+ 0x1000, 0x2000, &vma_flags);
ASSERT_NE(vma, NULL);
/* We modify the provided VMA, and on split allocate new VMAs. */
ASSERT_EQ(vma, init_vma);
@@ -189,9 +190,10 @@ static bool test_simple_modify(void)
static bool test_simple_expand(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
+ struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.vmi = &vmi,
@@ -217,9 +219,10 @@ static bool test_simple_expand(void)
static bool test_simple_shrink(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
+ struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0);
ASSERT_FALSE(attach_vma(&mm, vma));
@@ -238,7 +241,8 @@ static bool test_simple_shrink(void)
static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
@@ -265,31 +269,31 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
bool merged;
if (is_sticky)
- vm_flags |= VM_STICKY;
+ vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
/*
* 0123456789abc
* AA B CC
*/
- vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
+ vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
ASSERT_NE(vma_a, NULL);
if (a_is_sticky)
- vm_flags_set(vma_a, VM_STICKY);
+ vma_flags_set_mask(&vma_a->flags, VMA_STICKY_FLAGS);
/* We give each VMA a single avc so we can test anon_vma duplication. */
INIT_LIST_HEAD(&vma_a->anon_vma_chain);
list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
- vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
ASSERT_NE(vma_b, NULL);
if (b_is_sticky)
- vm_flags_set(vma_b, VM_STICKY);
+ vma_flags_set_mask(&vma_b->flags, VMA_STICKY_FLAGS);
INIT_LIST_HEAD(&vma_b->anon_vma_chain);
list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
- vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags);
+ vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vma_flags);
ASSERT_NE(vma_c, NULL);
if (c_is_sticky)
- vm_flags_set(vma_c, VM_STICKY);
+ vma_flags_set_mask(&vma_c->flags, VMA_STICKY_FLAGS);
INIT_LIST_HEAD(&vma_c->anon_vma_chain);
list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
@@ -299,7 +303,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* 0123456789abc
* AA B ** CC
*/
- vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged);
+ vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vma_flags, &merged);
ASSERT_NE(vma_d, NULL);
INIT_LIST_HEAD(&vma_d->anon_vma_chain);
list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
@@ -314,7 +318,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
*/
vma_a->vm_ops = &vm_ops; /* This should have no impact. */
vma_b->anon_vma = &dummy_anon_vma;
- vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Merge with A, delete B. */
ASSERT_TRUE(merged);
@@ -325,7 +329,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky || a_is_sticky || b_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to PREVIOUS VMA.
@@ -333,7 +337,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* 0123456789abc
* AAAA* DD CC
*/
- vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Extend A. */
ASSERT_TRUE(merged);
@@ -344,7 +348,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky || a_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to NEXT VMA.
@@ -354,7 +358,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
*/
vma_d->anon_vma = &dummy_anon_vma;
vma_d->vm_ops = &vm_ops; /* This should have no impact. */
- vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vma_flags, &merged);
ASSERT_EQ(vma, vma_d);
/* Prepend. */
ASSERT_TRUE(merged);
@@ -365,7 +369,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky) /* D uses is_sticky. */
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge BOTH sides.
@@ -374,7 +378,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* AAAAA*DDD CC
*/
vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
- vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Merge with A, delete D. */
ASSERT_TRUE(merged);
@@ -385,7 +389,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (is_sticky || a_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to NEXT VMA.
@@ -394,7 +398,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* AAAAAAAAA *CC
*/
vma_c->anon_vma = &dummy_anon_vma;
- vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vma_flags, &merged);
ASSERT_EQ(vma, vma_c);
/* Prepend C. */
ASSERT_TRUE(merged);
@@ -405,7 +409,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (is_sticky || c_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge BOTH sides.
@@ -413,7 +417,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
* 0123456789abc
* AAAAAAAAA*CCC
*/
- vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Extend A and delete C. */
ASSERT_TRUE(merged);
@@ -424,7 +428,7 @@ static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky,
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 1);
if (is_sticky || a_is_sticky || c_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Final state.
@@ -469,29 +473,30 @@ static bool test_merge_new(void)
static bool test_vma_merge_special_flags(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
- vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
- vm_flags_t all_special_flags = 0;
+ vma_flag_t special_flags[] = { VMA_IO_BIT, VMA_DONTEXPAND_BIT,
+ VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT };
+ vma_flags_t all_special_flags = EMPTY_VMA_FLAGS;
int i;
struct vm_area_struct *vma_left, *vma;
/* Make sure there aren't new VM_SPECIAL flags. */
- for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- all_special_flags |= special_flags[i];
- }
- ASSERT_EQ(all_special_flags, VM_SPECIAL);
+ for (i = 0; i < ARRAY_SIZE(special_flags); i++)
+ vma_flags_set(&all_special_flags, special_flags[i]);
+ ASSERT_FLAGS_SAME_MASK(&all_special_flags, VMA_SPECIAL_FLAGS);
/*
* 01234
* AAA
*/
- vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+ vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
ASSERT_NE(vma_left, NULL);
/* 1. Set up new VMA with special flag that would otherwise merge. */
@@ -502,12 +507,14 @@ static bool test_vma_merge_special_flags(void)
*
* This should merge if not for the VM_SPECIAL flag.
*/
- vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x4000, 3, vma_flags);
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- vm_flags_t special_flag = special_flags[i];
+ vma_flag_t special_flag = special_flags[i];
+ vma_flags_t flags = vma_flags;
- vm_flags_reset(vma_left, vm_flags | special_flag);
- vmg.vm_flags = vm_flags | special_flag;
+ vma_flags_set(&flags, special_flag);
+ vma_left->flags = flags;
+ vmg.vma_flags = flags;
vma = merge_new(&vmg);
ASSERT_EQ(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -521,15 +528,17 @@ static bool test_vma_merge_special_flags(void)
*
* Create a VMA to modify.
*/
- vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
ASSERT_NE(vma, NULL);
vmg.middle = vma;
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- vm_flags_t special_flag = special_flags[i];
+ vma_flag_t special_flag = special_flags[i];
+ vma_flags_t flags = vma_flags;
- vm_flags_reset(vma_left, vm_flags | special_flag);
- vmg.vm_flags = vm_flags | special_flag;
+ vma_flags_set(&flags, special_flag);
+ vma_left->flags = flags;
+ vmg.vma_flags = flags;
vma = merge_existing(&vmg);
ASSERT_EQ(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -541,7 +550,8 @@ static bool test_vma_merge_special_flags(void)
static bool test_vma_merge_with_close(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
@@ -621,11 +631,11 @@ static bool test_vma_merge_with_close(void)
* PPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
ASSERT_EQ(merge_new(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
@@ -646,11 +656,11 @@ static bool test_vma_merge_with_close(void)
* proceed.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -674,11 +684,11 @@ static bool test_vma_merge_with_close(void)
* proceed.
*/
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
/*
@@ -702,12 +712,12 @@ static bool test_vma_merge_with_close(void)
* PPPVVNNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -728,12 +738,12 @@ static bool test_vma_merge_with_close(void)
* PPPPPNNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -750,15 +760,16 @@ static bool test_vma_merge_with_close(void)
static bool test_vma_merge_new_with_close(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
- struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
- struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags);
+ struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
+ struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vma_flags);
const struct vm_operations_struct vm_ops = {
.close = dummy_close,
};
@@ -788,7 +799,7 @@ static bool test_vma_merge_new_with_close(void)
vma_prev->vm_ops = &vm_ops;
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags);
+ vmg_set_range(&vmg, 0x2000, 0x5000, 2, vma_flags);
vma = merge_new(&vmg);
ASSERT_NE(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -805,9 +816,10 @@ static bool test_vma_merge_new_with_close(void)
static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
- vm_flags_t prev_flags = vm_flags;
- vm_flags_t next_flags = vm_flags;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
+ vma_flags_t prev_flags = vma_flags;
+ vma_flags_t next_flags = vma_flags;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_prev, *vma_next;
@@ -821,11 +833,11 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
struct anon_vma_chain avc = {};
if (prev_is_sticky)
- prev_flags |= VM_STICKY;
+ vma_flags_set_mask(&prev_flags, VMA_STICKY_FLAGS);
if (middle_is_sticky)
- vm_flags |= VM_STICKY;
+ vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
if (next_is_sticky)
- next_flags |= VM_STICKY;
+ vma_flags_set_mask(&next_flags, VMA_STICKY_FLAGS);
/*
* Merge right case - partial span.
@@ -837,11 +849,11 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
* 0123456789
* VNNNNNN
*/
- vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
vma->vm_ops = &vm_ops; /* This should have no impact. */
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
vmg.middle = vma;
vmg.prev = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -858,7 +870,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma_next));
ASSERT_EQ(mm.map_count, 2);
if (middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
@@ -873,10 +885,10 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
* 0123456789
* NNNNNNN
*/
- vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vma_flags, &dummy_anon_vma);
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_EQ(merge_existing(&vmg), vma_next);
@@ -888,7 +900,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma_next));
ASSERT_EQ(mm.map_count, 1);
if (middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted vma. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
@@ -905,9 +917,9 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
vma->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -924,7 +936,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (prev_is_sticky || middle_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
@@ -941,8 +953,8 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -955,7 +967,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_EQ(mm.map_count, 1);
if (prev_is_sticky || middle_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted vma. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
@@ -972,9 +984,9 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -987,7 +999,7 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_EQ(mm.map_count, 1);
if (prev_is_sticky || middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted prev and next. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
@@ -1008,40 +1020,40 @@ static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bo
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);
- vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x5000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
+ vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags);
+ vmg_set_range(&vmg, 0x6000, 0x7000, 6, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x7000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x6000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
+ vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
@@ -1067,7 +1079,8 @@ static bool test_merge_existing(void)
static bool test_anon_vma_non_mergeable(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_prev, *vma_next;
@@ -1091,9 +1104,9 @@ static bool test_anon_vma_non_mergeable(void)
* 0123456789
* PPPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);
/*
* Give both prev and next single anon_vma_chain fields, so they will
@@ -1101,7 +1114,7 @@ static bool test_anon_vma_non_mergeable(void)
*
* However, when prev is compared to next, the merge should fail.
*/
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
@@ -1129,10 +1142,10 @@ static bool test_anon_vma_non_mergeable(void)
* 0123456789
* PPPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
vmg.prev = vma_prev;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
@@ -1154,7 +1167,8 @@ static bool test_anon_vma_non_mergeable(void)
static bool test_dup_anon_vma(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
@@ -1175,11 +1189,11 @@ static bool test_dup_anon_vma(void)
* This covers new VMA merging, as these operations amount to a VMA
* expand.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma_next->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags);
+ vmg_set_range(&vmg, 0, 0x5000, 0, vma_flags);
vmg.target = vma_prev;
vmg.next = vma_next;
@@ -1201,16 +1215,16 @@ static bool test_dup_anon_vma(void)
* extend delete delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
/* Initialise avc so mergeability check passes. */
INIT_LIST_HEAD(&vma_next->anon_vma_chain);
list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
vma_next->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -1234,12 +1248,12 @@ static bool test_dup_anon_vma(void)
* extend delete delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
vmg.anon_vma = &dummy_anon_vma;
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -1263,11 +1277,11 @@ static bool test_dup_anon_vma(void)
* extend shrink/delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
@@ -1291,11 +1305,11 @@ static bool test_dup_anon_vma(void)
* shrink/delete extend
*/
- vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
@@ -1314,7 +1328,8 @@ static bool test_dup_anon_vma(void)
static bool test_vmi_prealloc_fail(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
@@ -1330,11 +1345,11 @@ static bool test_vmi_prealloc_fail(void)
* the duplicated anon_vma is unlinked.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->anon_vma = &dummy_anon_vma;
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
@@ -1358,11 +1373,11 @@ static bool test_vmi_prealloc_fail(void)
* performed in this case too.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0, 0x5000, 3, vma_flags);
vmg.target = vma_prev;
vmg.next = vma;
@@ -1380,13 +1395,14 @@ static bool test_vmi_prealloc_fail(void)
static bool test_merge_extend(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0x1000);
struct vm_area_struct *vma;
- vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags);
- alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vma_flags);
+ alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
/*
* Extend a VMA into the gap between itself and the following VMA.
@@ -1410,11 +1426,12 @@ static bool test_merge_extend(void)
static bool test_expand_only_mode(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma_prev, *vma;
- VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5);
+ VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vma_flags, 5);
/*
* Place a VMA prior to the one we're expanding so we assert that we do
@@ -1422,14 +1439,14 @@ static bool test_expand_only_mode(void)
* have, through the use of the just_expand flag, indicated we do not
* need to do so.
*/
- alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
+ alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
/*
* We will be positioned at the prev VMA, but looking to expand to
* 0x9000.
*/
vma_iter_set(&vmi, 0x3000);
- vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.just_expand = true;
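Editor's note: the mechanical conversion repeated throughout these merge tests, side by side:

	/* old: vm_flags_t  f = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	 * new: vma_flags_t f = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
	 *                                   VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	 */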
diff --git a/tools/testing/vma/tests/mmap.c b/tools/testing/vma/tests/mmap.c
index bded4ecbe5db..c85bc000d1cb 100644
--- a/tools/testing/vma/tests/mmap.c
+++ b/tools/testing/vma/tests/mmap.c
@@ -2,6 +2,8 @@
static bool test_mmap_region_basic(void)
{
+ const vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
unsigned long addr;
struct vm_area_struct *vma;
@@ -10,27 +12,19 @@ static bool test_mmap_region_basic(void)
current->mm = &mm;
/* Map at 0x300000, length 0x3000. */
- addr = __mmap_region(NULL, 0x300000, 0x3000,
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
- 0x300, NULL);
+ addr = __mmap_region(NULL, 0x300000, 0x3000, vma_flags, 0x300, NULL);
ASSERT_EQ(addr, 0x300000);
/* Map at 0x250000, length 0x3000. */
- addr = __mmap_region(NULL, 0x250000, 0x3000,
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
- 0x250, NULL);
+ addr = __mmap_region(NULL, 0x250000, 0x3000, vma_flags, 0x250, NULL);
ASSERT_EQ(addr, 0x250000);
/* Map at 0x303000, merging to 0x300000 of length 0x6000. */
- addr = __mmap_region(NULL, 0x303000, 0x3000,
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
- 0x303, NULL);
+ addr = __mmap_region(NULL, 0x303000, 0x3000, vma_flags, 0x303, NULL);
ASSERT_EQ(addr, 0x303000);
/* Map at 0x24d000, merging to 0x250000 of length 0x6000. */
- addr = __mmap_region(NULL, 0x24d000, 0x3000,
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
- 0x24d, NULL);
+ addr = __mmap_region(NULL, 0x24d000, 0x3000, vma_flags, 0x24d, NULL);
ASSERT_EQ(addr, 0x24d000);
ASSERT_EQ(mm.map_count, 2);
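Editor's note on the merge arithmetic above: the map at 0x303000 (pgoff 0x303) merges with [0x300000, 0x303000) (pgoff 0x300) because both the virtual range and the page offsets are contiguous:

	0x300000 + 0x3000 == 0x303000  and  0x300 + (0x3000 >> PAGE_SHIFT) == 0x303

and likewise 0x24d000 + 0x3000 == 0x250000 with pgoff 0x24d + 0x3 == 0x250, so the map placed below an existing one merges upward as well, leaving two VMAs.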
diff --git a/tools/testing/vma/tests/vma.c b/tools/testing/vma/tests/vma.c
index c54ffc954f11..754a2da06321 100644
--- a/tools/testing/vma/tests/vma.c
+++ b/tools/testing/vma/tests/vma.c
@@ -5,11 +5,12 @@ static bool compare_legacy_flags(vm_flags_t legacy_flags, vma_flags_t flags)
const unsigned long legacy_val = legacy_flags;
/* The lower word should contain the precise same value. */
const unsigned long flags_lower = flags.__vma_flags[0];
-#if NUM_VMA_FLAGS > BITS_PER_LONG
+ vma_flags_t converted_flags;
+#if NUM_VMA_FLAG_BITS > BITS_PER_LONG
int i;
/* All bits in higher flag values should be zero. */
- for (i = 1; i < NUM_VMA_FLAGS / BITS_PER_LONG; i++) {
+ for (i = 1; i < NUM_VMA_FLAG_BITS / BITS_PER_LONG; i++) {
if (flags.__vma_flags[i] != 0)
return false;
}
@@ -17,12 +18,18 @@ static bool compare_legacy_flags(vm_flags_t legacy_flags, vma_flags_t flags)
static_assert(sizeof(legacy_flags) == sizeof(unsigned long));
+ /* Assert that legacy flag helpers work correctly. */
+ converted_flags = legacy_to_vma_flags(legacy_flags);
+ ASSERT_FLAGS_SAME_MASK(&converted_flags, flags);
+ ASSERT_EQ(vma_flags_to_legacy(flags), legacy_flags);
+
return legacy_val == flags_lower;
}
static bool test_copy_vma(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
bool need_locks = false;
VMA_ITERATOR(vmi, &mm, 0);
@@ -30,7 +37,7 @@ static bool test_copy_vma(void)
/* Move backwards and do not merge. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
ASSERT_NE(vma_new, vma);
ASSERT_EQ(vma_new->vm_start, 0);
@@ -42,8 +49,8 @@ static bool test_copy_vma(void)
/* Move a VMA into position next to another and merge the two. */
- vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vma_flags);
vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
vma_assert_attached(vma_new);
@@ -61,7 +68,6 @@ static bool test_vma_flags_unchanged(void)
struct vm_area_struct vma;
struct vm_area_desc desc;
-
vma.flags = EMPTY_VMA_FLAGS;
desc.vma_flags = EMPTY_VMA_FLAGS;
@@ -116,6 +122,7 @@ static bool test_vma_flags_cleared(void)
return true;
}
+#if NUM_VMA_FLAG_BITS > 64
/*
* Assert that VMA flag functions that operate at the system word level function
* correctly.
@@ -124,10 +131,14 @@ static bool test_vma_flags_word(void)
{
vma_flags_t flags = EMPTY_VMA_FLAGS;
const vma_flags_t comparison =
- mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, 64, 65);
+ mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ 64, 65);
/* Set some custom high flags. */
vma_flags_set(&flags, 64, 65);
+
/* Now overwrite the first word. */
vma_flags_overwrite_word(&flags, VM_READ | VM_WRITE);
/* Ensure they are equal. */
@@ -158,29 +169,93 @@ static bool test_vma_flags_word(void)
return true;
}
+#endif /* NUM_VMA_FLAG_BITS > 64 */
/* Ensure that vma_flags_test() and friends work correctly. */
static bool test_vma_flags_test(void)
{
- const vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
- VMA_EXEC_BIT, 64, 65);
- struct vm_area_struct vma;
- struct vm_area_desc desc;
+ vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+ struct vm_area_desc desc = {
+ .vma_flags = flags,
+ };
+ struct vm_area_struct vma = {
+ .flags = flags,
+ };
+
+#define do_test(_flag) \
+ ASSERT_TRUE(vma_flags_test(&flags, _flag)); \
+ ASSERT_TRUE(vma_flags_test_single_mask(&flags, mk_vma_flags(_flag))); \
+ ASSERT_TRUE(vma_test(&vma, _flag)); \
+ ASSERT_TRUE(vma_test_single_mask(&vma, mk_vma_flags(_flag))); \
+ ASSERT_TRUE(vma_desc_test(&desc, _flag))
+
+#define do_test_false(_flag) \
+ ASSERT_FALSE(vma_flags_test(&flags, _flag)); \
+ ASSERT_FALSE(vma_flags_test_single_mask(&flags, mk_vma_flags(_flag))); \
+ ASSERT_FALSE(vma_test(&vma, _flag)); \
+ ASSERT_FALSE(vma_test_single_mask(&vma, mk_vma_flags(_flag))); \
+ ASSERT_FALSE(vma_desc_test(&desc, _flag))
- vma.flags = flags;
- desc.vma_flags = flags;
+ do_test(VMA_READ_BIT);
+ do_test(VMA_WRITE_BIT);
+ do_test(VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
+ do_test(64);
+ do_test(65);
+#endif
+ do_test_false(VMA_MAYWRITE_BIT);
+#if NUM_VMA_FLAG_BITS > 64
+ do_test_false(66);
+#endif
+
+#undef do_test
+#undef do_test_false
+
+ /* We define the _single_mask() variants to return false if the mask is empty. */
+ ASSERT_FALSE(vma_flags_test_single_mask(&flags, EMPTY_VMA_FLAGS));
+ ASSERT_FALSE(vma_test_single_mask(&vma, EMPTY_VMA_FLAGS));
+ /* Even when both flags and tested flag mask are empty! */
+ flags = EMPTY_VMA_FLAGS;
+ vma.flags = EMPTY_VMA_FLAGS;
+ ASSERT_FALSE(vma_flags_test_single_mask(&flags, EMPTY_VMA_FLAGS));
+ ASSERT_FALSE(vma_test_single_mask(&vma, EMPTY_VMA_FLAGS));
+
+ return true;
+}
+
+/* Ensure that vma_flags_test_any() and friends work correctly. */
+static bool test_vma_flags_test_any(void)
+{
+ const vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+ struct vm_area_struct vma = {
+ .flags = flags,
+ };
+ struct vm_area_desc desc = {
+ .vma_flags = flags,
+ };
#define do_test(...) \
- ASSERT_TRUE(vma_flags_test(&flags, __VA_ARGS__)); \
- ASSERT_TRUE(vma_desc_test_flags(&desc, __VA_ARGS__))
+ ASSERT_TRUE(vma_flags_test_any(&flags, __VA_ARGS__)); \
+ ASSERT_TRUE(vma_desc_test_any(&desc, __VA_ARGS__)); \
+ ASSERT_TRUE(vma_test_any(&vma, __VA_ARGS__))
#define do_test_all_true(...) \
ASSERT_TRUE(vma_flags_test_all(&flags, __VA_ARGS__)); \
- ASSERT_TRUE(vma_test_all_flags(&vma, __VA_ARGS__))
+ ASSERT_TRUE(vma_test_all(&vma, __VA_ARGS__))
#define do_test_all_false(...) \
ASSERT_FALSE(vma_flags_test_all(&flags, __VA_ARGS__)); \
- ASSERT_FALSE(vma_test_all_flags(&vma, __VA_ARGS__))
+ ASSERT_FALSE(vma_test_all(&vma, __VA_ARGS__))
/*
* Testing for some flags that are present, some that are not - should
@@ -189,10 +264,12 @@ static bool test_vma_flags_test(void)
do_test(VMA_READ_BIT, VMA_MAYREAD_BIT, VMA_SEQ_READ_BIT);
/* However, the ...test_all() variant should NOT pass. */
do_test_all_false(VMA_READ_BIT, VMA_MAYREAD_BIT, VMA_SEQ_READ_BIT);
+#if NUM_VMA_FLAG_BITS > 64
/* But should pass for flags present. */
do_test_all_true(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64, 65);
/* Also subsets... */
do_test_all_true(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64);
+#endif
do_test_all_true(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
do_test_all_true(VMA_READ_BIT, VMA_WRITE_BIT);
do_test_all_true(VMA_READ_BIT);
@@ -200,7 +277,7 @@ static bool test_vma_flags_test(void)
* Check _mask variant. We don't need to test extensively as macro
* helper is the equivalent.
*/
- ASSERT_TRUE(vma_flags_test_mask(&flags, flags));
+ ASSERT_TRUE(vma_flags_test_any_mask(&flags, flags));
ASSERT_TRUE(vma_flags_test_all_mask(&flags, flags));
/* Single bits. */
@@ -245,6 +322,10 @@ static bool test_vma_flags_test(void)
do_test(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64, 65);
#endif
+ /* Testing all flags against none trivially succeeds. */
+ ASSERT_TRUE(vma_flags_test_all_mask(&flags, EMPTY_VMA_FLAGS));
+ ASSERT_TRUE(vma_test_all_mask(&vma, EMPTY_VMA_FLAGS));
+
#undef do_test
#undef do_test_all_true
#undef do_test_all_false
@@ -256,59 +337,77 @@ static bool test_vma_flags_test(void)
static bool test_vma_flags_clear(void)
{
vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
- VMA_EXEC_BIT, 64, 65);
- vma_flags_t mask = mk_vma_flags(VMA_EXEC_BIT, 64);
- struct vm_area_struct vma;
- struct vm_area_desc desc;
-
- vma.flags = flags;
- desc.vma_flags = flags;
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+ vma_flags_t mask = mk_vma_flags(VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64
+#endif
+ );
+ struct vm_area_struct vma = {
+ .flags = flags,
+ };
+ struct vm_area_desc desc = {
+ .vma_flags = flags,
+ };
/* Cursory check of _mask() variant, as the helper macros imply. */
vma_flags_clear_mask(&flags, mask);
- vma_flags_clear_mask(&vma.flags, mask);
+ vma_clear_flags_mask(&vma, mask);
vma_desc_clear_flags_mask(&desc, mask);
- ASSERT_FALSE(vma_flags_test(&flags, VMA_EXEC_BIT, 64));
- ASSERT_FALSE(vma_flags_test(&vma.flags, VMA_EXEC_BIT, 64));
- ASSERT_FALSE(vma_desc_test_flags(&desc, VMA_EXEC_BIT, 64));
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FALSE(vma_flags_test_any(&flags, VMA_EXEC_BIT, 64));
+ ASSERT_FALSE(vma_test_any(&vma, VMA_EXEC_BIT, 64));
+ ASSERT_FALSE(vma_desc_test_any(&desc, VMA_EXEC_BIT, 64));
/* Reset. */
vma_flags_set(&flags, VMA_EXEC_BIT, 64);
vma_set_flags(&vma, VMA_EXEC_BIT, 64);
vma_desc_set_flags(&desc, VMA_EXEC_BIT, 64);
+#endif
/*
* Clear the flags and assert clear worked, then reset flags back to
* include specified flags.
*/
-#define do_test_and_reset(...) \
- vma_flags_clear(&flags, __VA_ARGS__); \
- vma_flags_clear(&vma.flags, __VA_ARGS__); \
- vma_desc_clear_flags(&desc, __VA_ARGS__); \
- ASSERT_FALSE(vma_flags_test(&flags, __VA_ARGS__)); \
- ASSERT_FALSE(vma_flags_test(&vma.flags, __VA_ARGS__)); \
- ASSERT_FALSE(vma_desc_test_flags(&desc, __VA_ARGS__)); \
- vma_flags_set(&flags, __VA_ARGS__); \
- vma_set_flags(&vma, __VA_ARGS__); \
+#define do_test_and_reset(...) \
+ vma_flags_clear(&flags, __VA_ARGS__); \
+ vma_clear_flags(&vma, __VA_ARGS__); \
+ vma_desc_clear_flags(&desc, __VA_ARGS__); \
+ ASSERT_FALSE(vma_flags_test_any(&flags, __VA_ARGS__)); \
+ ASSERT_FALSE(vma_test_any(&vma, __VA_ARGS__)); \
+ ASSERT_FALSE(vma_desc_test_any(&desc, __VA_ARGS__)); \
+ vma_flags_set(&flags, __VA_ARGS__); \
+ vma_set_flags(&vma, __VA_ARGS__); \
vma_desc_set_flags(&desc, __VA_ARGS__)
/* Single flags. */
do_test_and_reset(VMA_READ_BIT);
do_test_and_reset(VMA_WRITE_BIT);
do_test_and_reset(VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
do_test_and_reset(64);
do_test_and_reset(65);
+#endif
/* Two flags, in different orders. */
do_test_and_reset(VMA_READ_BIT, VMA_WRITE_BIT);
do_test_and_reset(VMA_READ_BIT, VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
do_test_and_reset(VMA_READ_BIT, 64);
do_test_and_reset(VMA_READ_BIT, 65);
+#endif
do_test_and_reset(VMA_WRITE_BIT, VMA_READ_BIT);
do_test_and_reset(VMA_WRITE_BIT, VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
do_test_and_reset(VMA_WRITE_BIT, 64);
do_test_and_reset(VMA_WRITE_BIT, 65);
+#endif
do_test_and_reset(VMA_EXEC_BIT, VMA_READ_BIT);
do_test_and_reset(VMA_EXEC_BIT, VMA_WRITE_BIT);
+#if NUM_VMA_FLAG_BITS > 64
do_test_and_reset(VMA_EXEC_BIT, 64);
do_test_and_reset(VMA_EXEC_BIT, 65);
do_test_and_reset(64, VMA_READ_BIT);
@@ -319,6 +418,7 @@ static bool test_vma_flags_clear(void)
do_test_and_reset(65, VMA_WRITE_BIT);
do_test_and_reset(65, VMA_EXEC_BIT);
do_test_and_reset(65, 64);
+#endif
/* Three flags. */
@@ -328,12 +428,229 @@ static bool test_vma_flags_clear(void)
return true;
}
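+
+/*
+ * A small sketch of the semantics the reset macro above exercises: clear
+ * and set touch only the listed bits and preserve all others, e.g.:
+ *
+ *   vma_flags_t f = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT);
+ *   vma_flags_clear(&f, VMA_READ_BIT);  -> f == {WRITE}
+ *   vma_flags_set(&f, VMA_EXEC_BIT);    -> f == {WRITE, EXEC}
+ */
+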
+/* Ensure that vma_flags_empty() works correctly. */
+static bool test_vma_flags_empty(void)
+{
+ vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+ ASSERT_FLAGS_NONEMPTY(&flags);
+ vma_flags_clear(&flags, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_NONEMPTY(&flags);
+ vma_flags_clear(&flags, 64, 65);
+ ASSERT_FLAGS_EMPTY(&flags);
+#else
+ ASSERT_FLAGS_EMPTY(&flags);
+#endif
+
+ return true;
+}
+
+/* Ensure that vma_flags_diff_pair() works correctly. */
+static bool test_vma_flags_diff(void)
+{
+ vma_flags_t flags1 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+ vma_flags_t flags2 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT, VMA_MAYWRITE_BIT,
+ VMA_MAYEXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65, 66, 67
+#endif
+ );
+ vma_flags_t diff = vma_flags_diff_pair(&flags1, &flags2);
+
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_SAME(&diff, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT, 66, 67);
+#else
+ ASSERT_FLAGS_SAME(&diff, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT);
+#endif
+ /* Should be the same even if the arguments are re-ordered. */
+ diff = vma_flags_diff_pair(&flags2, &flags1);
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_SAME(&diff, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT, 66, 67);
+#else
+ ASSERT_FLAGS_SAME(&diff, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT);
+#endif
+
+ /* Diffing a set of flags against itself should yield no difference. */
+ diff = vma_flags_diff_pair(&flags1, &flags1);
+ ASSERT_FLAGS_EMPTY(&diff);
+ diff = vma_flags_diff_pair(&flags2, &flags2);
+ ASSERT_FLAGS_EMPTY(&diff);
+
+ /* One set of flags against an empty one should equal the original. */
+ flags2 = EMPTY_VMA_FLAGS;
+ diff = vma_flags_diff_pair(&flags1, &flags2);
+ ASSERT_FLAGS_SAME_MASK(&diff, flags1);
+
+ /* A subset should work too. */
+ flags2 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT);
+ diff = vma_flags_diff_pair(&flags1, &flags2);
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_SAME(&diff, VMA_EXEC_BIT, 64, 65);
+#else
+ ASSERT_FLAGS_SAME(&diff, VMA_EXEC_BIT);
+#endif
+
+ return true;
+}
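+
+/*
+ * As the assertions above suggest, vma_flags_diff_pair() yields the
+ * symmetric difference of two flag sets - the bits set in exactly one of
+ * them - presumably backed by the __bitmap_xor() helper this series adds:
+ *
+ *   diff = vma_flags_diff_pair(&a, &a);  -> always empty
+ *   diff = vma_flags_diff_pair(&a, &b);  -> same as (&b, &a)
+ */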
+
+/* Ensure that vma_flags_and() and friends work correctly. */
+static bool test_vma_flags_and(void)
+{
+ vma_flags_t flags1 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+ vma_flags_t flags2 = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT, VMA_MAYWRITE_BIT,
+ VMA_MAYEXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65, 66, 67
+#endif
+ );
+ vma_flags_t flags3 = mk_vma_flags(VMA_IO_BIT, VMA_MAYBE_GUARD_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 68, 69
+#endif
+ );
+ vma_flags_t and = vma_flags_and_mask(&flags1, flags2);
+
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ 64, 65);
+#else
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+#endif
+
+ and = vma_flags_and_mask(&flags1, flags1);
+ ASSERT_FLAGS_SAME_MASK(&and, flags1);
+
+ and = vma_flags_and_mask(&flags2, flags2);
+ ASSERT_FLAGS_SAME_MASK(&and, flags2);
+
+ and = vma_flags_and_mask(&flags1, flags3);
+ ASSERT_FLAGS_EMPTY(&and);
+ and = vma_flags_and_mask(&flags2, flags3);
+ ASSERT_FLAGS_EMPTY(&and);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+
+#if NUM_VMA_FLAG_BITS > 64
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ 64);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ 64, 65);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT, 64,
+ 65);
+#endif
+
+ /* AND against some flags that are not set. */
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ VMA_IO_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ VMA_IO_BIT, VMA_RAND_READ_BIT);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+
+#if NUM_VMA_FLAG_BITS > 64
+ and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT,
+ VMA_IO_BIT, VMA_RAND_READ_BIT, 69);
+ ASSERT_FLAGS_SAME(&and, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
+#endif
+
+ return true;
+}
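+
+/*
+ * Sketch of the intersection semantics exercised above: vma_flags_and()
+ * and vma_flags_and_mask() keep only the bits set in both operands:
+ *
+ *   and = vma_flags_and(&flags1, VMA_READ_BIT, VMA_IO_BIT);
+ *   -> {READ}: VMA_IO_BIT is not set in flags1, so it drops out
+ */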
+
+/* Ensure append_vma_flags() acts as expected. */
+static bool test_append_vma_flags(void)
+{
+ vma_flags_t flags = append_vma_flags(VMA_REMAP_FLAGS, VMA_READ_BIT,
+ VMA_WRITE_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+ ASSERT_FLAGS_SAME(&flags, VMA_IO_BIT, VMA_PFNMAP_BIT,
+ VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT, VMA_READ_BIT,
+ VMA_WRITE_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+ flags = append_vma_flags(EMPTY_VMA_FLAGS, VMA_READ_BIT, VMA_WRITE_BIT);
+ ASSERT_FLAGS_SAME(&flags, VMA_READ_BIT, VMA_WRITE_BIT);
+
+ return true;
+}
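+
+/*
+ * As exercised above, append_vma_flags() ORs the listed bits into a copy
+ * of the base mask it is handed, e.g.:
+ *
+ *   vma_flags_t f = append_vma_flags(EMPTY_VMA_FLAGS, VMA_READ_BIT);
+ *   -> f == {READ}
+ */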
+
+/* Assert that vma_flags_count() behaves as expected. */
+static bool test_vma_flags_count(void)
+{
+ vma_flags_t flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_EXEC_BIT
+#if NUM_VMA_FLAG_BITS > 64
+ , 64, 65
+#endif
+ );
+
+#if NUM_VMA_FLAG_BITS > 64
+ ASSERT_EQ(vma_flags_count(&flags), 5);
+ vma_flags_clear(&flags, 64);
+ ASSERT_EQ(vma_flags_count(&flags), 4);
+ vma_flags_clear(&flags, 65);
+#endif
+ ASSERT_EQ(vma_flags_count(&flags), 3);
+ vma_flags_clear(&flags, VMA_EXEC_BIT);
+ ASSERT_EQ(vma_flags_count(&flags), 2);
+ vma_flags_clear(&flags, VMA_WRITE_BIT);
+ ASSERT_EQ(vma_flags_count(&flags), 1);
+ vma_flags_clear(&flags, VMA_READ_BIT);
+ ASSERT_EQ(vma_flags_count(&flags), 0);
+
+ return true;
+}
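+
+/*
+ * vma_flags_count() is in effect a population count over the flag bits,
+ * as the one-bit-at-a-time decrements above demonstrate:
+ *
+ *   vma_flags_t f = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT);
+ *   -> vma_flags_count(&f) == 2
+ */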
+
static void run_vma_tests(int *num_tests, int *num_fail)
{
TEST(copy_vma);
TEST(vma_flags_unchanged);
TEST(vma_flags_cleared);
+#if NUM_VMA_FLAG_BITS > 64
TEST(vma_flags_word);
+#endif
TEST(vma_flags_test);
+ TEST(vma_flags_test_any);
TEST(vma_flags_clear);
+ TEST(vma_flags_empty);
+ TEST(vma_flags_diff);
+ TEST(vma_flags_and);
+ TEST(append_vma_flags);
+ TEST(vma_flags_count);
}
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 0e1121e2ef23..e12ab2c80f95 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -51,6 +51,12 @@ typedef unsigned long pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef __bitwise unsigned int vm_fault_t;
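+
+/*
+ * Userspace stand-ins for the kernel's VM_* assertion macros: the extra
+ * context argument (_vmg/_vma) is discarded and the checks fall back to
+ * the plain WARN_ON()/BUG_ON() stubs.
+ */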
+#define VM_WARN_ON(_expr) (WARN_ON(_expr))
+#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
+#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
+#define VM_BUG_ON(_expr) (BUG_ON(_expr))
+#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
+
#include "include/stubs.h"
#include "include/dup.h"
#include "include/custom.h"