From 63c16c3a7608558a8e5ced96b8b6b06c490fd513 Mon Sep 17 00:00:00 2001
From: Chris Coulson
Date: Wed, 23 Jan 2019 19:17:09 +0000
Subject: apparmor: Initial implementation of raw policy blob compression

This adds an initial implementation of raw policy blob compression,
using deflate. The compression level can be controlled via the new
module parameter "apparmor.rawdata_compression_level", which can be set
to a value between 0 (no compression) and 9 (highest compression).

Signed-off-by: Chris Coulson
Signed-off-by: John Johansen
---
 security/apparmor/lsm.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

(limited to 'security/apparmor/lsm.c')

diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 87500bde5a92..502846789965 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -1266,6 +1267,16 @@ static const struct kernel_param_ops param_ops_aauint = {
         .get = param_get_aauint
 };
 
+static int param_set_aacompressionlevel(const char *val,
+                                        const struct kernel_param *kp);
+static int param_get_aacompressionlevel(char *buffer,
+                                        const struct kernel_param *kp);
+#define param_check_aacompressionlevel param_check_int
+static const struct kernel_param_ops param_ops_aacompressionlevel = {
+        .set = param_set_aacompressionlevel,
+        .get = param_get_aacompressionlevel
+};
+
 static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
 static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
 #define param_check_aalockpolicy param_check_bool
@@ -1296,6 +1307,11 @@ bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
 module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
 #endif
 
+/* policy loaddata compression level */
+int aa_g_rawdata_compression_level = Z_DEFAULT_COMPRESSION;
+module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
+                   aacompressionlevel, 0400);
+
 /* Debug mode */
 bool aa_g_debug = IS_ENABLED(CONFIG_SECURITY_APPARMOR_DEBUG_MESSAGES);
 module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
@@ -1460,6 +1476,37 @@ static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
         return param_get_bool(buffer, &kp_local);
 }
 
+static int param_set_aacompressionlevel(const char *val,
+                                        const struct kernel_param *kp)
+{
+        int error;
+
+        if (!apparmor_enabled)
+                return -EINVAL;
+        if (apparmor_initialized)
+                return -EPERM;
+
+        error = param_set_int(val, kp);
+
+        aa_g_rawdata_compression_level = clamp(aa_g_rawdata_compression_level,
+                                               Z_NO_COMPRESSION,
+                                               Z_BEST_COMPRESSION);
+        pr_info("AppArmor: policy rawdata compression level set to %u\n",
+                aa_g_rawdata_compression_level);
+
+        return error;
+}
+
+static int param_get_aacompressionlevel(char *buffer,
+                                        const struct kernel_param *kp)
+{
+        if (!apparmor_enabled)
+                return -EINVAL;
+        if (apparmor_initialized && !policy_view_capable(NULL))
+                return -EPERM;
+        return param_get_int(buffer, kp);
+}
+
 static int param_get_audit(char *buffer, const struct kernel_param *kp)
 {
         if (!apparmor_enabled)
--
cgit v1.2.3


From bf1d2ee7bc6215dd92427625a4c707227457a5db Mon Sep 17 00:00:00 2001
From: Bharath Vedartham
Date: Tue, 23 Apr 2019 22:23:00 +0530
Subject: apparmor: Force type-casting of current->real_cred

This patch fixes the sparse warning:

  warning: cast removes address space '' of expression.
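
For readers unfamiliar with the warning, here is a minimal stand-alone
sketch of the sparse behaviour involved (a hypothetical illustration,
not part of this patch): sparse tracks address-space annotations such
as __rcu, and a cast that silently drops such an annotation triggers
the "cast removes address space" warning, while __force marks the
conversion as intentional.

  /* hypothetical example, not taken from the kernel sources */
  struct foo { int x; };
  struct foo __rcu *gp;

  int example(void)
  {
          struct foo *p;

          p = (struct foo *)gp;          /* sparse: cast removes address space '__rcu' */
          p = (__force struct foo *)gp;  /* annotation dropped on purpose, no warning */
          return p->x;
  }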

Signed-off-by: Bharath Vedartham
Signed-off-by: John Johansen
---
 security/apparmor/lsm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'security/apparmor/lsm.c')

diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 502846789965..8f7ffc51ad86 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -1576,7 +1576,7 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
  */
 static int __init set_init_ctx(void)
 {
-        struct cred *cred = (struct cred *)current->real_cred;
+        struct cred *cred = (__force struct cred *)current->real_cred;
 
         set_cred_label(cred, aa_get_label(ns_unconfined(root_ns)));
 
--
cgit v1.2.3


From df323337e507a0009d3db1ea25948d4c7f320d62 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Fri, 3 May 2019 16:12:21 +0200
Subject: apparmor: Use a memory pool instead of per-CPU caches

The get_buffers() macro may provide one or two buffers to the caller.
Those buffers are pre-allocated for each CPU at init time. By default
this allocates 2 * 2 * MAX_PATH * POSSIBLE_CPU bytes, which is 64KiB on
a system with 4 CPUs, 1MiB with 64 CPUs, and so on.

Replace the per-CPU buffers with a common memory pool which is shared
across all CPUs. The pool grows on demand and never shrinks; it starts
with two (UP) or four (SMP) elements. Using this pool makes it possible
to request a buffer while keeping preemption enabled, which avoids the
hack in profile_transition().

It has been pointed out by Tetsuo Handa that GFP_KERNEL allocations for
small amounts of memory do not fail. To avoid an endless retry inside
the allocator, __GFP_RETRY_MAYFAIL is passed (so the allocation is not
repeated until it succeeds) and the request is retried once, in the
hope that a buffer has been returned to the pool in the meantime. Since
NULL is now a possible result, all allocation paths check the buffer
pointer and return -ENOMEM on failure.
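
A sketch of the caller pattern that results from this change
(simplified; the actual callers live in the path-name and domain code,
which this patch's lsm.c hunks do not show):

  char *buffer;

  buffer = aa_get_buffer();
  if (!buffer)
          return -ENOMEM;         /* pool empty and kmalloc() failed */

  /* ... use up to aa_g_path_max bytes of scratch space ... */

  aa_put_buffer(buffer);          /* hand the buffer back to the shared pool */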

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: John Johansen
---
 security/apparmor/lsm.c | 111 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 84 insertions(+), 27 deletions(-)

(limited to 'security/apparmor/lsm.c')

diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 8f7ffc51ad86..3e0cfdebee45 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -48,8 +48,13 @@
 /* Flag indicating whether initialization completed */
 int apparmor_initialized;
 
-DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
+union aa_buffer {
+        struct list_head list;
+        char buffer[1];
+};
 
+static LIST_HEAD(aa_global_buffers);
+static DEFINE_SPINLOCK(aa_buffers_lock);
 
 /*
  * LSM hook functions
@@ -1422,6 +1427,7 @@ static int param_set_aauint(const char *val, const struct kernel_param *kp)
                 return -EPERM;
 
         error = param_set_uint(val, kp);
+        aa_g_path_max = max_t(uint32_t, aa_g_path_max, sizeof(union aa_buffer));
         pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);
 
         return error;
@@ -1565,6 +1571,48 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
         return 0;
 }
 
+char *aa_get_buffer(void)
+{
+        union aa_buffer *aa_buf;
+        bool try_again = true;
+
+retry:
+        spin_lock(&aa_buffers_lock);
+        if (!list_empty(&aa_global_buffers)) {
+                aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+                                          list);
+                list_del(&aa_buf->list);
+                spin_unlock(&aa_buffers_lock);
+                return &aa_buf->buffer[0];
+        }
+        spin_unlock(&aa_buffers_lock);
+
+        aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+                         __GFP_NOWARN);
+        if (!aa_buf) {
+                if (try_again) {
+                        try_again = false;
+                        goto retry;
+                }
+                pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
+                return NULL;
+        }
+        return &aa_buf->buffer[0];
+}
+
+void aa_put_buffer(char *buf)
+{
+        union aa_buffer *aa_buf;
+
+        if (!buf)
+                return;
+        aa_buf = container_of(buf, union aa_buffer, buffer[0]);
+
+        spin_lock(&aa_buffers_lock);
+        list_add(&aa_buf->list, &aa_global_buffers);
+        spin_unlock(&aa_buffers_lock);
+}
+
 /*
  * AppArmor init functions
  */
@@ -1585,38 +1633,48 @@ static int __init set_init_ctx(void)
 
 static void destroy_buffers(void)
 {
-        u32 i, j;
+        union aa_buffer *aa_buf;
 
-        for_each_possible_cpu(i) {
-                for_each_cpu_buffer(j) {
-                        kfree(per_cpu(aa_buffers, i).buf[j]);
-                        per_cpu(aa_buffers, i).buf[j] = NULL;
-                }
+        spin_lock(&aa_buffers_lock);
+        while (!list_empty(&aa_global_buffers)) {
+                aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+                                          list);
+                list_del(&aa_buf->list);
+                spin_unlock(&aa_buffers_lock);
+                kfree(aa_buf);
+                spin_lock(&aa_buffers_lock);
         }
+        spin_unlock(&aa_buffers_lock);
 }
 
 static int __init alloc_buffers(void)
 {
-        u32 i, j;
-
-        for_each_possible_cpu(i) {
-                for_each_cpu_buffer(j) {
-                        char *buffer;
-
-                        if (cpu_to_node(i) > num_online_nodes())
-                                /* fallback to kmalloc for offline nodes */
-                                buffer = kmalloc(aa_g_path_max, GFP_KERNEL);
-                        else
-                                buffer = kmalloc_node(aa_g_path_max, GFP_KERNEL,
-                                                      cpu_to_node(i));
-                        if (!buffer) {
-                                destroy_buffers();
-                                return -ENOMEM;
-                        }
-                        per_cpu(aa_buffers, i).buf[j] = buffer;
+        union aa_buffer *aa_buf;
+        int i, num;
+
+        /*
+         * A function may require two buffers at once. Usually the buffers are
+         * used for a short period of time and are shared. On a UP kernel two
+         * buffers should be enough; with more CPUs it is possible that more
+         * buffers will be used simultaneously. The preallocated pool may grow.
+         * This preallocation also has the side effect that AppArmor will be
+         * disabled early at boot if aa_g_path_max is extremely high.
+         */
+        if (num_online_cpus() > 1)
+                num = 4;
+        else
+                num = 2;
+
+        for (i = 0; i < num; i++) {
+
+                aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL |
+                                 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+                if (!aa_buf) {
+                        destroy_buffers();
+                        return -ENOMEM;
                 }
+                aa_put_buffer(&aa_buf->buffer[0]);
         }
-
         return 0;
 }
 
@@ -1781,7 +1839,7 @@ static int __init apparmor_init(void)
         error = alloc_buffers();
         if (error) {
                 AA_ERROR("Unable to allocate work buffers\n");
-                goto buffers_out;
+                goto alloc_out;
         }
 
         error = set_init_ctx();
@@ -1806,7 +1864,6 @@ static int __init apparmor_init(void)
 
 buffers_out:
         destroy_buffers();
-
 alloc_out:
         aa_destroy_aafs();
         aa_teardown_dfa_engine();
--
cgit v1.2.3


From 341c1fda5e17156619fb71acfc7082b2669b4b72 Mon Sep 17 00:00:00 2001
From: John Johansen
Date: Sat, 14 Sep 2019 03:34:06 -0700
Subject: apparmor: make it so work buffers can be allocated from atomic context

In some situations AppArmor needs to be able to use its work buffers
from atomic context. Add the ability for callers to indicate that they
are in atomic context, and hold a set of work buffers in reserve for
atomic use, to reduce the chance that a large work buffer allocation
has to be done from atomic context.

Fixes: df323337e507 ("apparmor: Use a memory pool instead of per-CPU caches")
Signed-off-by: John Johansen
---
 security/apparmor/lsm.c | 50 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 35 insertions(+), 15 deletions(-)

(limited to 'security/apparmor/lsm.c')

diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 3e0cfdebee45..c940e8988444 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -53,6 +53,10 @@ union aa_buffer {
         char buffer[1];
 };
 
+#define RESERVE_COUNT 2
+static int reserve_count = RESERVE_COUNT;
+static int buffer_count;
+
 static LIST_HEAD(aa_global_buffers);
 static DEFINE_SPINLOCK(aa_buffers_lock);
 
@@ -452,7 +456,8 @@ static void apparmor_file_free_security(struct file *file)
                 aa_put_label(rcu_access_pointer(ctx->label));
 }
 
-static int common_file_perm(const char *op, struct file *file, u32 mask)
+static int common_file_perm(const char *op, struct file *file, u32 mask,
+                            bool in_atomic)
 {
         struct aa_label *label;
         int error = 0;
@@ -462,7 +467,7 @@ static int common_file_perm(const char *op, struct file *file, u32 mask)
                 return -EACCES;
 
         label = __begin_current_label_crit_section();
-        error = aa_file_perm(op, label, file, mask);
+        error = aa_file_perm(op, label, file, mask, in_atomic);
         __end_current_label_crit_section(label);
 
         return error;
@@ -470,12 +475,13 @@ static int common_file_perm(const char *op, struct file *file, u32 mask)
 
 static int apparmor_file_receive(struct file *file)
 {
-        return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file));
+        return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file),
+                                false);
 }
 
 static int apparmor_file_permission(struct file *file, int mask)
 {
-        return common_file_perm(OP_FPERM, file, mask);
+        return common_file_perm(OP_FPERM, file, mask, false);
 }
 
 static int apparmor_file_lock(struct file *file, unsigned int cmd)
@@ -485,11 +491,11 @@ static int apparmor_file_lock(struct file *file, unsigned int cmd)
         if (cmd == F_WRLCK)
                 mask |= MAY_WRITE;
 
-        return common_file_perm(OP_FLOCK, file, mask);
+        return common_file_perm(OP_FLOCK, file, mask, false);
 }
 
 static int common_mmap(const char *op, struct file *file, unsigned long prot,
-                       unsigned long flags)
+                       unsigned long flags, bool in_atomic)
 {
         int mask = 0;
 
@@ -507,20 +513,21 @@ static int common_mmap(const char *op, struct file *file, unsigned long prot,
         if (prot & PROT_EXEC)
                 mask |= AA_EXEC_MMAP;
 
-        return common_file_perm(op, file, mask);
+        return common_file_perm(op, file, mask, in_atomic);
 }
 
 static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
                               unsigned long prot, unsigned long flags)
 {
-        return common_mmap(OP_FMMAP, file, prot, flags);
+        return common_mmap(OP_FMMAP, file, prot, flags, GFP_ATOMIC);
 }
 
 static int apparmor_file_mprotect(struct vm_area_struct *vma,
                                   unsigned long reqprot, unsigned long prot)
 {
         return common_mmap(OP_FMPROT, vma->vm_file, prot,
-                           !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
+                           !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0,
+                           false);
 }
 
 static int apparmor_sb_mount(const char *dev_name, const struct path *path,
@@ -1571,24 +1578,36 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
         return 0;
 }
 
-char *aa_get_buffer(void)
+char *aa_get_buffer(bool in_atomic)
 {
         union aa_buffer *aa_buf;
         bool try_again = true;
+        gfp_t flags = (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 
 retry:
         spin_lock(&aa_buffers_lock);
-        if (!list_empty(&aa_global_buffers)) {
+        if (buffer_count > reserve_count ||
+            (in_atomic && !list_empty(&aa_global_buffers))) {
                 aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
                                           list);
                 list_del(&aa_buf->list);
+                buffer_count--;
                 spin_unlock(&aa_buffers_lock);
                 return &aa_buf->buffer[0];
         }
+        if (in_atomic) {
+                /*
+                 * out of reserve buffers and in atomic context so increase
+                 * how many buffers to keep in reserve
+                 */
+                reserve_count++;
+                flags = GFP_ATOMIC;
+        }
         spin_unlock(&aa_buffers_lock);
 
-        aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL | __GFP_RETRY_MAYFAIL |
-                         __GFP_NOWARN);
+        if (!in_atomic)
+                might_sleep();
+        aa_buf = kmalloc(aa_g_path_max, flags);
         if (!aa_buf) {
                 if (try_again) {
                         try_again = false;
@@ -1610,6 +1629,7 @@ void aa_put_buffer(char *buf)
 
         spin_lock(&aa_buffers_lock);
         list_add(&aa_buf->list, &aa_global_buffers);
+        buffer_count++;
         spin_unlock(&aa_buffers_lock);
 }
 
@@ -1661,9 +1681,9 @@ static int __init alloc_buffers(void)
          * disabled early at boot if aa_g_path_max is extremely high.
          */
         if (num_online_cpus() > 1)
-                num = 4;
+                num = 4 + RESERVE_COUNT;
         else
-                num = 2;
+                num = 2 + RESERVE_COUNT;
 
         for (i = 0; i < num; i++) {
 
--
cgit v1.2.3
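
A usage sketch for the new in_atomic argument (illustrative only, not
part of any of the commits above): an atomic-context caller passes
true, which lets it consume the buffers held in reserve and fall back
to a GFP_ATOMIC allocation, while sleepable callers keep passing false.

  char *buffer;

  /* e.g. on a code path where sleeping is not allowed */
  buffer = aa_get_buffer(true);
  if (!buffer)
          return -ENOMEM;        /* reserve exhausted and GFP_ATOMIC kmalloc() failed */

  /* ... build the path name / perform the permission check ... */

  aa_put_buffer(buffer);         /* returning the buffer also replenishes the pool */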