From 43347d56c8d9dd732cee2f8efd384ad21dd1f6c4 Mon Sep 17 00:00:00 2001
From: Miroslav Benes
Date: Wed, 15 Nov 2017 14:50:13 +0100
Subject: livepatch: send a fake signal to all blocking tasks

The live patching consistency model is LEAVE_PATCHED_SET and SWITCH_THREAD.
This means that all tasks in the system have to be marked, one by one, as
safe to call a new patched function. A task is safe when it is not
(sleeping) in the set of patched functions, that is, when no patched
function is on the task's stack. Another clearly safe place is the boundary
between kernel and userspace. The patching waits for all tasks to get
outside of the patched set or to cross the boundary; the transition is
completed afterwards.

The problem is that a task can block the transition for quite a long time,
if not forever. It could, for example, sleep in a set of patched functions.
Luckily we can force such a task to leave the set by sending it a fake
signal, that is, a signal with no data in the signal pending structures
(no handler, no sign of a proper signal being delivered). Suspend/freezer
uses the same trick to freeze tasks. The task gets TIF_SIGPENDING set and
is woken up (if it has been sleeping in the kernel) or kicked by a
rescheduling IPI (if it was running on another CPU). This causes the task
to go to the kernel/userspace boundary, where the signal is handled and the
task is marked as safe in terms of live patching.

There are tasks which are not affected by this technique, though. The fake
signal is not sent to kthreads; they have to be handled differently. They
can be woken up so they leave the patched set, and their TIF_PATCH_PENDING
can then be cleared thanks to stack checking.

For the sake of completeness: if the task is in TASK_RUNNING state but not
currently running on some CPU, it does not get the IPI, but it would
eventually handle the signal anyway. Second, if the task runs in the kernel
(in TASK_RUNNING state), it gets the IPI, but the signal is not handled on
return from the interrupt. It would be handled on a future return to
userspace when the fake signal is sent again. Stack checking deals with
these cases in a better way.

If the task was sleeping in a syscall, it is woken by our fake signal,
checks whether TIF_SIGPENDING is set (by calling the signal_pending()
predicate) and returns ERESTART* or EINTR. Syscalls with ERESTART* return
values are restarted in case of the fake signal (see do_signal()). EINTR is
propagated back to the userspace program. This could disturb the program,
but...

* each process dealing with signals should react accordingly to EINTR
  return values anyway.
* syscalls returning EINTR are quite a common situation in the system even
  if no fake signal is sent.
* the freezer sends the fake signal and does not deal with EINTR either.
  Thus EINTR values are returned when the system is resumed.

The safe marking itself is done in the architectures' "entry" code on the
syscall and interrupt/exception exit paths, and in the stack checking
functions of livepatch. TIF_PATCH_PENDING is cleared and the next
recalc_sigpending() drops TIF_SIGPENDING. In connection with this,
klp_update_patch_state() is also called before do_signal(), so that
recalc_sigpending() in dequeue_signal() can clear TIF_PATCH_PENDING
immediately and thus prevent a double call of do_signal().

Note that the fake signal is not sent to stopped/traced tasks. Such a task
prevents the patching from finishing until it continues again (is no longer
traced).

Last, sending the fake signal is not automatic. It is done only when the
admin requests it by writing 1 to the signal sysfs attribute in the
livepatch sysfs directory.
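
Since the fake signal can surface as EINTR in programs that do not expect
it, the first bullet above is worth illustrating. A minimal userspace
sketch (not part of this patch) of the retry pattern a well-behaved program
is expected to use:

  #include <errno.h>
  #include <stdio.h>
  #include <unistd.h>

  /*
   * Retry a read() that may be interrupted by a signal -- including the
   * fake signal sent by livepatch -- instead of treating EINTR as a fatal
   * error. Purely illustrative; not part of the kernel patch.
   */
  static ssize_t read_retry(int fd, void *buf, size_t count)
  {
          ssize_t ret;

          do {
                  ret = read(fd, buf, count);
          } while (ret < 0 && errno == EINTR);

          return ret;
  }

  int main(void)
  {
          char buf[64];
          ssize_t n = read_retry(STDIN_FILENO, buf, sizeof(buf));

          if (n < 0)
                  perror("read");
          else
                  printf("read %zd bytes\n", n);
          return 0;
  }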
Signed-off-by: Miroslav Benes
Cc: Oleg Nesterov
Cc: Michael Ellerman
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc: Andy Lutomirski
Cc: linuxppc-dev@lists.ozlabs.org
Cc: x86@kernel.org
Acked-by: Michael Ellerman (powerpc)
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c       | 30 ++++++++++++++++++++++++++++++
 kernel/livepatch/transition.c | 41 +++++++++++++++++++++++++++++++++++++++++
 kernel/livepatch/transition.h |  1 +
 3 files changed, 72 insertions(+)
(limited to 'kernel/livepatch')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index de9e45dca70f..88766bd91803 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -454,6 +454,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
  * /sys/kernel/livepatch/<patch>
  * /sys/kernel/livepatch/<patch>/enabled
  * /sys/kernel/livepatch/<patch>/transition
+ * /sys/kernel/livepatch/<patch>/signal
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
  */
@@ -528,11 +529,40 @@ static ssize_t transition_show(struct kobject *kobj,
 			patch == klp_transition_patch);
 }
 
+static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct klp_patch *patch;
+	int ret;
+	bool val;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+
+	/*
+	 * klp_mutex lock is not grabbed here intentionally. It is not really
+	 * needed. The race window is harmless and grabbing the lock would only
+	 * hold the action back.
+	 */
+	if (patch != klp_transition_patch)
+		return -EINVAL;
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		return ret;
+
+	if (val)
+		klp_send_signals();
+
+	return count;
+}
+
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
+static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
+	&signal_kobj_attr.attr,
 	NULL
 };
 
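
For illustration, this is roughly how an admin tool might use the new
attribute: write "1" to signal and then poll the existing transition
attribute until the patching finishes. The patch name "demo_patch" is a
placeholder, root privileges are assumed, and this snippet is not part of
the patch.

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  /* Illustrative only: nudge blocking tasks with the fake signal, then
   * wait for the transition to complete. */
  int main(void)
  {
          int fd = open("/sys/kernel/livepatch/demo_patch/signal", O_WRONLY);

          if (fd < 0 || write(fd, "1", 1) != 1) {
                  perror("signal");
                  return 1;
          }
          close(fd);

          for (;;) {
                  char state;

                  fd = open("/sys/kernel/livepatch/demo_patch/transition",
                            O_RDONLY);
                  if (fd < 0 || read(fd, &state, 1) != 1) {
                          perror("transition");
                          return 1;
                  }
                  close(fd);
                  if (state == '0')
                          break;        /* transition completed */
                  sleep(1);
          }
          return 0;
  }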
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 56add6327736..edcfcb8ebb2d 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -608,3 +608,44 @@ void klp_copy_process(struct task_struct *child)
 
 	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
 }
+
+/*
+ * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
+ * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this
+ * action currently.
+ */
+void klp_send_signals(void)
+{
+	struct task_struct *g, *task;
+
+	pr_notice("signaling remaining tasks\n");
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(g, task) {
+		if (!klp_patch_pending(task))
+			continue;
+
+		/*
+		 * There is a small race here. We could see TIF_PATCH_PENDING
+		 * set and decide to wake up a kthread or send a fake signal.
+		 * Meanwhile the task could migrate itself and the action
+		 * would be meaningless. It is not serious though.
+		 */
+		if (task->flags & PF_KTHREAD) {
+			/*
+			 * Wake up a kthread which sleeps interruptedly and
+			 * still has not been migrated.
+			 */
+			wake_up_state(task, TASK_INTERRUPTIBLE);
+		} else {
+			/*
+			 * Send fake signal to all non-kthread tasks which are
+			 * still not migrated.
+			 */
+			spin_lock_irq(&task->sighand->siglock);
+			signal_wake_up(task, 0);
+			spin_unlock_irq(&task->sighand->siglock);
+		}
+	}
+	read_unlock(&tasklist_lock);
+}
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
index 0f6e27c481f9..40522795a5f6 100644
--- a/kernel/livepatch/transition.h
+++ b/kernel/livepatch/transition.h
@@ -11,5 +11,6 @@ void klp_cancel_transition(void);
 void klp_start_transition(void);
 void klp_try_complete_transition(void);
 void klp_reverse_transition(void);
+void klp_send_signals(void);
 
 #endif /* _LIVEPATCH_TRANSITION_H */
--
cgit v1.2.3

From c99a2be790b07752d8cc694434d3450afd4c5a00 Mon Sep 17 00:00:00 2001
From: Miroslav Benes
Date: Wed, 22 Nov 2017 11:29:21 +0100
Subject: livepatch: force transition to finish

If a task sleeps uninterruptibly in a set of patched functions, it could
block the whole transition indefinitely. Thus it may be useful to clear its
TIF_PATCH_PENDING to allow the process to finish.

The admin can do that now by writing to the force sysfs attribute in the
livepatch sysfs directory. TIF_PATCH_PENDING is then cleared for all tasks
and the transition can finish successfully.

Important note! The administrator should not use this feature without
clearance from the patch distributor. It must be checked that the
consistency model guarantees are not violated by doing so. Removal (rmmod)
of patch modules is permanently disabled once the feature is used, because
it cannot be guaranteed that no task is sleeping in such a module.

Signed-off-by: Miroslav Benes
Acked-by: Josh Poimboeuf
Reviewed-by: Petr Mladek
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c       | 30 ++++++++++++++++++++++++++++++
 kernel/livepatch/transition.c | 36 ++++++++++++++++++++++++++++++++++--
 kernel/livepatch/transition.h |  1 +
 3 files changed, 65 insertions(+), 2 deletions(-)
(limited to 'kernel/livepatch')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 88766bd91803..1c3c9b27c916 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -455,6 +455,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
  * /sys/kernel/livepatch/<patch>/enabled
  * /sys/kernel/livepatch/<patch>/transition
  * /sys/kernel/livepatch/<patch>/signal
+ * /sys/kernel/livepatch/<patch>/force
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
  */
@@ -556,13 +557,42 @@ static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
 	return count;
 }
 
+static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct klp_patch *patch;
+	int ret;
+	bool val;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+
+	/*
+	 * klp_mutex lock is not grabbed here intentionally. It is not really
+	 * needed. The race window is harmless and grabbing the lock would only
+	 * hold the action back.
+	 */
+	if (patch != klp_transition_patch)
+		return -EINVAL;
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		return ret;
+
+	if (val)
+		klp_force_transition();
+
+	return count;
+}
+
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
+static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
 	&signal_kobj_attr.attr,
+	&force_kobj_attr.attr,
 	NULL
 };
 
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index edcfcb8ebb2d..be5bfa533ee8 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -33,6 +33,8 @@ struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;
 
+static bool klp_forced = false;
+
 /*
  * This work can be performed periodically to finish patching or unpatching any
  * "straggler" tasks which failed to transition in the first attempt.
@@ -146,9 +148,12 @@ done:
 	/*
 	 * See complementary comment in __klp_enable_patch() for why we
 	 * keep the module reference for immediate patches.
+	 *
+	 * klp_forced or immediate_func set implies unbounded increase of
+	 * module's ref count if the module is disabled/enabled in a loop.
 	 */
-	if (!klp_transition_patch->immediate && !immediate_func &&
-	    klp_target_state == KLP_UNPATCHED) {
+	if (!klp_forced && !klp_transition_patch->immediate &&
+	    !immediate_func && klp_target_state == KLP_UNPATCHED) {
 		module_put(klp_transition_patch->mod);
 	}
 
@@ -649,3 +654,30 @@ void klp_send_signals(void)
 	}
 	read_unlock(&tasklist_lock);
 }
+
+/*
+ * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
+ * existing transition to finish.
+ *
+ * NOTE: klp_update_patch_state(task) requires the task to be inactive or
+ * 'current'. This is not the case here and the consistency model could be
+ * broken. Administrator, who is the only one to execute the
+ * klp_force_transitions(), has to be aware of this.
+ */
+void klp_force_transition(void)
+{
+	struct task_struct *g, *task;
+	unsigned int cpu;
+
+	pr_warn("forcing remaining tasks to the patched state\n");
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(g, task)
+		klp_update_patch_state(task);
+	read_unlock(&tasklist_lock);
+
+	for_each_possible_cpu(cpu)
+		klp_update_patch_state(idle_task(cpu));
+
+	klp_forced = true;
+}
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
index 40522795a5f6..f9d0bc016067 100644
--- a/kernel/livepatch/transition.h
+++ b/kernel/livepatch/transition.h
@@ -12,5 +12,6 @@ void klp_start_transition(void);
 void klp_try_complete_transition(void);
 void klp_reverse_transition(void);
 void klp_send_signals(void);
+void klp_force_transition(void);
 
 #endif /* _LIVEPATCH_TRANSITION_H */
--
cgit v1.2.3
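
A hedged sketch of how the force attribute might be used from userspace as
a last resort, after checking that a transition is actually in progress.
Again, "demo_patch" and the file paths are assumptions based on the sysfs
layout above, not code from the patch:

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  /* Illustrative only: force a stuck livepatch transition to finish.
   * Requires root; "demo_patch" is a placeholder patch name. */
  int main(void)
  {
          char state = '0';
          int fd;

          /* Only force if the patch is still in transition. */
          fd = open("/sys/kernel/livepatch/demo_patch/transition", O_RDONLY);
          if (fd < 0 || read(fd, &state, 1) != 1 || state != '1') {
                  fprintf(stderr, "no transition in progress, not forcing\n");
                  if (fd >= 0)
                          close(fd);
                  return 1;
          }
          close(fd);

          fd = open("/sys/kernel/livepatch/demo_patch/force", O_WRONLY);
          if (fd < 0 || write(fd, "1", 1) != 1) {
                  perror("force");
                  return 1;
          }
          close(fd);
          return 0;
  }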
From d0807da78e11d46f18399cbf8c4028c731346766 Mon Sep 17 00:00:00 2001
From: Miroslav Benes
Date: Wed, 10 Jan 2018 11:01:28 +0100
Subject: livepatch: Remove immediate feature

The immediate flag has been used to disable per-task consistency and patch
all tasks immediately. It could be useful if the patch doesn't change any
function or data semantics.

However, it causes problems of its own. The consistency model is currently
broken with respect to immediate patches:

  func		a
  patches	1i
		2i
		3

When patch 3 is applied, only the 2i function is checked (by the stack
checking facility). There might be a task sleeping in 1i, though. Such a
task is migrated to 3, because we do not check 1i in klp_check_stack_func()
at all.

The coming atomic replace feature would also be easier to implement and
more reliable without immediate. Thus, remove the immediate feature
completely and save us from the problems.

Note that the force feature has a similar problem. However, it is
considered a last resort. If used, the administrator should not apply any
new live patches and should plan for a reboot into an updated kernel.

The architectures now need to provide HAVE_RELIABLE_STACKTRACE to fully
support livepatch.

Signed-off-by: Miroslav Benes
Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c       | 12 +----------
 kernel/livepatch/transition.c | 49 +++++--------------------------------------
 2 files changed, 6 insertions(+), 55 deletions(-)
(limited to 'kernel/livepatch')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 1c3c9b27c916..41be6061b92f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -366,11 +366,6 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	/*
 	 * A reference is taken on the patch module to prevent it from being
 	 * unloaded.
-	 *
-	 * Note: For immediate (no consistency model) patches we don't allow
-	 * patch modules to unload since there is no safe/sane method to
-	 * determine if a thread is still running in the patched code contained
-	 * in the patch module once the ftrace registration is successful.
 	 */
 	if (!try_module_get(patch->mod))
 		return -ENODEV;
@@ -890,12 +885,7 @@ int klp_register_patch(struct klp_patch *patch)
 	if (!klp_initialized())
 		return -ENODEV;
 
-	/*
-	 * Architectures without reliable stack traces have to set
-	 * patch->immediate because there's currently no way to patch kthreads
-	 * with the consistency model.
-	 */
-	if (!klp_have_reliable_stack() && !patch->immediate) {
+	if (!klp_have_reliable_stack()) {
 		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
 		return -ENOSYS;
 	}
 
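
For context, klp_have_reliable_stack(), which the check above now makes
mandatory, is essentially a build-time Kconfig test. A rough sketch of the
helper (reconstructed from the kernel sources of that era, not quoted from
this patch):

  /* Sketch: the real definition lives elsewhere in kernel/livepatch. */
  static inline bool klp_have_reliable_stack(void)
  {
          return IS_ENABLED(CONFIG_STACKTRACE) &&
                 IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
  }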
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index be5bfa533ee8..7c6631e693bc 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -82,7 +82,6 @@ static void klp_complete_transition(void)
 	struct klp_func *func;
 	struct task_struct *g, *task;
 	unsigned int cpu;
-	bool immediate_func = false;
 
 	pr_debug("'%s': completing %s transition\n",
 		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
@@ -104,16 +103,9 @@ static void klp_complete_transition(void)
 		klp_synchronize_transition();
 	}
 
-	if (klp_transition_patch->immediate)
-		goto done;
-
-	klp_for_each_object(klp_transition_patch, obj) {
-		klp_for_each_func(obj, func) {
+	klp_for_each_object(klp_transition_patch, obj)
+		klp_for_each_func(obj, func)
 			func->transition = false;
-			if (func->immediate)
-				immediate_func = true;
-		}
-	}
 
 	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
 	if (klp_target_state == KLP_PATCHED)
@@ -132,7 +124,6 @@ static void klp_complete_transition(void)
 			task->patch_state = KLP_UNDEFINED;
 	}
 
-done:
 	klp_for_each_object(klp_transition_patch, obj) {
 		if (!klp_is_object_loaded(obj))
 			continue;
@@ -146,16 +137,11 @@ done:
 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
 	/*
-	 * See complementary comment in __klp_enable_patch() for why we
-	 * keep the module reference for immediate patches.
-	 *
-	 * klp_forced or immediate_func set implies unbounded increase of
-	 * module's ref count if the module is disabled/enabled in a loop.
+	 * klp_forced set implies unbounded increase of module's ref count if
+	 * the module is disabled/enabled in a loop.
 	 */
-	if (!klp_forced && !klp_transition_patch->immediate &&
-	    !immediate_func && klp_target_state == KLP_UNPATCHED) {
+	if (!klp_forced && klp_target_state == KLP_UNPATCHED)
 		module_put(klp_transition_patch->mod);
-	}
 
 	klp_target_state = KLP_UNDEFINED;
 	klp_transition_patch = NULL;
@@ -223,9 +209,6 @@ static int klp_check_stack_func(struct klp_func *func,
 	struct klp_ops *ops;
 	int i;
 
-	if (func->immediate)
-		return 0;
-
 	for (i = 0; i < trace->nr_entries; i++) {
 		address = trace->entries[i];
 
@@ -387,13 +370,6 @@ void klp_try_complete_transition(void)
 
 	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
 
-	/*
-	 * If the patch can be applied or reverted immediately, skip the
-	 * per-task transitions.
-	 */
-	if (klp_transition_patch->immediate)
-		goto success;
-
 	/*
 	 * Try to switch the tasks to the target patch state by walking their
 	 * stacks and looking for any to-be-patched or to-be-unpatched
@@ -437,7 +413,6 @@ void klp_try_complete_transition(void)
 		return;
 	}
 
-success:
 	/* we're done, now cleanup the data structures */
 	klp_complete_transition();
 }
@@ -457,13 +432,6 @@ void klp_start_transition(void)
 		  klp_transition_patch->mod->name,
 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
-	/*
-	 * If the patch can be applied or reverted immediately, skip the
-	 * per-task transitions.
-	 */
-	if (klp_transition_patch->immediate)
-		return;
-
 	/*
 	 * Mark all normal tasks as needing a patch state update. They'll
 	 * switch either in klp_try_complete_transition() or as they exit the
@@ -513,13 +481,6 @@ void klp_init_transition(struct klp_patch *patch, int state)
 	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
-	/*
-	 * If the patch can be applied or reverted immediately, skip the
-	 * per-task transitions.
-	 */
-	if (patch->immediate)
-		return;
-
 	/*
 	 * Initialize all tasks to the initial patch state to prepare them for
 	 * switching to the target state.
--
cgit v1.2.3

From 8869016d3a58cbe7c31c70f4f008a92122b271c7 Mon Sep 17 00:00:00 2001
From: Miroslav Benes
Date: Thu, 21 Dec 2017 14:40:43 +0100
Subject: livepatch: add locking to force and signal functions

klp_send_signals() and klp_force_transition() do not acquire klp_mutex,
because it seemed to be superfluous. A potential race in klp_send_signals()
was harmless and there was nothing in klp_force_transition() which needed
to be synchronized. That changed with the addition of the klp_forced
variable during the review process.

There is a small window now in which klp_complete_transition() does not see
klp_forced set to true while all tasks have already been transitioned to
the target state. module_put() is called and the module can be removed.

Acquire klp_mutex in the sysfs callbacks to prevent it. Do the same for the
signal sending, just to be sure. There is no real downside to that.
Fixes: c99a2be790b07 ("livepatch: force transition to finish")
Fixes: 43347d56c8d9d ("livepatch: send a fake signal to all blocking tasks")
Reported-by: Jason Baron
Signed-off-by: Miroslav Benes
Reviewed-by: Petr Mladek
Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 52 ++++++++++++++++++++++++++-----------------------
 1 file changed, 28 insertions(+), 24 deletions(-)
(limited to 'kernel/livepatch')

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 1c3c9b27c916..8fd8e8f126da 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -537,22 +537,24 @@ static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
 	int ret;
 	bool val;
 
-	patch = container_of(kobj, struct klp_patch, kobj);
-
-	/*
-	 * klp_mutex lock is not grabbed here intentionally. It is not really
-	 * needed. The race window is harmless and grabbing the lock would only
-	 * hold the action back.
-	 */
-	if (patch != klp_transition_patch)
-		return -EINVAL;
-
 	ret = kstrtobool(buf, &val);
 	if (ret)
 		return ret;
 
-	if (val)
-		klp_send_signals();
+	if (!val)
+		return count;
+
+	mutex_lock(&klp_mutex);
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	if (patch != klp_transition_patch) {
+		mutex_unlock(&klp_mutex);
+		return -EINVAL;
+	}
+
+	klp_send_signals();
+
+	mutex_unlock(&klp_mutex);
 
 	return count;
 }
@@ -564,22 +566,24 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
 	int ret;
 	bool val;
 
-	patch = container_of(kobj, struct klp_patch, kobj);
-
-	/*
-	 * klp_mutex lock is not grabbed here intentionally. It is not really
-	 * needed. The race window is harmless and grabbing the lock would only
-	 * hold the action back.
-	 */
-	if (patch != klp_transition_patch)
-		return -EINVAL;
-
 	ret = kstrtobool(buf, &val);
 	if (ret)
 		return ret;
 
-	if (val)
-		klp_force_transition();
+	if (!val)
+		return count;
+
+	mutex_lock(&klp_mutex);
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	if (patch != klp_transition_patch) {
+		mutex_unlock(&klp_mutex);
+		return -EINVAL;
+	}
+
+	klp_force_transition();
+
+	mutex_unlock(&klp_mutex);
 
 	return count;
 }
--
cgit v1.2.3
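
The bug class fixed here is a plain check-then-act race: the decision in
klp_complete_transition() depends on klp_forced, so the flag has to be set
under the same lock the completion path holds. A small self-contained
pthread sketch (not kernel code) of the same pattern:

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  /*
   * Generic illustration, not kernel code: a completion path decides based
   * on a "forced" flag. If the side setting the flag did not hold the same
   * lock, the completion could run in the window between the request and
   * the flag being set, and drop the reference it should have kept.
   */
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static bool forced;
  static bool in_transition = true;

  static void *complete_transition(void *arg)
  {
          pthread_mutex_lock(&lock);
          if (!forced)
                  printf("dropping module reference\n");
          in_transition = false;
          pthread_mutex_unlock(&lock);
          return NULL;
  }

  int main(void)
  {
          pthread_t t;

          pthread_create(&t, NULL, complete_transition, NULL);

          /* The fix: check the state and set the flag under the same lock,
           * so the two sides see a consistent ordering. */
          pthread_mutex_lock(&lock);
          if (in_transition)
                  forced = true;
          pthread_mutex_unlock(&lock);

          pthread_join(t, NULL);
          return 0;
  }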