 Documentation/ABI/testing/sysfs-kernel-livepatch | 14 ++++++
 Documentation/livepatch/livepatch.txt            | 18 +++++++-
 kernel/livepatch/core.c                          | 30 ++++++++++++
 kernel/livepatch/transition.c                    | 36 +++++++++++++-
 kernel/livepatch/transition.h                    |  1 +
 5 files changed, 95 insertions(+), 4 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index 3bb9d5bc1ce3..dac7e1e62a8b 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -45,6 +45,20 @@ Description:
signal pending structures). Tasks are interrupted or woken up,
and forced to change their patched state.
+What: /sys/kernel/livepatch/<patch>/force
+Date: Nov 2017
+KernelVersion: 4.15.0
+Contact: live-patching@vger.kernel.org
+Description:
+ A writable attribute that allows an administrator to affect the
+ course of an existing transition. Writing 1 clears the
+ TIF_PATCH_PENDING flag of all tasks and thus forces the tasks
+ into the patched or unpatched state. An administrator should not
+ use this feature without clearance from the patch
+ distributor. Removal (rmmod) of patch modules is permanently
+ disabled once the feature has been used. See
+ Documentation/livepatch/livepatch.txt for more information.
+
What: /sys/kernel/livepatch/<patch>/<object>
Date: Nov 2014
KernelVersion: 3.19.0
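
For illustration only (not part of the patch): the new attribute can be
exercised from userspace with a plain write(2). A minimal sketch follows; the
patch name "my_patch" is hypothetical, root privileges are assumed, and the
write is only meaningful while a transition is in progress:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical patch name; substitute the loaded patch module. */
	const char *path = "/sys/kernel/livepatch/my_patch/force";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Writing "1" clears TIF_PATCH_PENDING for all tasks. */
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

This is equivalent to "echo 1 > /sys/kernel/livepatch/<patch>/force" from a
root shell.
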
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index 9bcdef277a36..896ba8941702 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -183,6 +183,20 @@ tasks. No proper signal is actually delivered (there is no data in signal
pending structures). Tasks are interrupted or woken up, and forced to change
their patched state.
+An administrator can also affect a transition through the
+/sys/kernel/livepatch/<patch>/force attribute. Writing 1 there clears the
+TIF_PATCH_PENDING flag of all tasks and thus forces them into the patched
+state. Important note! The force attribute is intended for cases when the
+transition gets stuck for a long time because of a blocking task. The
+administrator is expected to collect all necessary data (namely stack traces of
+the blocking tasks) and to request clearance from the patch distributor before
+forcing the transition. Unauthorized use may harm the system; the risk depends
+on the nature of the patch, which functions are (un)patched, and which functions
+the blocking tasks sleep in (/proc/<pid>/stack may help here). Removal (rmmod)
+of patch modules is permanently disabled once force has been used, since it
+cannot be guaranteed that no task still sleeps in such a module; it also implies
+an unbounded reference count if a patch module is disabled and enabled in a loop.
+
3.1 Adding consistency model support to new architectures
---------------------------------------------------------
@@ -439,8 +453,8 @@ Information about the registered patches can be found under
/sys/kernel/livepatch. The patches could be enabled and disabled
by writing there.
-/sys/kernel/livepatch/<patch>/signal attribute allows administrator to affect a
-patching operation.
+/sys/kernel/livepatch/<patch>/signal and /sys/kernel/livepatch/<patch>/force
+attributes allow the administrator to affect a patching operation.
See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
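
As a companion to the data-collection step described above (again not part of
the patch): a minimal userspace sketch that dumps /proc/<pid>/stack for one
blocking task so the trace can be handed to the patch distributor. The PID
1234 is a placeholder and root privileges are assumed:

#include <stdio.h>

int main(void)
{
	/* Placeholder PID of a task blocking the transition. */
	const int pid = 1234;
	char path[64];
	char line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/stack", pid);
	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}
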
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 88766bd91803..1c3c9b27c916 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -455,6 +455,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
* /sys/kernel/livepatch/<patch>/enabled
* /sys/kernel/livepatch/<patch>/transition
* /sys/kernel/livepatch/<patch>/signal
+ * /sys/kernel/livepatch/<patch>/force
* /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
*/
@@ -556,13 +557,42 @@ static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
return count;
}
+static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct klp_patch *patch;
+ int ret;
+ bool val;
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+
+ /*
+ * klp_mutex lock is not grabbed here intentionally. It is not really
+ * needed. The race window is harmless and grabbing the lock would only
+ * hold the action back.
+ */
+ if (patch != klp_transition_patch)
+ return -EINVAL;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ if (val)
+ klp_force_transition();
+
+ return count;
+}
+
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
+static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr,
&transition_kobj_attr.attr,
&signal_kobj_attr.attr,
+ &force_kobj_attr.attr,
NULL
};
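
For readers less familiar with the kobj_attribute plumbing that force_store()
plugs into, here is a self-contained sketch of the same write-only store
pattern (kstrtobool() plus act-on-true) in a standalone module. The module,
kobject and attribute names (sysfs_store_demo, trigger) are invented for
illustration and do not appear in the patch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static struct kobject *demo_kobj;

static ssize_t trigger_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	/* Only act on a true value, as force_store() does. */
	if (val)
		pr_info("sysfs_store_demo: trigger requested\n");

	/* Consume the whole write on success. */
	return count;
}

static struct kobj_attribute trigger_attr = __ATTR_WO(trigger);

static int __init demo_init(void)
{
	int ret;

	/* Creates /sys/kernel/sysfs_store_demo/trigger */
	demo_kobj = kobject_create_and_add("sysfs_store_demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;

	ret = sysfs_create_file(demo_kobj, &trigger_attr.attr);
	if (ret)
		kobject_put(demo_kobj);

	return ret;
}

static void __exit demo_exit(void)
{
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
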
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index edcfcb8ebb2d..be5bfa533ee8 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -33,6 +33,8 @@ struct klp_patch *klp_transition_patch;
static int klp_target_state = KLP_UNDEFINED;
+static bool klp_forced = false;
+
/*
* This work can be performed periodically to finish patching or unpatching any
* "straggler" tasks which failed to transition in the first attempt.
@@ -146,9 +148,12 @@ done:
/*
* See complementary comment in __klp_enable_patch() for why we
* keep the module reference for immediate patches.
+ *
+ * If klp_forced or immediate_func is set, the module's ref count can
+ * grow without bound when the patch is disabled/enabled in a loop.
*/
- if (!klp_transition_patch->immediate && !immediate_func &&
- klp_target_state == KLP_UNPATCHED) {
+ if (!klp_forced && !klp_transition_patch->immediate &&
+ !immediate_func && klp_target_state == KLP_UNPATCHED) {
module_put(klp_transition_patch->mod);
}
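
The reference-count note above can be illustrated with a small userspace model
(an analogy only, not kernel code): enable_patch() stands in for the module
reference taken when a patch is enabled (cf. __klp_enable_patch()), and
complete_unpatch_transition() for the module_put() that is skipped once the
transition has been forced:

#include <stdbool.h>
#include <stdio.h>

static int mod_refcount;	/* stand-in for the patch module's ref count */
static bool forced;		/* stand-in for the sticky klp_forced flag   */

/* Enabling the patch takes a module reference. */
static void enable_patch(void)
{
	mod_refcount++;
}

/* A normally completed unpatch transition drops the reference,
 * unless a transition was ever forced. */
static void complete_unpatch_transition(void)
{
	if (!forced)
		mod_refcount--;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		enable_patch();
		if (i == 0)
			forced = true;	/* admin forces the first transition */
		complete_unpatch_transition();
		printf("cycle %d: refcount = %d\n", i, mod_refcount);
	}
	/* Prints 1, 2, 3: the count never returns to zero, which is why
	 * rmmod of patch modules stays permanently disabled. */
	return 0;
}
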
@@ -649,3 +654,30 @@ void klp_send_signals(void)
}
read_unlock(&tasklist_lock);
}
+
+/*
+ * Drop TIF_PATCH_PENDING of all tasks on the administrator's request. This
+ * forces an existing transition to finish.
+ *
+ * NOTE: klp_update_patch_state(task) requires the task to be inactive or
+ * 'current'. That is not the case here, so the consistency model could be
+ * broken. The administrator, who is the only one able to trigger
+ * klp_force_transition(), has to be aware of this.
+ */
+void klp_force_transition(void)
+{
+ struct task_struct *g, *task;
+ unsigned int cpu;
+
+ pr_warn("forcing remaining tasks to the patched state\n");
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, task)
+ klp_update_patch_state(task);
+ read_unlock(&tasklist_lock);
+
+ for_each_possible_cpu(cpu)
+ klp_update_patch_state(idle_task(cpu));
+
+ klp_forced = true;
+}
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
index 40522795a5f6..f9d0bc016067 100644
--- a/kernel/livepatch/transition.h
+++ b/kernel/livepatch/transition.h
@@ -12,5 +12,6 @@ void klp_start_transition(void);
void klp_try_complete_transition(void);
void klp_reverse_transition(void);
void klp_send_signals(void);
+void klp_force_transition(void);
#endif /* _LIVEPATCH_TRANSITION_H */