path: root/kernel/livepatch/transition.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2019-03-08 08:58:25 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-03-08 08:58:25 -0800
commit    b7af27bf9471f5d54d71dc2e4228d6bc065bdb57 (patch)
tree      efe61bdce366720345f4f4fb663c31c9a4f00a08 /kernel/livepatch/transition.c
parent    851ca779d110f694b5d078bc4af06d3ad37169e8 (diff)
parent    f9d138145686b52b48ccb36557d6842076e2b9dd (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching
Pull livepatching updates from Jiri Kosina:

 - support for something we call 'atomic replace', which allows for much
   better handling of cumulative patches (something very useful for
   distros), from Jason Baron with help of Petr Mladek and Joe Lawrence

 - improvement of the handling of tasks blocking finalization, from
   Miroslav Benes

 - update of the MAINTAINERS file to reflect the move towards group
   maintainership

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching: (22 commits)
  livepatch/selftests: use "$@" to preserve argument list
  livepatch: Module coming and going callbacks can proceed with all listed patches
  livepatch: Proper error handling in the shadow variables selftest
  livepatch: return -ENOMEM on ptr_id() allocation failure
  livepatch: Introduce klp_for_each_patch macro
  livepatch: core: Return EOPNOTSUPP instead of ENOSYS
  selftests/livepatch: add DYNAMIC_DEBUG config dependency
  livepatch: samples: non static warnings fix
  livepatch: update MAINTAINERS
  livepatch: Remove signal sysfs attribute
  livepatch: Send a fake signal periodically
  selftests/livepatch: introduce tests
  livepatch: Remove ordering (stacking) of the livepatches
  livepatch: Atomic replace and cumulative patches documentation
  livepatch: Remove Nop structures when unused
  livepatch: Add atomic replace
  livepatch: Use lists to manage patches, objects and functions
  livepatch: Simplify API by removing registration step
  livepatch: Don't block the removal of patches loaded after a forced transition
  livepatch: Consolidate klp_free functions
  ...
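As context for the 'atomic replace' and 'Simplify API' items above: a minimal sketch of what a cumulative patch module looks like after this series, modeled on samples/livepatch/livepatch-sample.c. The registration step is gone (klp_enable_patch() is the single entry point), and .replace = true asks the core to discard all previously applied patches once the transition finishes. The patched function and all names here are illustrative, not part of this commit:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* Illustrative replacement for cmdline_proc_show(). */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s\n", "this has been live patched");
        return 0;
}

static struct klp_func funcs[] = {
        {
                .old_name = "cmdline_proc_show",
                .new_func = livepatch_cmdline_proc_show,
        }, { }
};

static struct klp_object objs[] = {
        {
                /* name being NULL means vmlinux */
                .funcs = funcs,
        }, { }
};

static struct klp_patch patch = {
        .mod = THIS_MODULE,
        .objs = objs,
        /* atomic replace: supersede and free all earlier patches */
        .replace = true,
};

static int livepatch_init(void)
{
        /* one-step API: no separate klp_register_patch() anymore */
        return klp_enable_patch(&patch);
}

static void livepatch_exit(void)
{
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");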
Diffstat (limited to 'kernel/livepatch/transition.c')
-rw-r--r--    kernel/livepatch/transition.c    124
1 file changed, 72 insertions(+), 52 deletions(-)
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 304d5eb8a98c..9c89ae8b337a 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -29,11 +29,13 @@
 #define MAX_STACK_ENTRIES       100
 #define STACK_ERR_BUF_SIZE      128
 
+#define SIGNALS_TIMEOUT 15
+
 struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;
 
-static bool klp_forced = false;
+static unsigned int klp_signals_cnt;
 
 /*
  * This work can be performed periodically to finish patching or unpatching any
@@ -87,6 +89,11 @@ static void klp_complete_transition(void)
                  klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
+        if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
+                klp_discard_replaced_patches(klp_transition_patch);
+                klp_discard_nops(klp_transition_patch);
+        }
+
         if (klp_target_state == KLP_UNPATCHED) {
                 /*
                  * All tasks have transitioned to KLP_UNPATCHED so we can now
@@ -136,13 +143,6 @@ static void klp_complete_transition(void)
         pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
                   klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
-        /*
-         * klp_forced set implies unbounded increase of module's ref count if
-         * the module is disabled/enabled in a loop.
-         */
-        if (!klp_forced && klp_target_state == KLP_UNPATCHED)
-                module_put(klp_transition_patch->mod);
-
         klp_target_state = KLP_UNDEFINED;
         klp_transition_patch = NULL;
 }
@@ -224,11 +224,11 @@ static int klp_check_stack_func(struct klp_func *func,
                  * Check for the to-be-patched function
                  * (the previous func).
                  */
-                ops = klp_find_ops(func->old_addr);
+                ops = klp_find_ops(func->old_func);
 
                 if (list_is_singular(&ops->func_stack)) {
                         /* original function */
-                        func_addr = func->old_addr;
+                        func_addr = (unsigned long)func->old_func;
                         func_size = func->old_size;
                 } else {
                         /* previously patched function */
@@ -348,6 +348,47 @@ done:
 }
 
 /*
+ * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
+ * Kthreads with TIF_PATCH_PENDING set are woken up.
+ */
+static void klp_send_signals(void)
+{
+        struct task_struct *g, *task;
+
+        if (klp_signals_cnt == SIGNALS_TIMEOUT)
+                pr_notice("signaling remaining tasks\n");
+
+        read_lock(&tasklist_lock);
+        for_each_process_thread(g, task) {
+                if (!klp_patch_pending(task))
+                        continue;
+
+                /*
+                 * There is a small race here. We could see TIF_PATCH_PENDING
+                 * set and decide to wake up a kthread or send a fake signal.
+                 * Meanwhile the task could migrate itself and the action
+                 * would be meaningless. It is not serious though.
+                 */
+                if (task->flags & PF_KTHREAD) {
+                        /*
+                         * Wake up a kthread which sleeps interruptedly and
+                         * still has not been migrated.
+                         */
+                        wake_up_state(task, TASK_INTERRUPTIBLE);
+                } else {
+                        /*
+                         * Send fake signal to all non-kthread tasks which are
+                         * still not migrated.
+                         */
+                        spin_lock_irq(&task->sighand->siglock);
+                        signal_wake_up(task, 0);
+                        spin_unlock_irq(&task->sighand->siglock);
+                }
+        }
+        read_unlock(&tasklist_lock);
+}
+
+/*
  * Try to switch all remaining tasks to the target patch state by walking the
  * stacks of sleeping tasks and looking for any to-be-patched or
  * to-be-unpatched functions. If such functions are found, the task can't be
@@ -359,6 +400,7 @@ void klp_try_complete_transition(void)
 {
         unsigned int cpu;
         struct task_struct *g, *task;
+        struct klp_patch *patch;
         bool complete = true;
 
         WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
@@ -396,6 +438,10 @@ void klp_try_complete_transition(void)
         put_online_cpus();
 
         if (!complete) {
+                if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
+                        klp_send_signals();
+                klp_signals_cnt++;
+
                 /*
                  * Some tasks weren't able to be switched over. Try again
                  * later and/or wait for other methods like kernel exit
@@ -407,7 +453,18 @@ void klp_try_complete_transition(void)
         }
 
         /* we're done, now cleanup the data structures */
+        patch = klp_transition_patch;
         klp_complete_transition();
+
+        /*
+         * It would make more sense to free the patch in
+         * klp_complete_transition() but it is called also
+         * from klp_cancel_transition().
+         */
+        if (!patch->enabled) {
+                klp_free_patch_start(patch);
+                schedule_work(&patch->free_work);
+        }
 }
 
 /*
@@ -446,6 +503,8 @@ void klp_start_transition(void)
                 if (task->patch_state != klp_target_state)
                         set_tsk_thread_flag(task, TIF_PATCH_PENDING);
         }
+
+        klp_signals_cnt = 0;
 }
 
 /*
@@ -569,47 +628,6 @@ void klp_copy_process(struct task_struct *child)
 }
 
 /*
- * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
- * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this
- * action currently.
- */
-void klp_send_signals(void)
-{
-        struct task_struct *g, *task;
-
-        pr_notice("signaling remaining tasks\n");
-
-        read_lock(&tasklist_lock);
-        for_each_process_thread(g, task) {
-                if (!klp_patch_pending(task))
-                        continue;
-
-                /*
-                 * There is a small race here. We could see TIF_PATCH_PENDING
-                 * set and decide to wake up a kthread or send a fake signal.
-                 * Meanwhile the task could migrate itself and the action
-                 * would be meaningless. It is not serious though.
-                 */
-                if (task->flags & PF_KTHREAD) {
-                        /*
-                         * Wake up a kthread which sleeps interruptedly and
-                         * still has not been migrated.
-                         */
-                        wake_up_state(task, TASK_INTERRUPTIBLE);
-                } else {
-                        /*
-                         * Send fake signal to all non-kthread tasks which are
-                         * still not migrated.
-                         */
-                        spin_lock_irq(&task->sighand->siglock);
-                        signal_wake_up(task, 0);
-                        spin_unlock_irq(&task->sighand->siglock);
-                }
-        }
-        read_unlock(&tasklist_lock);
-}
-
-/*
  * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
  * existing transition to finish.
  *
@@ -620,6 +638,7 @@ void klp_send_signals(void)
  */
 void klp_force_transition(void)
 {
+        struct klp_patch *patch;
         struct task_struct *g, *task;
         unsigned int cpu;
 
@@ -633,5 +652,6 @@
         for_each_possible_cpu(cpu)
                 klp_update_patch_state(idle_task(cpu));
 
-        klp_forced = true;
+        klp_for_each_patch(patch)
+                patch->forced = true;
 }
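A note on the cadence of the fake signal introduced above: klp_try_complete_transition() is retried roughly once per second via delayed work, so with SIGNALS_TIMEOUT set to 15 the signal burst fires about every 15 seconds until all remaining tasks have migrated. A minimal userspace sketch of the same counter logic (illustrative only, not kernel code; the retry count is arbitrary):

#include <stdio.h>

#define SIGNALS_TIMEOUT 15      /* same constant as in the patch above */

int main(void)
{
        unsigned int klp_signals_cnt = 0;       /* reset by klp_start_transition() */
        int attempt;

        /* Pretend 46 roughly one-second retries of klp_try_complete_transition(). */
        for (attempt = 0; attempt < 46; attempt++) {
                /*
                 * The exact test used in the patch: skip attempt 0, then
                 * fire on every 15th failed attempt (15, 30, 45, ...).
                 */
                if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
                        printf("attempt %2d: sending fake signals\n", attempt);
                klp_signals_cnt++;
        }
        return 0;
}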