/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
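
/*
 * A minimal sketch of the retry loop this work item creates; the scheduling
 * call shown is the one made by klp_try_complete_transition() further down:
 *
 *	klp_try_complete_transition()
 *	  -> some tasks could not be switched yet
 *	  -> schedule_delayed_work(&klp_transition_work,
 *				   round_jiffies_relative(HZ));
 *	  ... roughly one second later ...
 *	klp_transition_work_fn()
 *	  -> klp_try_complete_transition()	(repeats until complete)
 */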

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;
	bool immediate_func = false;

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		synchronize_rcu();
	}

	if (klp_transition_patch->immediate)
		goto done;

	klp_for_each_object(klp_transition_patch, obj) {
		klp_for_each_func(obj, func) {
			func->transition = false;
			if (func->immediate)
				immediate_func = true;
		}
	}

	if (klp_target_state == KLP_UNPATCHED && !immediate_func)
		module_put(klp_transition_patch->mod);

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		synchronize_rcu();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

done:
	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}
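
/*
 * For orientation, a sketch of the normal patching lifecycle as driven by the
 * livepatch core (the klp_enable_patch()/klp_patch_object() call sites live
 * in core.c; their exact ordering is assumed here from the comments in
 * klp_init_transition() below):
 *
 *	klp_init_transition(patch, KLP_PATCHED);
 *	klp_patch_object(obj);			// register funcs with ftrace
 *	klp_start_transition();			// set TIF_PATCH_PENDING
 *	klp_try_complete_transition();		// retried via delayed work
 *	  -> klp_complete_transition();		// all tasks have converged
 */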

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	rcu_read_lock();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	rcu_read_unlock();
}
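
/*
 * Sketch of a typical call site for klp_update_patch_state(): the
 * return-to-usermode work loop (and similarly the idle loop) flips a pending
 * task once it sits at a known-safe boundary.  The exact arch code is
 * paraphrased here, not quoted verbatim:
 *
 *	// e.g. in the syscall-exit / exit-to-usermode path:
 *	if (test_thread_flag(TIF_PATCH_PENDING))
 *		klp_update_patch_state(current);
 */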

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (func->immediate)
		return 0;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		goto success;

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

success:
	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}
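
/*
 * Illustrative dmesg output for a successful transition, given the pr_fmt()
 * prefix defined at the top of this file (module name assumed to be the
 * in-tree sample patch):
 *
 *	livepatch: 'livepatch_sample': patching...
 *	livepatch: 'livepatch_sample': patching complete
 */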

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		return;

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (patch->immediate)
		return;

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}
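
/*
 * Sketch of how klp_ftrace_handler() (in patch.c) consumes the state set up
 * here; the logic is paraphrased, not a verbatim copy of that file:
 *
 *	func = list_first_or_null_rcu(&ops->func_stack, ...);
 *	if (func->transition) {
 *		smp_rmb();	// pairs with the smp_wmb() above
 *		if (current->patch_state == KLP_UNPATCHED)
 *			// fall back to the previous (older) function
 *			func = list_next_entry(func, stack_node);
 *	}
 *	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 */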

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	synchronize_rcu();

	klp_start_transition();
}
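
/*
 * A reversal is typically triggered from the patch's sysfs 'enabled'
 * attribute while a transition is stuck, e.g. (path illustrative):
 *
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled
 *
 * which flips klp_target_state back toward KLP_UNPATCHED and restarts the
 * transition in the opposite direction.
 */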

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}
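
/*
 * Sketch of the fork-time hook, assuming the copy_process() call site in
 * kernel/fork.c (surrounding lines paraphrased):
 *
 *	p = dup_task_struct(current, node);
 *	...
 *	klp_copy_process(p);	// child inherits current->patch_state
 */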