/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"
#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128
struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;
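/*
 * For reference, klp_target_state and task->patch_state take the patch-state
 * values defined in include/linux/livepatch.h; in this kernel series they are
 * expected to be:
 *
 *	KLP_UNDEFINED	-1	no transition in progress
 *	KLP_UNPATCHED	 0	task runs the original functions
 *	KLP_PATCHED	 1	task runs the patched functions
 *
 * The "!state" / "!klp_target_state" inversions used below rely on
 * KLP_UNPATCHED and KLP_PATCHED being 0 and 1.
 */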
/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}

static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
/*
 * This function is just a stub to implement a hard force
 * of synchronize_sched(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We allow to patch also functions where RCU is not watching,
 * e.g. before user_exit(). We can not rely on the RCU infrastructure
 * to do the synchronization. Instead hard force the sched synchronization.
 *
 * This approach allows to use RCU functions for manipulating func_stack
 * safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}
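/*
 * Note: schedule_on_each_cpu() queues klp_sync() on every CPU's workqueue and
 * waits for all of them to finish, so by the time it returns every CPU has
 * gone through the scheduler at least once. That is a stronger guarantee than
 * synchronize_sched() for CPUs sitting in the idle loop or running in
 * userspace, where RCU may not be watching.
 */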
/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;
	bool immediate_func = false;

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	if (klp_transition_patch->immediate)
		goto done;

	klp_for_each_object(klp_transition_patch, obj) {
		klp_for_each_func(obj, func) {
			func->transition = false;
			if (func->immediate)
				immediate_func = true;
		}
	}

	if (klp_target_state == KLP_UNPATCHED && !immediate_func)
		module_put(klp_transition_patch->mod);

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

done:
	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}
/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}
/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}
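/*
 * For context, callers outside this file are expected to invoke
 * klp_update_patch_state(current) from safe points such as the arch's
 * exit-to-usermode loop and the idle loop, typically guarded by the
 * klp_patch_pending() helper from <linux/livepatch.h>:
 *
 *	if (unlikely(klp_patch_pending(current)))
 *		klp_update_patch_state(current);
 */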
/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (func->immediate)
		return 0;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}
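/*
 * A concrete (hypothetical) example of the range check above: if the function
 * being replaced starts at 0xffffffff81234560 and is 0x80 bytes long, a stack
 * entry of 0xffffffff81234588 lies inside [func_addr, func_addr + func_size)
 * and the task can't be switched yet (-EAGAIN), whereas 0xffffffff812345f0
 * lies past the end and doesn't block the transition.
 */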
/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}
/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}
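/*
 * Holding the task's rq lock above pins the task: it can't be scheduled in on
 * any CPU while its stack is being examined, so a task that passes the
 * task_running() check stays inactive for the duration of klp_check_stack().
 */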
/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		goto success;

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

success:
	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}
/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		return;

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}
/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (patch->immediate)
		return;

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}
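/*
 * For context, the enable path in core.c is expected to drive the helpers in
 * this file roughly as follows (simplified sketch; error handling and the
 * barrier pairing with klp_ftrace_handler() are omitted):
 *
 *	klp_init_transition(patch, KLP_PATCHED);
 *	klp_for_each_object(patch, obj)
 *		if (klp_is_object_loaded(obj))
 *			klp_patch_object(obj);
 *	klp_start_transition();
 *	klp_try_complete_transition();
 *
 * The disable path is symmetric: klp_init_transition(patch, KLP_UNPATCHED)
 * followed by klp_start_transition() and klp_try_complete_transition().
 */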
/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}
/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}