/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES 100
#define STACK_ERR_BUF_SIZE 128

struct klp_patch *klp_transition_patch;

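/*
 * klp_target_state is the patch state every task must reach before the
 * transition can complete: KLP_PATCHED when enabling a patch, KLP_UNPATCHED
 * when disabling one, and KLP_UNDEFINED whenever no transition is in
 * progress.
 */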
static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
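/*
 * Retries are scheduled roughly once per second; see the
 * round_jiffies_relative(HZ) in klp_try_complete_transition().
 */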

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;
	bool immediate_func = false;

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		synchronize_rcu();
	}

	if (klp_transition_patch->immediate)
		goto done;

	klp_for_each_object(klp_transition_patch, obj) {
		klp_for_each_func(obj, func) {
			func->transition = false;
			if (func->immediate)
				immediate_func = true;
		}
	}

	if (klp_target_state == KLP_UNPATCHED && !immediate_func)
		module_put(klp_transition_patch->mod);

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		synchronize_rcu();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

done:
	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	struct klp_patch *patch = klp_transition_patch;
	struct klp_object *obj;
	struct klp_func *func;
	bool immediate_func = false;

	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();

	/*
	 * In the enable error path, even immediate patches can be safely
	 * removed because the transition hasn't been started yet.
	 *
	 * klp_complete_transition() doesn't have a module_put() for immediate
	 * patches, so do it here.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			if (func->immediate)
				immediate_func = true;

	if (patch->immediate || immediate_func)
		module_put(patch->mod);
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	rcu_read_lock();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	rcu_read_unlock();
}
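
/*
 * A rough sketch of the barrier pairing described in the comment above:
 *
 *   klp_init_transition()                klp_update_patch_state()
 *     klp_target_state = state;            test_and_clear TIF_PATCH_PENDING
 *     smp_wmb();                             (acts as the read barrier)
 *   klp_start_transition()                 task->patch_state =
 *     set TIF_PATCH_PENDING                    READ_ONCE(klp_target_state);
 *
 * A task that observes TIF_PATCH_PENDING set is therefore guaranteed to
 * observe the klp_target_state value written before the flag was set, so
 * patch_state can never be set to KLP_UNDEFINED here.
 */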

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (func->immediate)
		return 0;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

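		/*
		 * A worked example with hypothetical addresses: if the
		 * candidate function occupies [0xffffffff81000000,
		 * 0xffffffff81000040), a stack entry of 0xffffffff81000010
		 * falls in that half-open range and blocks the switch, while
		 * an entry of exactly 0xffffffff81000040 does not.
		 */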
		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
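	/*
	 * The static 'entries' buffer below is shared across calls; that is
	 * presumably safe because, as the call paths in this file suggest,
	 * stack checking is serialized by klp_mutex (taken by
	 * klp_transition_work_fn() before klp_try_complete_transition()).
	 */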
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		goto success;

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

success:
	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/* we're done, now clean up the data structures */
	klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		return;

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (patch->immediate)
		return;

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}
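
/*
 * A summary of the overall flow implied by the functions above:
 * klp_init_transition() sets the target state, klp_start_transition() flags
 * every task with TIF_PATCH_PENDING, klp_try_complete_transition() retries
 * until all tasks have switched, and klp_complete_transition() tears the
 * transition state back down.
 */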

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	synchronize_rcu();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}