/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr. This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list. The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node: node for the global klp_ops list
 * @func_stack: list head for the stack of klp_func's (active func is on top)
 * @fops: registered ftrace ops struct
 */
struct klp_ops {
        struct list_head node;
        struct list_head func_stack;
        struct ftrace_ops fops;
};
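
/*
 * Illustrative note (not in-tree code): if two patches each replace the same
 * old_addr, their klp_func entries share one klp_ops and stack up on its
 * func_stack, newest first:
 *
 *      func_stack: newest klp_func -> older klp_func
 *
 * klp_ftrace_handler() below always redirects to the list head, so removing
 * the newest entry instantly falls back to the older function version.
 */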

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them. References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
        struct klp_ops *ops;
        struct klp_func *func;

        list_for_each_entry(ops, &klp_ops, node) {
                func = list_first_entry(&ops->func_stack, struct klp_func,
                                        stack_node);
                if (func->old_addr == old_addr)
                        return ops;
        }

        return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
        return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
        return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
        struct module *mod;

        if (!klp_is_module(obj))
                return;

        mutex_lock(&module_mutex);
        /*
         * We do not want to block removal of patched modules and therefore
         * we do not take a reference here. The patches are removed by
         * a going module handler instead.
         */
        mod = find_module(obj->name);
        /*
         * Do not mess with the work of the module coming and going notifiers.
         * Note that the patch might still be needed before the going handler
         * is called. Module functions can be called even in the GOING state
         * until mod->exit() finishes. This is especially important for
         * patches that modify the semantics of the functions.
         */
        if (mod && mod->klp_alive)
                obj->mod = mod;

        mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
        struct klp_patch *mypatch;

        list_for_each_entry(mypatch, &klp_patches, list)
                if (mypatch == patch)
                        return true;

        return false;
}

static bool klp_initialized(void)
{
        return !!klp_root_kobj;
}

struct klp_find_arg {
        const char *objname;
        const char *name;
        unsigned long addr;
        /*
         * If count == 0, the symbol was not found. If count == 1, a unique
         * match was found and addr is set. If count > 1, there is
         * unresolvable ambiguity among "count" number of symbols with the same
         * name in the same object.
         */
        unsigned long count;
};

static int klp_find_callback(void *data, const char *name,
                             struct module *mod, unsigned long addr)
{
        struct klp_find_arg *args = data;

        if ((mod && !args->objname) || (!mod && args->objname))
                return 0;

        if (strcmp(args->name, name))
                return 0;

        if (args->objname && strcmp(args->objname, mod->name))
                return 0;

        /*
         * args->addr might be overwritten if another match is found
         * but klp_find_object_symbol() handles this and only returns the
         * addr if count == 1.
         */
        args->addr = addr;
        args->count++;

        return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
                                  unsigned long *addr)
{
        struct klp_find_arg args = {
                .objname = objname,
                .name = name,
                .addr = 0,
                .count = 0
        };

        mutex_lock(&module_mutex);
        kallsyms_on_each_symbol(klp_find_callback, &args);
        mutex_unlock(&module_mutex);

        if (args.count == 0)
                pr_err("symbol '%s' not found in symbol table\n", name);
        else if (args.count > 1)
                pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
                       args.count, name, objname);
        else {
                *addr = args.addr;
                return 0;
        }

        *addr = 0;
        return -EINVAL;
}
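
/*
 * Example (illustrative; the symbol name is hypothetical): a NULL objname
 * restricts the search above to vmlinux symbols, so a lookup might read:
 *
 *      unsigned long addr;
 *      int ret = klp_find_object_symbol(NULL, "cmdline_proc_show", &addr);
 *
 * ret is 0 and addr is set only when exactly one match was found.
 */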

struct klp_verify_args {
        const char *name;
        const unsigned long addr;
};

static int klp_verify_callback(void *data, const char *name,
                               struct module *mod, unsigned long addr)
{
        struct klp_verify_args *args = data;

        if (!mod &&
            !strcmp(args->name, name) &&
            args->addr == addr)
                return 1;

        return 0;
}

static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
        struct klp_verify_args args = {
                .name = name,
                .addr = addr,
        };
        int ret;

        mutex_lock(&module_mutex);
        ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
        mutex_unlock(&module_mutex);

        if (!ret) {
                pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
                       name, addr);
                return -EINVAL;
        }

        return 0;
}

static int klp_find_verify_func_addr(struct klp_object *obj,
                                     struct klp_func *func)
{
        int ret;

#if defined(CONFIG_RANDOMIZE_BASE)
        /* If KASLR has been enabled, adjust old_addr accordingly */
        if (kaslr_enabled() && func->old_addr)
                func->old_addr += kaslr_offset();
#endif

        if (!func->old_addr || klp_is_module(obj))
                ret = klp_find_object_symbol(obj->name, func->old_name,
                                             &func->old_addr);
        else
                ret = klp_verify_vmlinux_symbol(func->old_name,
                                                func->old_addr);

        return ret;
}

/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
                                    unsigned long *addr)
{
        const struct kernel_symbol *sym;

        /* first, check if it's an exported symbol */
        preempt_disable();
        sym = find_symbol(name, NULL, NULL, true, true);
        if (sym) {
                *addr = sym->value;
                preempt_enable();
                return 0;
        }
        preempt_enable();

        /* otherwise check if it's in another .o within the patch module */
        return klp_find_object_symbol(pmod->name, name, addr);
}

static int klp_write_object_relocations(struct module *pmod,
                                        struct klp_object *obj)
{
        int ret;
        struct klp_reloc *reloc;

        if (WARN_ON(!klp_is_object_loaded(obj)))
                return -EINVAL;

        if (WARN_ON(!obj->relocs))
                return -EINVAL;

        for (reloc = obj->relocs; reloc->name; reloc++) {
                if (!klp_is_module(obj)) {

#if defined(CONFIG_RANDOMIZE_BASE)
                        /* If KASLR has been enabled, adjust old value accordingly */
                        if (kaslr_enabled())
                                reloc->val += kaslr_offset();
#endif
                        ret = klp_verify_vmlinux_symbol(reloc->name,
                                                        reloc->val);
                        if (ret)
                                return ret;
                } else {
                        /* module, reloc->val needs to be discovered */
                        if (reloc->external)
                                ret = klp_find_external_symbol(pmod,
                                                               reloc->name,
                                                               &reloc->val);
                        else
                                ret = klp_find_object_symbol(obj->mod->name,
                                                             reloc->name,
                                                             &reloc->val);
                        if (ret)
                                return ret;
                }
                ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
                                             reloc->val + reloc->addend);
                if (ret) {
                        pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
                               reloc->name, reloc->val, ret);
                        return ret;
                }
        }

        return 0;
}

static void notrace klp_ftrace_handler(unsigned long ip,
                                       unsigned long parent_ip,
                                       struct ftrace_ops *fops,
                                       struct pt_regs *regs)
{
        struct klp_ops *ops;
        struct klp_func *func;

        ops = container_of(fops, struct klp_ops, fops);

        rcu_read_lock();
        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);
        if (WARN_ON_ONCE(!func))
                goto unlock;

        klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
        rcu_read_unlock();
}

static void klp_disable_func(struct klp_func *func)
{
        struct klp_ops *ops;

        if (WARN_ON(func->state != KLP_ENABLED))
                return;
        if (WARN_ON(!func->old_addr))
                return;

        ops = klp_find_ops(func->old_addr);
        if (WARN_ON(!ops))
                return;

        if (list_is_singular(&ops->func_stack)) {
                WARN_ON(unregister_ftrace_function(&ops->fops));
                WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

                list_del_rcu(&func->stack_node);
                list_del(&ops->node);
                kfree(ops);
        } else {
                list_del_rcu(&func->stack_node);
        }

        func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
        struct klp_ops *ops;
        int ret;

        if (WARN_ON(!func->old_addr))
                return -EINVAL;

        if (WARN_ON(func->state != KLP_DISABLED))
                return -EINVAL;

        ops = klp_find_ops(func->old_addr);
        if (!ops) {
                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
                if (!ops)
                        return -ENOMEM;

                ops->fops.func = klp_ftrace_handler;
                ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
                                  FTRACE_OPS_FL_DYNAMIC |
                                  FTRACE_OPS_FL_IPMODIFY;

                list_add(&ops->node, &klp_ops);

                INIT_LIST_HEAD(&ops->func_stack);
                list_add_rcu(&func->stack_node, &ops->func_stack);

                ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
                if (ret) {
                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
                               func->old_name, ret);
                        goto err;
                }

                ret = register_ftrace_function(&ops->fops);
                if (ret) {
                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
                               func->old_name, ret);
                        ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
                        goto err;
                }

        } else {
                list_add_rcu(&func->stack_node, &ops->func_stack);
        }

        func->state = KLP_ENABLED;

        return 0;

err:
        list_del_rcu(&func->stack_node);
        list_del(&ops->node);
        kfree(ops);
        return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
        struct klp_func *func;

        klp_for_each_func(obj, func)
                if (func->state == KLP_ENABLED)
                        klp_disable_func(func);

        obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (WARN_ON(obj->state != KLP_DISABLED))
                return -EINVAL;

        if (WARN_ON(!klp_is_object_loaded(obj)))
                return -EINVAL;

        klp_for_each_func(obj, func) {
                ret = klp_enable_func(func);
                if (ret) {
                        klp_disable_object(obj);
                        return ret;
                }
        }
        obj->state = KLP_ENABLED;

        return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
        struct klp_object *obj;

        /* enforce stacking: only the last enabled patch can be disabled */
        if (!list_is_last(&patch->list, &klp_patches) &&
            list_next_entry(patch, list)->state == KLP_ENABLED)
                return -EBUSY;

        pr_notice("disabling patch '%s'\n", patch->mod->name);

        klp_for_each_object(patch, obj) {
                if (obj->state == KLP_ENABLED)
                        klp_disable_object(obj);
        }

        patch->state = KLP_DISABLED;

        return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch: The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
        int ret;

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_registered(patch)) {
                ret = -EINVAL;
                goto err;
        }

        if (patch->state == KLP_DISABLED) {
                ret = -EINVAL;
                goto err;
        }

        ret = __klp_disable_patch(patch);

err:
        mutex_unlock(&klp_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        int ret;

        if (WARN_ON(patch->state != KLP_DISABLED))
                return -EINVAL;

        /* enforce stacking: only the first disabled patch can be enabled */
        if (patch->list.prev != &klp_patches &&
            list_prev_entry(patch, list)->state == KLP_DISABLED)
                return -EBUSY;

        pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
        add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

        pr_notice("enabling patch '%s'\n", patch->mod->name);

        klp_for_each_object(patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;

                ret = klp_enable_object(obj);
                if (ret)
                        goto unregister;
        }

        patch->state = KLP_ENABLED;

        return 0;

unregister:
        WARN_ON(__klp_disable_patch(patch));
        return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch: The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
        int ret;

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_registered(patch)) {
                ret = -EINVAL;
                goto err;
        }

        ret = __klp_enable_patch(patch);

err:
        mutex_unlock(&klp_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */
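
/*
 * Usage sketch from user space (the patch name is hypothetical): a loaded
 * patch can be toggled through the "enabled" attribute created below, e.g.:
 *
 *      # echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *      # echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 * enabled_store() rejects any value other than 0 or 1 with -EINVAL.
 */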

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        struct klp_patch *patch;
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 10, &val);
        if (ret)
                return -EINVAL;

        if (val != KLP_DISABLED && val != KLP_ENABLED)
                return -EINVAL;

        patch = container_of(kobj, struct klp_patch, kobj);

        mutex_lock(&klp_mutex);

        if (val == patch->state) {
                /* already in requested state */
                ret = -EINVAL;
                goto err;
        }

        if (val == KLP_ENABLED) {
                ret = __klp_enable_patch(patch);
                if (ret)
                        goto err;
        } else {
                ret = __klp_disable_patch(patch);
                if (ret)
                        goto err;
        }

        mutex_unlock(&klp_mutex);

        return count;

err:
        mutex_unlock(&klp_mutex);
        return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
        &enabled_kobj_attr.attr,
        NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
        /*
         * Once we have a consistency model we'll need to module_put() the
         * patch module here. See klp_register_patch() for more details.
         */
}

static struct kobj_type klp_ktype_patch = {
        .release = klp_kobj_release_patch,
        .sysfs_ops = &kobj_sysfs_ops,
        .default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
        .release = klp_kobj_release_object,
        .sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
        .release = klp_kobj_release_func,
        .sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
                                   struct klp_func *limit)
{
        struct klp_func *func;

        for (func = obj->funcs; func->old_name && func != limit; func++)
                kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
        struct klp_func *func;

        obj->mod = NULL;

        klp_for_each_func(obj, func)
                func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
                                     struct klp_object *limit)
{
        struct klp_object *obj;

        for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
                klp_free_funcs_limited(obj, NULL);
                kobject_put(&obj->kobj);
        }
}

static void klp_free_patch(struct klp_patch *patch)
{
        klp_free_objects_limited(patch, NULL);
        if (!list_empty(&patch->list))
                list_del(&patch->list);
        kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
        INIT_LIST_HEAD(&func->stack_node);
        func->state = KLP_DISABLED;

        return kobject_init_and_add(&func->kobj, &klp_ktype_func,
                                    &obj->kobj, "%s", func->old_name);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
                                  struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (obj->relocs) {
                ret = klp_write_object_relocations(patch->mod, obj);
                if (ret)
                        return ret;
        }

        klp_for_each_func(obj, func) {
                ret = klp_find_verify_func_addr(obj, func);
                if (ret)
                        return ret;
        }

        return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
        struct klp_func *func;
        int ret;
        const char *name;

        if (!obj->funcs)
                return -EINVAL;

        obj->state = KLP_DISABLED;
        obj->mod = NULL;

        klp_find_object_module(obj);

        name = klp_is_module(obj) ? obj->name : "vmlinux";
        ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
                                   &patch->kobj, "%s", name);
        if (ret)
                return ret;

        klp_for_each_func(obj, func) {
                ret = klp_init_func(obj, func);
                if (ret)
                        goto free;
        }

        if (klp_is_object_loaded(obj)) {
                ret = klp_init_object_loaded(patch, obj);
                if (ret)
                        goto free;
        }

        return 0;

free:
        klp_free_funcs_limited(obj, func);
        kobject_put(&obj->kobj);
        return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        int ret;

        if (!patch->objs)
                return -EINVAL;

        mutex_lock(&klp_mutex);

        patch->state = KLP_DISABLED;

        ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
                                   klp_root_kobj, "%s", patch->mod->name);
        if (ret)
                goto unlock;

        klp_for_each_object(patch, obj) {
                ret = klp_init_object(patch, obj);
                if (ret)
                        goto free;
        }

        list_add_tail(&patch->list, &klp_patches);

        mutex_unlock(&klp_mutex);

        return 0;

free:
        klp_free_objects_limited(patch, obj);
        kobject_put(&patch->kobj);
unlock:
        mutex_unlock(&klp_mutex);
        return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch: Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
        int ret = 0;

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_registered(patch)) {
                ret = -EINVAL;
                goto out;
        }

        if (patch->state == KLP_ENABLED) {
                ret = -EBUSY;
                goto out;
        }

        klp_free_patch(patch);

out:
        mutex_unlock(&klp_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch: Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
        int ret;

        if (!klp_initialized())
                return -ENODEV;

        if (!patch || !patch->mod)
                return -EINVAL;

        /*
         * A reference is taken on the patch module to prevent it from being
         * unloaded. Right now, we don't allow patch modules to unload since
         * there is currently no method to determine if a thread is still
         * running in the patched code contained in the patch module once
         * the ftrace registration is successful.
         */
        if (!try_module_get(patch->mod))
                return -ENODEV;

        ret = klp_init_patch(patch);
        if (ret)
                module_put(patch->mod);

        return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
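
/*
 * Minimal usage sketch (modeled on samples/livepatch/livepatch-sample.c;
 * function and symbol names are illustrative): a patch module fills in the
 * klp_func/klp_object/klp_patch arrays and then registers and enables the
 * patch from its init function:
 *
 *      static struct klp_func funcs[] = {
 *              {
 *                      .old_name = "cmdline_proc_show",
 *                      .new_func = livepatch_cmdline_proc_show,
 *              }, { }
 *      };
 *
 *      static struct klp_object objs[] = {
 *              {
 *                      // name being NULL means vmlinux
 *                      .funcs = funcs,
 *              }, { }
 *      };
 *
 *      static struct klp_patch patch = {
 *              .mod = THIS_MODULE,
 *              .objs = objs,
 *      };
 *
 *      static int livepatch_init(void)
 *      {
 *              int ret;
 *
 *              ret = klp_register_patch(&patch);
 *              if (ret)
 *                      return ret;
 *              ret = klp_enable_patch(&patch);
 *              if (ret) {
 *                      WARN_ON(klp_unregister_patch(&patch));
 *                      return ret;
 *              }
 *              return 0;
 *      }
 */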

static int klp_module_notify_coming(struct klp_patch *patch,
                                    struct klp_object *obj)
{
        struct module *pmod = patch->mod;
        struct module *mod = obj->mod;
        int ret;

        ret = klp_init_object_loaded(patch, obj);
        if (ret) {
                pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
                        pmod->name, mod->name, ret);
                return ret;
        }

        if (patch->state == KLP_DISABLED)
                return 0;

        pr_notice("applying patch '%s' to loading module '%s'\n",
                  pmod->name, mod->name);

        ret = klp_enable_object(obj);
        if (ret)
                pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
                        pmod->name, mod->name, ret);
        return ret;
}

static void klp_module_notify_going(struct klp_patch *patch,
                                    struct klp_object *obj)
{
        struct module *pmod = patch->mod;
        struct module *mod = obj->mod;

        if (patch->state == KLP_DISABLED)
                goto disabled;

        pr_notice("reverting patch '%s' on unloading module '%s'\n",
                  pmod->name, mod->name);

        klp_disable_object(obj);

disabled:
        klp_free_object_loaded(obj);
}

static int klp_module_notify(struct notifier_block *nb, unsigned long action,
                             void *data)
{
        int ret;
        struct module *mod = data;
        struct klp_patch *patch;
        struct klp_object *obj;

        if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
                return 0;

        mutex_lock(&klp_mutex);

        /*
         * Each module has to know that the notifier has been called.
         * We never know what module will get patched by a new patch.
         */
        if (action == MODULE_STATE_COMING)
                mod->klp_alive = true;
        else /* MODULE_STATE_GOING */
                mod->klp_alive = false;

        list_for_each_entry(patch, &klp_patches, list) {
                klp_for_each_object(patch, obj) {
                        if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
                                continue;

                        if (action == MODULE_STATE_COMING) {
                                obj->mod = mod;
                                ret = klp_module_notify_coming(patch, obj);
                                if (ret) {
                                        obj->mod = NULL;
                                        pr_warn("patch '%s' is in an inconsistent state!\n",
                                                patch->mod->name);
                                }
                        } else /* MODULE_STATE_GOING */
                                klp_module_notify_going(patch, obj);

                        break;
                }
        }

        mutex_unlock(&klp_mutex);

        return 0;
}

static struct notifier_block klp_module_nb = {
        .notifier_call = klp_module_notify,
        .priority = INT_MIN+1, /* called late but before ftrace notifier */
};

static int __init klp_init(void)
{
        int ret;

        ret = klp_check_compiler_support();
        if (ret) {
                pr_info("Your compiler is too old; turning off.\n");
                return -EINVAL;
        }

        ret = register_module_notifier(&klp_module_nb);
        if (ret)
                return ret;

        klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
        if (!klp_root_kobj) {
                ret = -ENOMEM;
                goto unregister;
        }

        return 0;

unregister:
        unregister_module_notifier(&klp_module_nb);
        return ret;
}

module_init(klp_init);