livepatch: fix RCU usage in klp_find_external_symbol()
1 /*
2 * core.c - Kernel Live Patching Core
3 *
4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
5 * Copyright (C) 2014 SUSE
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/mutex.h>
26 #include <linux/slab.h>
27 #include <linux/ftrace.h>
28 #include <linux/list.h>
29 #include <linux/kallsyms.h>
30 #include <linux/livepatch.h>
31
32 /**
33 * struct klp_ops - structure for tracking registered ftrace ops structs
34 *
35 * A single ftrace_ops is shared between all enabled replacement functions
36 * (klp_func structs) which have the same old_addr. This allows the switch
37 * between function versions to happen instantaneously by updating the klp_ops
38 * struct's func_stack list. The winner is the klp_func at the top of the
39 * func_stack (front of the list).
40 *
41 * @node: node for the global klp_ops list
42 * @func_stack: list head for the stack of klp_func's (active func is on top)
43 * @fops: registered ftrace ops struct
44 */
45 struct klp_ops {
46 struct list_head node;
47 struct list_head func_stack;
48 struct ftrace_ops fops;
49 };
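/*
 * Illustrative sketch (not part of the original file): if two patches both
 * replace the same function, they share one klp_ops and stack their
 * klp_func entries on it:
 *
 *	func_stack: patch2's klp_func -> patch1's klp_func
 *	fops:       one ftrace_ops registered for the shared old_addr
 *
 * klp_ftrace_handler() redirects to patch2's new_func; disabling patch2
 * pops its entry and patch1's klp_func becomes the active version again.
 */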
50
51 /*
52 * The klp_mutex protects the global lists and state transitions of any
53 * structure reachable from them. References to any structure must be obtained
54 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
55 * ensure it gets consistent data).
56 */
57 static DEFINE_MUTEX(klp_mutex);
58
59 static LIST_HEAD(klp_patches);
60 static LIST_HEAD(klp_ops);
61
62 static struct kobject *klp_root_kobj;
63
64 static struct klp_ops *klp_find_ops(unsigned long old_addr)
65 {
66 struct klp_ops *ops;
67 struct klp_func *func;
68
69 list_for_each_entry(ops, &klp_ops, node) {
70 func = list_first_entry(&ops->func_stack, struct klp_func,
71 stack_node);
72 if (func->old_addr == old_addr)
73 return ops;
74 }
75
76 return NULL;
77 }
78
79 static bool klp_is_module(struct klp_object *obj)
80 {
81 return obj->name;
82 }
83
84 static bool klp_is_object_loaded(struct klp_object *obj)
85 {
86 return !obj->name || obj->mod;
87 }
88
89 /* sets obj->mod if object is not vmlinux and module is found */
90 static void klp_find_object_module(struct klp_object *obj)
91 {
92 if (!klp_is_module(obj))
93 return;
94
95 mutex_lock(&module_mutex);
96 /*
97 * We don't need to take a reference on the module here because we have
98 * the klp_mutex, which is also taken by the module notifier. This
99 * prevents any module from unloading until we release the klp_mutex.
100 */
101 obj->mod = find_module(obj->name);
102 mutex_unlock(&module_mutex);
103 }
104
105 /* klp_mutex must be held by caller */
106 static bool klp_is_patch_registered(struct klp_patch *patch)
107 {
108 struct klp_patch *mypatch;
109
110 list_for_each_entry(mypatch, &klp_patches, list)
111 if (mypatch == patch)
112 return true;
113
114 return false;
115 }
116
117 static bool klp_initialized(void)
118 {
119 return klp_root_kobj;
120 }
121
122 struct klp_find_arg {
123 const char *objname;
124 const char *name;
125 unsigned long addr;
126 /*
127 * If count == 0, the symbol was not found. If count == 1, a unique
128 * match was found and addr is set. If count > 1, there is an
129 * unresolvable ambiguity among "count" symbols with the same name
130 * in the same object.
131 */
132 unsigned long count;
133 };
134
135 static int klp_find_callback(void *data, const char *name,
136 struct module *mod, unsigned long addr)
137 {
138 struct klp_find_arg *args = data;
139
140 if ((mod && !args->objname) || (!mod && args->objname))
141 return 0;
142
143 if (strcmp(args->name, name))
144 return 0;
145
146 if (args->objname && strcmp(args->objname, mod->name))
147 return 0;
148
149 /*
150 * args->addr might be overwritten if another match is found, but
151 * klp_find_object_symbol() handles this and only reports addr to
152 * the caller when count == 1.
153 */
154 args->addr = addr;
155 args->count++;
156
157 return 0;
158 }
159
160 static int klp_find_object_symbol(const char *objname, const char *name,
161 unsigned long *addr)
162 {
163 struct klp_find_arg args = {
164 .objname = objname,
165 .name = name,
166 .addr = 0,
167 .count = 0
168 };
169
170 kallsyms_on_each_symbol(klp_find_callback, &args);
171
172 if (args.count == 0)
173 pr_err("symbol '%s' not found in symbol table\n", name);
174 else if (args.count > 1)
175 pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
176 args.count, name, objname);
177 else {
178 *addr = args.addr;
179 return 0;
180 }
181
182 *addr = 0;
183 return -EINVAL;
184 }
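/*
 * For example (hypothetical): looking up a static function whose name
 * appears in two different compilation units of the same object yields
 * count == 2, so the lookup fails with -EINVAL instead of guessing an
 * address.
 */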
185
186 struct klp_verify_args {
187 const char *name;
188 const unsigned long addr;
189 };
190
191 static int klp_verify_callback(void *data, const char *name,
192 struct module *mod, unsigned long addr)
193 {
194 struct klp_verify_args *args = data;
195
196 if (!mod &&
197 !strcmp(args->name, name) &&
198 args->addr == addr)
199 return 1;
200
201 return 0;
202 }
203
204 static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
205 {
206 struct klp_verify_args args = {
207 .name = name,
208 .addr = addr,
209 };
210
211 if (kallsyms_on_each_symbol(klp_verify_callback, &args))
212 return 0;
213
214 pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
215 name, addr);
216 return -EINVAL;
217 }
218
219 static int klp_find_verify_func_addr(struct klp_object *obj,
220 struct klp_func *func)
221 {
222 int ret;
223
224 #if defined(CONFIG_RANDOMIZE_BASE)
225 /* KASLR is enabled, disregard old_addr from user */
226 func->old_addr = 0;
227 #endif
228
229 if (!func->old_addr || klp_is_module(obj))
230 ret = klp_find_object_symbol(obj->name, func->old_name,
231 &func->old_addr);
232 else
233 ret = klp_verify_vmlinux_symbol(func->old_name,
234 func->old_addr);
235
236 return ret;
237 }
238
239 /*
240 * External symbols are located outside the parent object, where the parent
241 * object is either vmlinux or the kmod being patched.
242 */
243 static int klp_find_external_symbol(struct module *pmod, const char *name,
244 unsigned long *addr)
245 {
246 const struct kernel_symbol *sym;
247
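/*
 * Note: sym must be dereferenced while preemption is still disabled.
 * find_symbol() relies on RCU internally, so once preempt_enable() runs
 * the module owning the symbol could be unloaded and sym would dangle.
 * This is the RCU usage fix named in the commit subject above.
 */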
248 /* first, check if it's an exported symbol */
249 preempt_disable();
250 sym = find_symbol(name, NULL, NULL, true, true);
251 if (sym) {
252 *addr = sym->value;
253 preempt_enable();
254 return 0;
255 }
256 preempt_enable();
257
258 /* otherwise check if it's in another .o within the patch module */
259 return klp_find_object_symbol(pmod->name, name, addr);
260 }
261
262 static int klp_write_object_relocations(struct module *pmod,
263 struct klp_object *obj)
264 {
265 int ret;
266 struct klp_reloc *reloc;
267
268 if (WARN_ON(!klp_is_object_loaded(obj)))
269 return -EINVAL;
270
271 if (WARN_ON(!obj->relocs))
272 return -EINVAL;
273
274 for (reloc = obj->relocs; reloc->name; reloc++) {
275 if (!klp_is_module(obj)) {
276 ret = klp_verify_vmlinux_symbol(reloc->name,
277 reloc->val);
278 if (ret)
279 return ret;
280 } else {
281 /* module, reloc->val needs to be discovered */
282 if (reloc->external)
283 ret = klp_find_external_symbol(pmod,
284 reloc->name,
285 &reloc->val);
286 else
287 ret = klp_find_object_symbol(obj->mod->name,
288 reloc->name,
289 &reloc->val);
290 if (ret)
291 return ret;
292 }
293 ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
294 reloc->val + reloc->addend);
295 if (ret) {
296 pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
297 reloc->name, reloc->val, ret);
298 return ret;
299 }
300 }
301
302 return 0;
303 }
304
305 static void notrace klp_ftrace_handler(unsigned long ip,
306 unsigned long parent_ip,
307 struct ftrace_ops *fops,
308 struct pt_regs *regs)
309 {
310 struct klp_ops *ops;
311 struct klp_func *func;
312
313 ops = container_of(fops, struct klp_ops, fops);
314
315 rcu_read_lock();
316 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
317 stack_node);
318 if (WARN_ON_ONCE(!func))
319 goto unlock;
320
321 klp_arch_set_pc(regs, (unsigned long)func->new_func);
322 unlock:
323 rcu_read_unlock();
324 }
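/*
 * Rough control flow for a patched function foo() (illustration only):
 *
 *	caller -> foo()'s ftrace hook -> klp_ftrace_handler()
 *	       -> klp_arch_set_pc(regs, func->new_func)
 *	       -> execution resumes in the replacement instead of foo()
 *
 * The rcu_read_lock() above pairs with the list_*_rcu() updates in
 * klp_enable_func()/klp_disable_func() below.
 */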
325
326 static int klp_disable_func(struct klp_func *func)
327 {
328 struct klp_ops *ops;
329 int ret;
330
331 if (WARN_ON(func->state != KLP_ENABLED))
332 return -EINVAL;
333
334 if (WARN_ON(!func->old_addr))
335 return -EINVAL;
336
337 ops = klp_find_ops(func->old_addr);
338 if (WARN_ON(!ops))
339 return -EINVAL;
340
341 if (list_is_singular(&ops->func_stack)) {
342 ret = unregister_ftrace_function(&ops->fops);
343 if (ret) {
344 pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
345 func->old_name, ret);
346 return ret;
347 }
348
349 ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
350 if (ret)
351 pr_warn("function unregister succeeded but failed to clear the filter\n");
352
353 list_del_rcu(&func->stack_node);
354 list_del(&ops->node);
355 kfree(ops);
356 } else {
357 list_del_rcu(&func->stack_node);
358 }
359
360 func->state = KLP_DISABLED;
361
362 return 0;
363 }
364
365 static int klp_enable_func(struct klp_func *func)
366 {
367 struct klp_ops *ops;
368 int ret;
369
370 if (WARN_ON(!func->old_addr))
371 return -EINVAL;
372
373 if (WARN_ON(func->state != KLP_DISABLED))
374 return -EINVAL;
375
376 ops = klp_find_ops(func->old_addr);
377 if (!ops) {
378 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
379 if (!ops)
380 return -ENOMEM;
381
382 ops->fops.func = klp_ftrace_handler;
383 ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
384 FTRACE_OPS_FL_DYNAMIC |
385 FTRACE_OPS_FL_IPMODIFY;
386
387 list_add(&ops->node, &klp_ops);
388
389 INIT_LIST_HEAD(&ops->func_stack);
390 list_add_rcu(&func->stack_node, &ops->func_stack);
391
392 ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
393 if (ret) {
394 pr_err("failed to set ftrace filter for function '%s' (%d)\n",
395 func->old_name, ret);
396 goto err;
397 }
398
399 ret = register_ftrace_function(&ops->fops);
400 if (ret) {
401 pr_err("failed to register ftrace handler for function '%s' (%d)\n",
402 func->old_name, ret);
403 ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
404 goto err;
405 }
406
408 } else {
409 list_add_rcu(&func->stack_node, &ops->func_stack);
410 }
411
412 func->state = KLP_ENABLED;
413
414 return 0;
415
416 err:
417 list_del_rcu(&func->stack_node);
418 list_del(&ops->node);
419 kfree(ops);
420 return ret;
421 }
422
423 static int klp_disable_object(struct klp_object *obj)
424 {
425 struct klp_func *func;
426 int ret;
427
428 for (func = obj->funcs; func->old_name; func++) {
429 if (func->state != KLP_ENABLED)
430 continue;
431
432 ret = klp_disable_func(func);
433 if (ret)
434 return ret;
435 }
436
437 obj->state = KLP_DISABLED;
438
439 return 0;
440 }
441
442 static int klp_enable_object(struct klp_object *obj)
443 {
444 struct klp_func *func;
445 int ret;
446
447 if (WARN_ON(obj->state != KLP_DISABLED))
448 return -EINVAL;
449
450 if (WARN_ON(!klp_is_object_loaded(obj)))
451 return -EINVAL;
452
453 for (func = obj->funcs; func->old_name; func++) {
454 ret = klp_enable_func(func);
455 if (ret)
456 goto unregister;
457 }
458 obj->state = KLP_ENABLED;
459
460 return 0;
461
462 unregister:
463 WARN_ON(klp_disable_object(obj));
464 return ret;
465 }
466
467 static int __klp_disable_patch(struct klp_patch *patch)
468 {
469 struct klp_object *obj;
470 int ret;
471
472 /* enforce stacking: only the last enabled patch can be disabled */
473 if (!list_is_last(&patch->list, &klp_patches) &&
474 list_next_entry(patch, list)->state == KLP_ENABLED)
475 return -EBUSY;
476
477 pr_notice("disabling patch '%s'\n", patch->mod->name);
478
479 for (obj = patch->objs; obj->funcs; obj++) {
480 if (obj->state != KLP_ENABLED)
481 continue;
482
483 ret = klp_disable_object(obj);
484 if (ret)
485 return ret;
486 }
487
488 patch->state = KLP_DISABLED;
489
490 return 0;
491 }
492
493 /**
494 * klp_disable_patch() - disables a registered patch
495 * @patch: The registered, enabled patch to be disabled
496 *
497 * Unregisters the patched functions from ftrace.
498 *
499 * Return: 0 on success, otherwise error
500 */
501 int klp_disable_patch(struct klp_patch *patch)
502 {
503 int ret;
504
505 mutex_lock(&klp_mutex);
506
507 if (!klp_is_patch_registered(patch)) {
508 ret = -EINVAL;
509 goto err;
510 }
511
512 if (patch->state == KLP_DISABLED) {
513 ret = -EINVAL;
514 goto err;
515 }
516
517 ret = __klp_disable_patch(patch);
518
519 err:
520 mutex_unlock(&klp_mutex);
521 return ret;
522 }
523 EXPORT_SYMBOL_GPL(klp_disable_patch);
524
525 static int __klp_enable_patch(struct klp_patch *patch)
526 {
527 struct klp_object *obj;
528 int ret;
529
530 if (WARN_ON(patch->state != KLP_DISABLED))
531 return -EINVAL;
532
533 /* enforce stacking: only the first disabled patch can be enabled */
534 if (patch->list.prev != &klp_patches &&
535 list_prev_entry(patch, list)->state == KLP_DISABLED)
536 return -EBUSY;
537
538 pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
539 add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
540
541 pr_notice("enabling patch '%s'\n", patch->mod->name);
542
543 for (obj = patch->objs; obj->funcs; obj++) {
544 klp_find_object_module(obj);
545
546 if (!klp_is_object_loaded(obj))
547 continue;
548
549 ret = klp_enable_object(obj);
550 if (ret)
551 goto unregister;
552 }
553
554 patch->state = KLP_ENABLED;
555
556 return 0;
557
558 unregister:
559 WARN_ON(__klp_disable_patch(patch));
560 return ret;
561 }
562
563 /**
564 * klp_enable_patch() - enables a registered patch
565 * @patch: The registered, disabled patch to be enabled
566 *
567 * Performs the needed symbol lookups and code relocations,
568 * then registers the patched functions with ftrace.
569 *
570 * Return: 0 on success, otherwise error
571 */
572 int klp_enable_patch(struct klp_patch *patch)
573 {
574 int ret;
575
576 mutex_lock(&klp_mutex);
577
578 if (!klp_is_patch_registered(patch)) {
579 ret = -EINVAL;
580 goto err;
581 }
582
583 ret = __klp_enable_patch(patch);
584
585 err:
586 mutex_unlock(&klp_mutex);
587 return ret;
588 }
589 EXPORT_SYMBOL_GPL(klp_enable_patch);
590
591 /*
592 * Sysfs Interface
593 *
594 * /sys/kernel/livepatch
595 * /sys/kernel/livepatch/<patch>
596 * /sys/kernel/livepatch/<patch>/enabled
597 * /sys/kernel/livepatch/<patch>/<object>
598 * /sys/kernel/livepatch/<patch>/<object>/<func>
599 */
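/*
 * For instance, assuming a loaded patch module named "livepatch_sample",
 * the patch can be toggled from userspace with:
 *
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled
 */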
600
601 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
602 const char *buf, size_t count)
603 {
604 struct klp_patch *patch;
605 int ret;
606 unsigned long val;
607
608 ret = kstrtoul(buf, 10, &val);
609 if (ret)
610 return -EINVAL;
611
612 if (val != KLP_DISABLED && val != KLP_ENABLED)
613 return -EINVAL;
614
615 patch = container_of(kobj, struct klp_patch, kobj);
616
617 mutex_lock(&klp_mutex);
618
619 if (val == patch->state) {
620 /* already in requested state */
621 ret = -EINVAL;
622 goto err;
623 }
624
625 if (val == KLP_ENABLED) {
626 ret = __klp_enable_patch(patch);
627 if (ret)
628 goto err;
629 } else {
630 ret = __klp_disable_patch(patch);
631 if (ret)
632 goto err;
633 }
634
635 mutex_unlock(&klp_mutex);
636
637 return count;
638
639 err:
640 mutex_unlock(&klp_mutex);
641 return ret;
642 }
643
644 static ssize_t enabled_show(struct kobject *kobj,
645 struct kobj_attribute *attr, char *buf)
646 {
647 struct klp_patch *patch;
648
649 patch = container_of(kobj, struct klp_patch, kobj);
650 return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
651 }
652
653 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
654 static struct attribute *klp_patch_attrs[] = {
655 &enabled_kobj_attr.attr,
656 NULL
657 };
658
659 static void klp_kobj_release_patch(struct kobject *kobj)
660 {
661 /*
662 * Once we have a consistency model we'll need to module_put() the
663 * patch module here. See klp_register_patch() for more details.
664 */
665 }
666
667 static struct kobj_type klp_ktype_patch = {
668 .release = klp_kobj_release_patch,
669 .sysfs_ops = &kobj_sysfs_ops,
670 .default_attrs = klp_patch_attrs,
671 };
672
673 static void klp_kobj_release_func(struct kobject *kobj)
674 {
675 }
676
677 static struct kobj_type klp_ktype_func = {
678 .release = klp_kobj_release_func,
679 .sysfs_ops = &kobj_sysfs_ops,
680 };
681
682 /*
683 * Free all functions' kobjects in the array up to some limit. When limit is
684 * NULL, all kobjects are freed.
685 */
686 static void klp_free_funcs_limited(struct klp_object *obj,
687 struct klp_func *limit)
688 {
689 struct klp_func *func;
690
691 for (func = obj->funcs; func->old_name && func != limit; func++)
692 kobject_put(&func->kobj);
693 }
694
695 /* Clean up when a patched object is unloaded */
696 static void klp_free_object_loaded(struct klp_object *obj)
697 {
698 struct klp_func *func;
699
700 obj->mod = NULL;
701
702 for (func = obj->funcs; func->old_name; func++)
703 func->old_addr = 0;
704 }
705
706 /*
707 * Free all objects' kobjects in the array up to some limit. When limit is
708 * NULL, all kobjects are freed.
709 */
710 static void klp_free_objects_limited(struct klp_patch *patch,
711 struct klp_object *limit)
712 {
713 struct klp_object *obj;
714
715 for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
716 klp_free_funcs_limited(obj, NULL);
717 kobject_put(obj->kobj);
718 }
719 }
720
721 static void klp_free_patch(struct klp_patch *patch)
722 {
723 klp_free_objects_limited(patch, NULL);
724 if (!list_empty(&patch->list))
725 list_del(&patch->list);
726 kobject_put(&patch->kobj);
727 }
728
729 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
730 {
731 INIT_LIST_HEAD(&func->stack_node);
732 func->state = KLP_DISABLED;
733
734 return kobject_init_and_add(&func->kobj, &klp_ktype_func,
735 obj->kobj, "%s", func->old_name);
736 }
737
738 /* parts of the initialization that are done only when the object is loaded */
739 static int klp_init_object_loaded(struct klp_patch *patch,
740 struct klp_object *obj)
741 {
742 struct klp_func *func;
743 int ret;
744
745 if (obj->relocs) {
746 ret = klp_write_object_relocations(patch->mod, obj);
747 if (ret)
748 return ret;
749 }
750
751 for (func = obj->funcs; func->old_name; func++) {
752 ret = klp_find_verify_func_addr(obj, func);
753 if (ret)
754 return ret;
755 }
756
757 return 0;
758 }
759
760 static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
761 {
762 struct klp_func *func;
763 int ret;
764 const char *name;
765
766 if (!obj->funcs)
767 return -EINVAL;
768
769 obj->state = KLP_DISABLED;
770
771 klp_find_object_module(obj);
772
773 name = klp_is_module(obj) ? obj->name : "vmlinux";
774 obj->kobj = kobject_create_and_add(name, &patch->kobj);
775 if (!obj->kobj)
776 return -ENOMEM;
777
778 for (func = obj->funcs; func->old_name; func++) {
779 ret = klp_init_func(obj, func);
780 if (ret)
781 goto free;
782 }
783
784 if (klp_is_object_loaded(obj)) {
785 ret = klp_init_object_loaded(patch, obj);
786 if (ret)
787 goto free;
788 }
789
790 return 0;
791
792 free:
793 klp_free_funcs_limited(obj, func);
794 kobject_put(obj->kobj);
795 return ret;
796 }
797
798 static int klp_init_patch(struct klp_patch *patch)
799 {
800 struct klp_object *obj;
801 int ret;
802
803 if (!patch->objs)
804 return -EINVAL;
805
806 mutex_lock(&klp_mutex);
807
808 patch->state = KLP_DISABLED;
809
810 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
811 klp_root_kobj, "%s", patch->mod->name);
812 if (ret)
813 goto unlock;
814
815 for (obj = patch->objs; obj->funcs; obj++) {
816 ret = klp_init_object(patch, obj);
817 if (ret)
818 goto free;
819 }
820
821 list_add_tail(&patch->list, &klp_patches);
822
823 mutex_unlock(&klp_mutex);
824
825 return 0;
826
827 free:
828 klp_free_objects_limited(patch, obj);
829 kobject_put(&patch->kobj);
830 unlock:
831 mutex_unlock(&klp_mutex);
832 return ret;
833 }
834
835 /**
836 * klp_unregister_patch() - unregisters a patch
837 * @patch: Disabled patch to be unregistered
838 *
839 * Frees the data structures and removes the sysfs interface.
840 *
841 * Return: 0 on success, otherwise error
842 */
843 int klp_unregister_patch(struct klp_patch *patch)
844 {
845 int ret = 0;
846
847 mutex_lock(&klp_mutex);
848
849 if (!klp_is_patch_registered(patch)) {
850 ret = -EINVAL;
851 goto out;
852 }
853
854 if (patch->state == KLP_ENABLED) {
855 ret = -EBUSY;
856 goto out;
857 }
858
859 klp_free_patch(patch);
860
861 out:
862 mutex_unlock(&klp_mutex);
863 return ret;
864 }
865 EXPORT_SYMBOL_GPL(klp_unregister_patch);
866
867 /**
868 * klp_register_patch() - registers a patch
869 * @patch: Patch to be registered
870 *
871 * Initializes the data structure associated with the patch and
872 * creates the sysfs interface.
873 *
874 * Return: 0 on success, otherwise error
875 */
876 int klp_register_patch(struct klp_patch *patch)
877 {
878 int ret;
879
880 if (!klp_initialized())
881 return -ENODEV;
882
883 if (!patch || !patch->mod)
884 return -EINVAL;
885
886 /*
887 * A reference is taken on the patch module to prevent it from being
888 * unloaded. Right now, we don't allow patch modules to unload since
889 * there is currently no method to determine if a thread is still
890 * running in the patched code contained in the patch module once
891 * the ftrace registration is successful.
892 */
893 if (!try_module_get(patch->mod))
894 return -ENODEV;
895
896 ret = klp_init_patch(patch);
897 if (ret)
898 module_put(patch->mod);
899
900 return ret;
901 }
902 EXPORT_SYMBOL_GPL(klp_register_patch);
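/*
 * A minimal patch-module sketch, modeled on the in-tree
 * samples/livepatch/livepatch-sample.c; the patched function and module
 * name are assumptions for illustration (a NULL klp_object name means
 * vmlinux):
 *
 *	#include <linux/module.h>
 *	#include <linux/kernel.h>
 *	#include <linux/livepatch.h>
 *	#include <linux/seq_file.h>
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 *	module_init(livepatch_init);
 *	MODULE_LICENSE("GPL");
 */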
903
904 static void klp_module_notify_coming(struct klp_patch *patch,
905 struct klp_object *obj)
906 {
907 struct module *pmod = patch->mod;
908 struct module *mod = obj->mod;
909 int ret;
910
911 ret = klp_init_object_loaded(patch, obj);
912 if (ret)
913 goto err;
914
915 if (patch->state == KLP_DISABLED)
916 return;
917
918 pr_notice("applying patch '%s' to loading module '%s'\n",
919 pmod->name, mod->name);
920
921 ret = klp_enable_object(obj);
922 if (!ret)
923 return;
924
925 err:
926 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
927 pmod->name, mod->name, ret);
928 }
929
930 static void klp_module_notify_going(struct klp_patch *patch,
931 struct klp_object *obj)
932 {
933 struct module *pmod = patch->mod;
934 struct module *mod = obj->mod;
935 int ret;
936
937 if (patch->state == KLP_DISABLED)
938 goto disabled;
939
940 pr_notice("reverting patch '%s' on unloading module '%s'\n",
941 pmod->name, mod->name);
942
943 ret = klp_disable_object(obj);
944 if (ret)
945 pr_warn("failed to revert patch '%s' on module '%s' (%d)\n",
946 pmod->name, mod->name, ret);
947
948 disabled:
949 klp_free_object_loaded(obj);
950 }
951
952 static int klp_module_notify(struct notifier_block *nb, unsigned long action,
953 void *data)
954 {
955 struct module *mod = data;
956 struct klp_patch *patch;
957 struct klp_object *obj;
958
959 if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
960 return 0;
961
962 mutex_lock(&klp_mutex);
963
964 list_for_each_entry(patch, &klp_patches, list) {
965 for (obj = patch->objs; obj->funcs; obj++) {
966 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
967 continue;
968
969 if (action == MODULE_STATE_COMING) {
970 obj->mod = mod;
971 klp_module_notify_coming(patch, obj);
972 } else /* MODULE_STATE_GOING */
973 klp_module_notify_going(patch, obj);
974
975 break;
976 }
977 }
978
979 mutex_unlock(&klp_mutex);
980
981 return 0;
982 }
983
984 static struct notifier_block klp_module_nb = {
985 .notifier_call = klp_module_notify,
986 .priority = INT_MIN+1, /* called late but before ftrace notifier */
987 };
988
989 static int klp_init(void)
990 {
991 int ret;
992
993 ret = klp_check_compiler_support();
994 if (ret) {
995 pr_info("Your compiler is too old; turning off.\n");
996 return -EINVAL;
997 }
998
999 ret = register_module_notifier(&klp_module_nb);
1000 if (ret)
1001 return ret;
1002
1003 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1004 if (!klp_root_kobj) {
1005 ret = -ENOMEM;
1006 goto unregister;
1007 }
1008
1009 return 0;
1010
1011 unregister:
1012 unregister_module_notifier(&klp_module_nb);
1013 return ret;
1014 }
1015
1016 module_init(klp_init);