/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr. This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list. The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
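
/*
 * For example (illustrative): if patch A and patch B both replace the same
 * function, their two klp_func structs share one klp_ops/ftrace_ops.
 * Enabling B after A pushes B's klp_func onto func_stack, so the handler
 * redirects to B's new_func; disabling B pops it and A's version wins again.
 */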

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them. References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if the object is not vmlinux and the module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics of
	 * the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by the caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure the symbol is
	 * unique; otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0) {
		pr_err("symbol '%s' not found in symbol table\n", name);
	} else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname ? objname : "vmlinux");
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
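
/*
 * Example (illustrative): if kallsyms lists two static functions named "foo"
 * in the same object, sympos 1 selects the first occurrence and sympos 2 the
 * second, while sympos 0 fails with -EINVAL because the symbol is ambiguous.
 * For a unique symbol, either sympos 0 or sympos 1 succeeds.
 */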

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
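		/* e.g. ".klp.sym.vmlinux.printk,0" (illustrative; sympos 0 = unique) */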
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	module_disable_ro(pmod);
	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See the comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
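		/* e.g. ".klp.rela.ext4.text.unlikely" (an illustrative name) */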
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	module_enable_ro(pmod);
	return ret;
}

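/*
 * The ftrace handler registered for every patched function. It redirects
 * execution by rewriting the saved instruction pointer (via klp_arch_set_pc())
 * to the new function at the top of the func_stack. The stack is read under
 * RCU because this handler runs outside klp_mutex.
 */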
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}
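
/*
 * Stacking example (illustrative): with patches P1, P2 and P3 registered in
 * that order, P2 can be enabled only while P1 is enabled, and disabled only
 * after P3 has been disabled. The enable order is therefore P1 -> P2 -> P3
 * and the disable order P3 -> P2 -> P1.
 */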

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
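
/*
 * For example (illustrative shell usage, assuming a loaded patch module
 * named "livepatch_sample"):
 *
 *	# cat /sys/kernel/livepatch/livepatch_sample/enabled
 *	1
 *	# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 */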

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE - 1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here. See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
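
/*
 * For example (illustrative): patching the unique vmlinux symbol
 * cmdline_proc_show creates
 * /sys/kernel/livepatch/<patch>/vmlinux/cmdline_proc_show,1.
 */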

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded. Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
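
/*
 * Minimal usage sketch (illustrative, modeled on samples/livepatch; the
 * replacement function body is hypothetical):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// name being NULL means vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 */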

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know which module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * an error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know which module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);