/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
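
/*
 * Example of the sympos semantics implemented above (illustrative only;
 * the symbol and object names below are hypothetical): if the module "foo"
 * contains two static functions named "check_buf", then
 *
 *	klp_find_object_symbol("foo", "check_buf", 2, &addr);
 *
 * returns 0 and stores the address of the second occurrence in addr,
 * whereas passing sympos == 0 fails with -EINVAL because the symbol is
 * not unique within the object.
 */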

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
205 pr_err("symbol %s is not marked as a livepatch symbol",
206 strtab + sym->st_name);
207 return -EINVAL;
208 }
209
210 /* Format: .klp.sym.objname.symname,sympos */
211 cnt = sscanf(strtab + sym->st_name,
212 ".klp.sym.%55[^.].%127[^,],%lu",
213 objname, symname, &sympos);
214 if (cnt != 3) {
215 pr_err("symbol %s has an incorrectly formatted name",
216 strtab + sym->st_name);
217 return -EINVAL;
218 }

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
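
/*
 * A concrete (hypothetical) instance of the symbol-name format parsed
 * above: a livepatch symbol named
 *
 *	.klp.sym.foo.check_buf,1
 *
 * is split by the sscanf() into objname = "foo", symname = "check_buf"
 * and sympos = 1, i.e. the first occurrence of check_buf in the module
 * "foo". klp_resolve_symbols() then fills in sym->st_value with the
 * address that klp_find_object_symbol() resolves.
 */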

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
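
/*
 * Likewise for the section-name format handled above (again with
 * hypothetical names): a patch module carrying relocations against the
 * module "foo" might contain a section named
 *
 *	.klp.rela.foo.text
 *
 * The sscanf() extracts sec_objname = "foo" (the %55[^.] conversion stops
 * at the first '.' after the object name), and the relocations are only
 * applied once "foo" is loaded and this function is called for it.
 */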

static int __klp_disable_patch(struct klp_patch *patch)
{
	if (klp_transition_patch)
		return -EBUSY;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->enabled)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = false;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (!patch->enabled) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    !list_prev_entry(patch, list)->enabled)
		return -EBUSY;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.
	 *
	 * Note: For immediate (no consistency model) patches we don't allow
	 * patch modules to unload since there is no safe/sane method to
	 * determine if a thread is still running in the patched code contained
	 * in the patch module once the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to enable patch '%s'\n",
				patch->mod->name);

			klp_cancel_transition();
			return ret;
		}
	}

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = true;

	return 0;
}
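
/*
 * Illustrative example of the stacking rules enforced in
 * __klp_enable_patch() and __klp_disable_patch(): if patches P1, P2 and
 * P3 were enabled in that order, only P3 (the last enabled patch) may be
 * disabled. After P3 is disabled, P3 is also the only patch that may be
 * (re-)enabled, being the first disabled patch on the list.
 */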

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
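
/*
 * For example, a (hypothetical) patch module "livepatch_foo" patching
 * cmdline_proc_show in vmlinux would show up as:
 *
 *	/sys/kernel/livepatch/livepatch_foo/enabled
 *	/sys/kernel/livepatch/livepatch_foo/transition
 *	/sys/kernel/livepatch/livepatch_foo/vmlinux/cmdline_proc_show,1
 *
 * Writing "0" or "1" to the enabled file disables or enables the patch
 * (or reverses an in-progress transition); writing the current value is
 * rejected with -EINVAL, see enabled_store() below.
 */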

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		/*
		 * The module with the patch could have disappeared meanwhile
		 * or it might not be properly initialized yet.
		 */
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (patch == klp_transition_patch) {
		klp_reverse_transition();
	} else if (enabled) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset(func->old_addr,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->enabled = false;
	init_completion(&patch->finish);

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);

	mutex_unlock(&klp_mutex);

	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled) {
		ret = -EBUSY;
		goto err;
	}

	klp_free_patch(patch);

	mutex_unlock(&klp_mutex);

	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return 0;
err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * There is no need to take the reference on the patch module here. It is done
 * later when the patch is enabled.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * Architectures without reliable stack traces have to set
	 * patch->immediate because there's currently no way to patch kthreads
	 * with the consistency model.
	 */
	if (!klp_have_reliable_stack() && !patch->immediate) {
		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
		return -ENOSYS;
	}

	return klp_init_patch(patch);
}
EXPORT_SYMBOL_GPL(klp_register_patch);
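
/*
 * Minimal usage sketch of the API exported above, modelled on
 * samples/livepatch/livepatch-sample.c; the patched function and all
 * names are illustrative. A NULL klp_object name selects vmlinux as the
 * patched object:
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{ .old_name = "cmdline_proc_show",
 *		  .new_func = livepatch_cmdline_proc_show, },
 *		{ }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{ .funcs = funcs, },
 *		{ }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 */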

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			/*
			 * Only patch the module if the patch is enabled or is
			 * in transition.
			 */
			if (!patch->enabled && patch != klp_transition_patch)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			/*
			 * Only unpatch the module if the patch is enabled or
			 * is in transition.
			 */
			if (patch->enabled || patch == klp_transition_patch) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_unpatch_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);