]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - kernel/livepatch/core.c
livepatch: make klp_mutex proper part of API
[mirror_ubuntu-jammy-kernel.git] / kernel / livepatch / core.c
CommitLineData
b700e7f0
SJ
1/*
2 * core.c - Kernel Live Patching Core
3 *
4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
5 * Copyright (C) 2014 SUSE
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/mutex.h>
26#include <linux/slab.h>
b700e7f0
SJ
27#include <linux/list.h>
28#include <linux/kallsyms.h>
29#include <linux/livepatch.h>
425595a7
JY
30#include <linux/elf.h>
31#include <linux/moduleloader.h>
3ec24776 32#include <linux/completion.h>
b56b36ee 33#include <asm/cacheflush.h>
10517429 34#include "core.h"
c349cdca 35#include "patch.h"
d83a7cb3 36#include "transition.h"
b700e7f0 37
3c33f5b9 38/*
d83a7cb3
JP
39 * klp_mutex is a coarse lock which serializes access to klp data. All
40 * accesses to klp-related variables and structures must have mutex protection,
41 * except within the following functions which carefully avoid the need for it:
42 *
43 * - klp_ftrace_handler()
44 * - klp_update_patch_state()
3c33f5b9 45 */
d83a7cb3 46DEFINE_MUTEX(klp_mutex);
3c33f5b9 47
b700e7f0
SJ
48static LIST_HEAD(klp_patches);
49
50static struct kobject *klp_root_kobj;
51
52static bool klp_is_module(struct klp_object *obj)
53{
54 return obj->name;
55}
56
57static bool klp_is_object_loaded(struct klp_object *obj)
58{
59 return !obj->name || obj->mod;
60}
61
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	/* vmlinux objects have no backing module to look up */
	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess work of klp_module_coming() and klp_module_going().
	 * Note that the patch might still be needed before klp_module_going()
	 * is called. Module functions can be called even in the GOING state
	 * until mod->exit() finishes. This is especially important for
	 * patches that modify semantic of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
89
b700e7f0
SJ
90static bool klp_is_patch_registered(struct klp_patch *patch)
91{
92 struct klp_patch *mypatch;
93
94 list_for_each_entry(mypatch, &klp_patches, list)
95 if (mypatch == patch)
96 return true;
97
98 return false;
99}
100
101static bool klp_initialized(void)
102{
e76ff06a 103 return !!klp_root_kobj;
b700e7f0
SJ
104}
105
/* Search state shared between klp_find_object_symbol() and its callback. */
struct klp_find_arg {
	const char *objname;	/* module name, or NULL for vmlinux */
	const char *name;	/* symbol name being looked up */
	unsigned long addr;	/* address of the last match found */
	unsigned long count;	/* number of matches seen so far */
	unsigned long pos;	/* desired occurrence (sympos); 0 = must be unique */
};
113
114static int klp_find_callback(void *data, const char *name,
115 struct module *mod, unsigned long addr)
116{
117 struct klp_find_arg *args = data;
118
119 if ((mod && !args->objname) || (!mod && args->objname))
120 return 0;
121
122 if (strcmp(args->name, name))
123 return 0;
124
125 if (args->objname && strcmp(args->objname, mod->name))
126 return 0;
127
b700e7f0
SJ
128 args->addr = addr;
129 args->count++;
130
b2b018ef
CA
131 /*
132 * Finish the search when the symbol is found for the desired position
133 * or the position is not defined for a non-unique symbol.
134 */
135 if ((args->pos && (args->count == args->pos)) ||
136 (!args->pos && (args->count > 1)))
137 return 1;
138
b700e7f0
SJ
139 return 0;
140}
141
/*
 * Resolve @name within @objname (NULL means vmlinux) to an address.
 *
 * @sympos selects the nth occurrence of a non-unique symbol; 0 demands that
 * the symbol be unique. On success stores the address in *addr and returns
 * 0; otherwise logs the reason, zeroes *addr and returns -EINVAL.
 */
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	/* module_mutex keeps module symbol tables stable during the walk */
	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
177
425595a7 178static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
b700e7f0 179{
425595a7
JY
180 int i, cnt, vmlinux, ret;
181 char objname[MODULE_NAME_LEN];
182 char symname[KSYM_NAME_LEN];
183 char *strtab = pmod->core_kallsyms.strtab;
184 Elf_Rela *relas;
185 Elf_Sym *sym;
186 unsigned long sympos, addr;
b700e7f0 187
b2b018ef 188 /*
425595a7
JY
189 * Since the field widths for objname and symname in the sscanf()
190 * call are hard-coded and correspond to MODULE_NAME_LEN and
191 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
192 * and KSYM_NAME_LEN have the values we expect them to have.
193 *
194 * Because the value of MODULE_NAME_LEN can differ among architectures,
195 * we use the smallest/strictest upper bound possible (56, based on
196 * the current definition of MODULE_NAME_LEN) to prevent overflows.
b2b018ef 197 */
425595a7
JY
198 BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
199
200 relas = (Elf_Rela *) relasec->sh_addr;
201 /* For each rela in this klp relocation section */
202 for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
203 sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
204 if (sym->st_shndx != SHN_LIVEPATCH) {
205 pr_err("symbol %s is not marked as a livepatch symbol",
206 strtab + sym->st_name);
207 return -EINVAL;
208 }
209
210 /* Format: .klp.sym.objname.symname,sympos */
211 cnt = sscanf(strtab + sym->st_name,
212 ".klp.sym.%55[^.].%127[^,],%lu",
213 objname, symname, &sympos);
214 if (cnt != 3) {
215 pr_err("symbol %s has an incorrectly formatted name",
216 strtab + sym->st_name);
217 return -EINVAL;
218 }
219
220 /* klp_find_object_symbol() treats a NULL objname as vmlinux */
221 vmlinux = !strcmp(objname, "vmlinux");
222 ret = klp_find_object_symbol(vmlinux ? NULL : objname,
223 symname, sympos, &addr);
224 if (ret)
225 return ret;
226
227 sym->st_value = addr;
228 }
229
230 return 0;
b700e7f0
SJ
231}
232
/*
 * Apply all klp relocation sections of the patch module @pmod that target
 * @obj. The object must already be loaded; the caller is responsible for
 * making the patch module writable (see klp_init_object_loaded()).
 */
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name",
			       secname);
			ret = -EINVAL;
			break;
		}

		/* Skip sections targeting a different object. */
		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
282
b700e7f0
SJ
/*
 * Start disabling @patch: initiate an UNPATCHED transition and attempt to
 * complete it. Caller must hold klp_mutex.
 */
static int __klp_disable_patch(struct klp_patch *patch)
{
	/* Only one transition may be in flight at a time. */
	if (klp_transition_patch)
		return -EBUSY;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->enabled)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = false;

	return 0;
}
310
311/**
312 * klp_disable_patch() - disables a registered patch
313 * @patch: The registered, enabled patch to be disabled
314 *
315 * Unregisters the patched functions from ftrace.
316 *
317 * Return: 0 on success, otherwise error
318 */
319int klp_disable_patch(struct klp_patch *patch)
320{
321 int ret;
322
323 mutex_lock(&klp_mutex);
324
325 if (!klp_is_patch_registered(patch)) {
326 ret = -EINVAL;
327 goto err;
328 }
329
0dade9f3 330 if (!patch->enabled) {
b700e7f0
SJ
331 ret = -EINVAL;
332 goto err;
333 }
334
335 ret = __klp_disable_patch(patch);
336
337err:
338 mutex_unlock(&klp_mutex);
339 return ret;
340}
341EXPORT_SYMBOL_GPL(klp_disable_patch);
342
/*
 * Start enabling @patch: pin the patch module, patch all loaded objects and
 * initiate a PATCHED transition. Caller must hold klp_mutex.
 */
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	/* Only one transition may be in flight at a time. */
	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    !list_prev_entry(patch, list)->enabled)
		return -EBUSY;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.
	 *
	 * Note: For immediate (no consistency model) patches we don't allow
	 * patch modules to unload since there is no safe/sane method to
	 * determine if a thread is still running in the patched code contained
	 * in the patch module once the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		/* Not-yet-loaded modules are patched in klp_module_coming(). */
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to enable patch '%s'\n",
				patch->mod->name);

			klp_cancel_transition();
			return ret;
		}
	}

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = true;

	return 0;
}
404
405/**
406 * klp_enable_patch() - enables a registered patch
407 * @patch: The registered, disabled patch to be enabled
408 *
409 * Performs the needed symbol lookups and code relocations,
410 * then registers the patched functions with ftrace.
411 *
412 * Return: 0 on success, otherwise error
413 */
414int klp_enable_patch(struct klp_patch *patch)
415{
416 int ret;
417
418 mutex_lock(&klp_mutex);
419
420 if (!klp_is_patch_registered(patch)) {
421 ret = -EINVAL;
422 goto err;
423 }
424
425 ret = __klp_enable_patch(patch);
426
427err:
428 mutex_unlock(&klp_mutex);
429 return ret;
430}
431EXPORT_SYMBOL_GPL(klp_enable_patch);
432
433/*
434 * Sysfs Interface
435 *
436 * /sys/kernel/livepatch
437 * /sys/kernel/livepatch/<patch>
438 * /sys/kernel/livepatch/<patch>/enabled
d83a7cb3 439 * /sys/kernel/livepatch/<patch>/transition
b700e7f0 440 * /sys/kernel/livepatch/<patch>/<object>
444f9e99 441 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
b700e7f0
SJ
442 */
443
/*
 * Write handler for /sys/kernel/livepatch/<patch>/enabled.
 *
 * Writing "1" enables the patch and "0" disables it. If this patch is the
 * one currently in transition, writing the opposite value reverses the
 * transition instead.
 */
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		/*
		 * Module with the patch could either disappear meanwhile or is
		 * not properly initialized yet.
		 */
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (patch == klp_transition_patch) {
		klp_reverse_transition();
	} else if (enabled) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
494
495static ssize_t enabled_show(struct kobject *kobj,
496 struct kobj_attribute *attr, char *buf)
497{
498 struct klp_patch *patch;
499
500 patch = container_of(kobj, struct klp_patch, kobj);
0dade9f3 501 return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
b700e7f0
SJ
502}
503
d83a7cb3
JP
504static ssize_t transition_show(struct kobject *kobj,
505 struct kobj_attribute *attr, char *buf)
506{
507 struct klp_patch *patch;
508
509 patch = container_of(kobj, struct klp_patch, kobj);
510 return snprintf(buf, PAGE_SIZE-1, "%d\n",
511 patch == klp_transition_patch);
512}
513
b700e7f0 514static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
d83a7cb3 515static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
b700e7f0
SJ
516static struct attribute *klp_patch_attrs[] = {
517 &enabled_kobj_attr.attr,
d83a7cb3 518 &transition_kobj_attr.attr,
b700e7f0
SJ
519 NULL
520};
521
static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	/*
	 * Wake up whoever is waiting in klp_init_patch()/klp_unregister_patch()
	 * for the sysfs teardown of this patch to finish.
	 */
	complete(&patch->finish);
}
529
static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

/*
 * The kobject core requires a release callback; object and func kobjects
 * free nothing here (their storage is owned by the patch module).
 */
static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
553
554/*
555 * Free all functions' kobjects in the array up to some limit. When limit is
556 * NULL, all kobjects are freed.
557 */
558static void klp_free_funcs_limited(struct klp_object *obj,
559 struct klp_func *limit)
560{
561 struct klp_func *func;
562
563 for (func = obj->funcs; func->old_name && func != limit; func++)
564 kobject_put(&func->kobj);
565}
566
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	/* Old addresses become stale once the module is gone. */
	klp_for_each_func(obj, func)
		func->old_addr = 0;
}
577
578/*
579 * Free all objects' kobjects in the array up to some limit. When limit is
580 * NULL, all kobjects are freed.
581 */
582static void klp_free_objects_limited(struct klp_patch *patch,
583 struct klp_object *limit)
584{
585 struct klp_object *obj;
586
587 for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
588 klp_free_funcs_limited(obj, NULL);
cad706df 589 kobject_put(&obj->kobj);
b700e7f0
SJ
590 }
591}
592
/* Release all of a patch's object/func kobjects and unlink it from the list. */
static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
}
599
/*
 * Initialize one patched function: reset its runtime state and create its
 * sysfs entry under the object's directory. Returns 0 or a negative errno.
 */
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/* The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
618
255e732c
JY
/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
	/* Default weak implementation: nothing to do. */
}
624
b700e7f0
SJ
/* parts of the initialization that is done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	/* Relocations write into the patch module's text; lift RO briefly. */
	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		/* Restore protections even on failure. */
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	/* Resolve each function's old address and both old/new sizes. */
	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset(func->old_addr,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}
668
/*
 * Initialize one patched object: reset its state, look up its backing module
 * (if any), create its sysfs directory and initialize all of its functions.
 * If the object is already loaded, also perform the load-time steps.
 */
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	/* Release the func kobjects created so far, then the object's. */
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}
708
/*
 * Create @patch's sysfs hierarchy, initialize all of its objects and add it
 * to the global patch list. On failure the patch kobject is released and
 * its sysfs teardown is awaited before returning.
 */
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->enabled = false;
	init_completion(&patch->finish);

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);

	/* Drop the lock before waiting for the kobject release. */
	mutex_unlock(&klp_mutex);

	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return ret;
}
751
/**
 * klp_unregister_patch() - unregisters a patch
 * @patch: Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	/* An enabled patch must be disabled before it can be removed. */
	if (patch->enabled) {
		ret = -EBUSY;
		goto err;
	}

	klp_free_patch(patch);

	mutex_unlock(&klp_mutex);

	/* Wait outside the lock for the sysfs release to finish. */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return 0;
err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);
789
/**
 * klp_register_patch() - registers a patch
 * @patch: Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * There is no need to take the reference on the patch module here. It is done
 * later when the patch is enabled.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module",
		       patch->mod->name);
		return -EINVAL;
	}

	/* The sysfs root must exist (klp_init() succeeded). */
	if (!klp_initialized())
		return -ENODEV;

	/*
	 * Architectures without reliable stack traces have to set
	 * patch->immediate because there's currently no way to patch kthreads
	 * with the consistency model.
	 */
	if (!klp_have_reliable_stack() && !patch->immediate) {
		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
		return -ENOSYS;
	}

	return klp_init_patch(patch);
}
EXPORT_SYMBOL_GPL(klp_register_patch);
829
7e545d6e 830int klp_module_coming(struct module *mod)
b700e7f0 831{
b700e7f0 832 int ret;
7e545d6e
JY
833 struct klp_patch *patch;
834 struct klp_object *obj;
b700e7f0 835
7e545d6e
JY
836 if (WARN_ON(mod->state != MODULE_STATE_COMING))
837 return -EINVAL;
b700e7f0 838
7e545d6e
JY
839 mutex_lock(&klp_mutex);
840 /*
841 * Each module has to know that klp_module_coming()
842 * has been called. We never know what module will
843 * get patched by a new patch.
844 */
845 mod->klp_alive = true;
b700e7f0 846
7e545d6e
JY
847 list_for_each_entry(patch, &klp_patches, list) {
848 klp_for_each_object(patch, obj) {
849 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
850 continue;
b700e7f0 851
7e545d6e 852 obj->mod = mod;
b700e7f0 853
7e545d6e
JY
854 ret = klp_init_object_loaded(patch, obj);
855 if (ret) {
856 pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
857 patch->mod->name, obj->mod->name, ret);
858 goto err;
859 }
b700e7f0 860
d83a7cb3
JP
861 /*
862 * Only patch the module if the patch is enabled or is
863 * in transition.
864 */
865 if (!patch->enabled && patch != klp_transition_patch)
7e545d6e
JY
866 break;
867
868 pr_notice("applying patch '%s' to loading module '%s'\n",
869 patch->mod->name, obj->mod->name);
870
0dade9f3 871 ret = klp_patch_object(obj);
7e545d6e
JY
872 if (ret) {
873 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
874 patch->mod->name, obj->mod->name, ret);
875 goto err;
876 }
877
878 break;
879 }
880 }
b700e7f0 881
7e545d6e 882 mutex_unlock(&klp_mutex);
b700e7f0 883
7e545d6e 884 return 0;
b700e7f0 885
7e545d6e
JY
886err:
887 /*
888 * If a patch is unsuccessfully applied, return
889 * error to the module loader.
890 */
891 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
892 patch->mod->name, obj->mod->name, obj->mod->name);
893 mod->klp_alive = false;
b700e7f0 894 klp_free_object_loaded(obj);
7e545d6e
JY
895 mutex_unlock(&klp_mutex);
896
897 return ret;
b700e7f0
SJ
898}
899
7e545d6e 900void klp_module_going(struct module *mod)
b700e7f0 901{
b700e7f0
SJ
902 struct klp_patch *patch;
903 struct klp_object *obj;
904
7e545d6e
JY
905 if (WARN_ON(mod->state != MODULE_STATE_GOING &&
906 mod->state != MODULE_STATE_COMING))
907 return;
b700e7f0
SJ
908
909 mutex_lock(&klp_mutex);
8cb2c2dc 910 /*
7e545d6e
JY
911 * Each module has to know that klp_module_going()
912 * has been called. We never know what module will
913 * get patched by a new patch.
8cb2c2dc 914 */
7e545d6e 915 mod->klp_alive = false;
8cb2c2dc 916
b700e7f0 917 list_for_each_entry(patch, &klp_patches, list) {
8cdd043a 918 klp_for_each_object(patch, obj) {
b700e7f0
SJ
919 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
920 continue;
921
d83a7cb3
JP
922 /*
923 * Only unpatch the module if the patch is enabled or
924 * is in transition.
925 */
926 if (patch->enabled || patch == klp_transition_patch) {
7e545d6e
JY
927 pr_notice("reverting patch '%s' on unloading module '%s'\n",
928 patch->mod->name, obj->mod->name);
0dade9f3 929 klp_unpatch_object(obj);
7e545d6e 930 }
b700e7f0 931
7e545d6e 932 klp_free_object_loaded(obj);
b700e7f0
SJ
933 break;
934 }
935 }
936
937 mutex_unlock(&klp_mutex);
b700e7f0
SJ
938}
939
26029d88 940static int __init klp_init(void)
b700e7f0
SJ
941{
942 int ret;
943
b9dfe0be
JK
944 ret = klp_check_compiler_support();
945 if (ret) {
946 pr_info("Your compiler is too old; turning off.\n");
947 return -EINVAL;
948 }
949
b700e7f0 950 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
7e545d6e
JY
951 if (!klp_root_kobj)
952 return -ENOMEM;
b700e7f0
SJ
953
954 return 0;
b700e7f0
SJ
955}
956
957module_init(klp_init);