mirror_ubuntu-jammy-kernel.git / tools/objtool/check.c
x86,objtool: Create .return_sites
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4 */
5
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10
11 #include <arch/elf.h>
12 #include <objtool/builtin.h>
13 #include <objtool/cfi.h>
14 #include <objtool/arch.h>
15 #include <objtool/check.h>
16 #include <objtool/special.h>
17 #include <objtool/warn.h>
18 #include <objtool/endianness.h>
19
20 #include <linux/objtool.h>
21 #include <linux/hashtable.h>
22 #include <linux/kernel.h>
23 #include <linux/static_call_types.h>
24
25 struct alternative {
26 struct list_head list;
27 struct instruction *insn;
28 bool skip_orig;
29 };
30
31 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
32
33 static struct cfi_init_state initial_func_cfi;
34 static struct cfi_state init_cfi;
35 static struct cfi_state func_cfi;
36
37 struct instruction *find_insn(struct objtool_file *file,
38 struct section *sec, unsigned long offset)
39 {
40 struct instruction *insn;
41
42 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
43 if (insn->sec == sec && insn->offset == offset)
44 return insn;
45 }
46
47 return NULL;
48 }
49
50 static struct instruction *next_insn_same_sec(struct objtool_file *file,
51 struct instruction *insn)
52 {
53 struct instruction *next = list_next_entry(insn, list);
54
55 if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
56 return NULL;
57
58 return next;
59 }
60
61 static struct instruction *next_insn_same_func(struct objtool_file *file,
62 struct instruction *insn)
63 {
64 struct instruction *next = list_next_entry(insn, list);
65 struct symbol *func = insn->func;
66
67 if (!func)
68 return NULL;
69
70 if (&next->list != &file->insn_list && next->func == func)
71 return next;
72
73 /* Check if we're already in the subfunction: */
74 if (func == func->cfunc)
75 return NULL;
76
77 /* Move to the subfunction: */
78 return find_insn(file, func->cfunc->sec, func->cfunc->offset);
79 }
80
81 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
82 struct instruction *insn)
83 {
84 struct instruction *prev = list_prev_entry(insn, list);
85
86 if (&prev->list != &file->insn_list && prev->func == insn->func)
87 return prev;
88
89 return NULL;
90 }
91
92 #define func_for_each_insn(file, func, insn) \
93 for (insn = find_insn(file, func->sec, func->offset); \
94 insn; \
95 insn = next_insn_same_func(file, insn))
96
97 #define sym_for_each_insn(file, sym, insn) \
98 for (insn = find_insn(file, sym->sec, sym->offset); \
99 insn && &insn->list != &file->insn_list && \
100 insn->sec == sym->sec && \
101 insn->offset < sym->offset + sym->len; \
102 insn = list_next_entry(insn, list))
103
104 #define sym_for_each_insn_continue_reverse(file, sym, insn) \
105 for (insn = list_prev_entry(insn, list); \
106 &insn->list != &file->insn_list && \
107 insn->sec == sym->sec && insn->offset >= sym->offset; \
108 insn = list_prev_entry(insn, list))
109
110 #define sec_for_each_insn_from(file, insn) \
111 for (; insn; insn = next_insn_same_sec(file, insn))
112
113 #define sec_for_each_insn_continue(file, insn) \
114 for (insn = next_insn_same_sec(file, insn); insn; \
115 insn = next_insn_same_sec(file, insn))
116
117 static bool is_jump_table_jump(struct instruction *insn)
118 {
119 struct alt_group *alt_group = insn->alt_group;
120
121 if (insn->jump_table)
122 return true;
123
124 /* Retpoline alternative for a jump table? */
125 return alt_group && alt_group->orig_group &&
126 alt_group->orig_group->first_insn->jump_table;
127 }
128
129 static bool is_sibling_call(struct instruction *insn)
130 {
131 /*
132 * Assume only ELF functions can make sibling calls. This ensures
133 * sibling call detection consistency between vmlinux.o and individual
134 * objects.
135 */
136 if (!insn->func)
137 return false;
138
139 /* An indirect jump is either a sibling call or a jump to a table. */
140 if (insn->type == INSN_JUMP_DYNAMIC)
141 return !is_jump_table_jump(insn);
142
143 /* add_jump_destinations() sets insn->call_dest for sibling calls. */
144 return (is_static_jump(insn) && insn->call_dest);
145 }
146
147 /*
148 * This checks to see if the given function is a "noreturn" function.
149 *
150 * For global functions which are outside the scope of this object file, we
151 * have to keep a manual list of them.
152 *
153 * For local functions, we have to detect them by simply looking for
154 * the lack of a return instruction.
155 */
156 static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
157 int recursion)
158 {
159 int i;
160 struct instruction *insn;
161 bool empty = true;
162
163 /*
164 * Unfortunately these have to be hard coded because the noreturn
165 * attribute isn't provided in ELF data.
166 */
167 static const char * const global_noreturns[] = {
168 "__stack_chk_fail",
169 "panic",
170 "do_exit",
171 "do_task_dead",
172 "__module_put_and_exit",
173 "complete_and_exit",
174 "__reiserfs_panic",
175 "lbug_with_loc",
176 "fortify_panic",
177 "usercopy_abort",
178 "machine_real_restart",
179 "rewind_stack_do_exit",
180 "kunit_try_catch_throw",
181 "xen_start_kernel",
182 "cpu_bringup_and_idle",
183 };
184
185 if (!func)
186 return false;
187
188 if (func->bind == STB_WEAK)
189 return false;
190
191 if (func->bind == STB_GLOBAL)
192 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
193 if (!strcmp(func->name, global_noreturns[i]))
194 return true;
195
196 if (!func->len)
197 return false;
198
199 insn = find_insn(file, func->sec, func->offset);
200 if (!insn->func)
201 return false;
202
203 func_for_each_insn(file, func, insn) {
204 empty = false;
205
206 if (insn->type == INSN_RETURN)
207 return false;
208 }
209
210 if (empty)
211 return false;
212
213 /*
214 * A function can have a sibling call instead of a return. In that
215 * case, the function's dead-end status depends on whether the target
216 * of the sibling call returns.
217 */
218 func_for_each_insn(file, func, insn) {
219 if (is_sibling_call(insn)) {
220 struct instruction *dest = insn->jump_dest;
221
222 if (!dest)
223 /* sibling call to another file */
224 return false;
225
226 /* local sibling call */
227 if (recursion == 5) {
228 /*
229 * Infinite recursion: two functions have
230 * sibling calls to each other. This is a very
231 * rare case. It means they aren't dead ends.
232 */
233 return false;
234 }
235
236 return __dead_end_function(file, dest->func, recursion+1);
237 }
238 }
239
240 return true;
241 }
242
243 static bool dead_end_function(struct objtool_file *file, struct symbol *func)
244 {
245 return __dead_end_function(file, func, 0);
246 }
247
248 static void init_cfi_state(struct cfi_state *cfi)
249 {
250 int i;
251
252 for (i = 0; i < CFI_NUM_REGS; i++) {
253 cfi->regs[i].base = CFI_UNDEFINED;
254 cfi->vals[i].base = CFI_UNDEFINED;
255 }
256 cfi->cfa.base = CFI_UNDEFINED;
257 cfi->drap_reg = CFI_UNDEFINED;
258 cfi->drap_offset = -1;
259 }
260
261 static void init_insn_state(struct insn_state *state, struct section *sec)
262 {
263 memset(state, 0, sizeof(*state));
264 init_cfi_state(&state->cfi);
265
266 /*
267 * We need the full vmlinux for noinstr validation; otherwise we
268 * cannot correctly determine insn->call_dest->sec (external symbols
269 * do not have a section).
270 */
271 if (vmlinux && noinstr && sec)
272 state->noinstr = sec->noinstr;
273 }
274
275 static struct cfi_state *cfi_alloc(void)
276 {
277 struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
278 if (!cfi) {
279 WARN("calloc failed");
280 exit(1);
281 }
282 nr_cfi++;
283 return cfi;
284 }
285
286 static int cfi_bits;
287 static struct hlist_head *cfi_hash;
288
289 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
290 {
291 return memcmp((void *)cfi1 + sizeof(cfi1->hash),
292 (void *)cfi2 + sizeof(cfi2->hash),
293 sizeof(struct cfi_state) - sizeof(struct hlist_node));
294 }
295
296 static inline u32 cfi_key(struct cfi_state *cfi)
297 {
298 return jhash((void *)cfi + sizeof(cfi->hash),
299 sizeof(*cfi) - sizeof(cfi->hash), 0);
300 }
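
/*
 * Both cficmp() and cfi_key() skip the leading 'hash' member and treat
 * the remainder of struct cfi_state as one flat byte range, so this
 * scheme assumes 'hash' stays the first field of the struct.
 */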
301
302 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
303 {
304 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
305 struct cfi_state *obj;
306
307 hlist_for_each_entry(obj, head, hash) {
308 if (!cficmp(cfi, obj)) {
309 nr_cfi_cache++;
310 return obj;
311 }
312 }
313
314 obj = cfi_alloc();
315 *obj = *cfi;
316 hlist_add_head(&obj->hash, head);
317
318 return obj;
319 }
320
321 static void cfi_hash_add(struct cfi_state *cfi)
322 {
323 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
324
325 hlist_add_head(&cfi->hash, head);
326 }
327
328 static void *cfi_hash_alloc(unsigned long size)
329 {
330 cfi_bits = max(10, ilog2(size));
331 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
332 PROT_READ|PROT_WRITE,
333 MAP_PRIVATE|MAP_ANON, -1, 0);
334 if (cfi_hash == (void *)-1L) {
335 WARN("mmap fail cfi_hash");
336 cfi_hash = NULL;
337 } else if (stats) {
338 printf("cfi_bits: %d\n", cfi_bits);
339 }
340
341 return cfi_hash;
342 }
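
/*
 * Sizing note: max(10, ilog2(size)) guarantees at least 2^10 buckets,
 * roughly one bucket per expected entry beyond that. The table is a
 * single anonymous mapping that is never unmapped; objtool is a
 * one-shot tool, so teardown is left to process exit.
 */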
343
344 static unsigned long nr_insns;
345 static unsigned long nr_insns_visited;
346
347 /*
348 * Call the arch-specific instruction decoder for all the instructions and add
349 * them to the global instruction list.
350 */
351 static int decode_instructions(struct objtool_file *file)
352 {
353 struct section *sec;
354 struct symbol *func;
355 unsigned long offset;
356 struct instruction *insn;
357 int ret;
358
359 for_each_sec(file, sec) {
360
361 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
362 continue;
363
364 if (strcmp(sec->name, ".altinstr_replacement") &&
365 strcmp(sec->name, ".altinstr_aux") &&
366 strncmp(sec->name, ".discard.", 9))
367 sec->text = true;
368
369 if (!strcmp(sec->name, ".noinstr.text") ||
370 !strcmp(sec->name, ".entry.text"))
371 sec->noinstr = true;
372
373 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
374 insn = malloc(sizeof(*insn));
375 if (!insn) {
376 WARN("malloc failed");
377 return -1;
378 }
379 memset(insn, 0, sizeof(*insn));
380 INIT_LIST_HEAD(&insn->alts);
381 INIT_LIST_HEAD(&insn->stack_ops);
382
383 insn->sec = sec;
384 insn->offset = offset;
385
386 ret = arch_decode_instruction(file->elf, sec, offset,
387 sec->sh.sh_size - offset,
388 &insn->len, &insn->type,
389 &insn->immediate,
390 &insn->stack_ops);
391 if (ret)
392 goto err;
393
394 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
395 list_add_tail(&insn->list, &file->insn_list);
396 nr_insns++;
397 }
398
399 list_for_each_entry(func, &sec->symbol_list, list) {
400 if (func->type != STT_FUNC || func->alias != func)
401 continue;
402
403 if (!find_insn(file, sec, func->offset)) {
404 WARN("%s(): can't find starting instruction",
405 func->name);
406 return -1;
407 }
408
409 sym_for_each_insn(file, func, insn)
410 insn->func = func;
411 }
412 }
413
414 if (stats)
415 printf("nr_insns: %lu\n", nr_insns);
416
417 return 0;
418
419 err:
420 free(insn);
421 return ret;
422 }
423
424 static struct instruction *find_last_insn(struct objtool_file *file,
425 struct section *sec)
426 {
427 struct instruction *insn = NULL;
428 unsigned int offset;
429 unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
430
431 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
432 insn = find_insn(file, sec, offset);
433
434 return insn;
435 }
436
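/*
 * For reference, the "unreachable" annotations consumed below are
 * emitted by the kernel's annotate_unreachable() macro, roughly (a
 * sketch; details vary across kernel versions):
 *
 *	asm volatile("%c0:\n\t"
 *		     ".pushsection .discard.unreachable\n\t"
 *		     ".long %c0b - .\n\t"
 *		     ".popsection\n\t" : : "i" (__COUNTER__));
 *
 * i.e. each entry carries a relocation pointing back at the annotated
 * instruction.
 */
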
437 /*
438 * Mark "ud2" instructions and manually annotated dead ends.
439 */
440 static int add_dead_ends(struct objtool_file *file)
441 {
442 struct section *sec;
443 struct reloc *reloc;
444 struct instruction *insn;
445
446 /*
447 * By default, "ud2" is a dead end unless otherwise annotated, because
448 * GCC 7 inserts it for certain divide-by-zero cases.
449 */
450 for_each_insn(file, insn)
451 if (insn->type == INSN_BUG)
452 insn->dead_end = true;
453
454 /*
455 * Check for manually annotated dead ends.
456 */
457 sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
458 if (!sec)
459 goto reachable;
460
461 list_for_each_entry(reloc, &sec->reloc_list, list) {
462 if (reloc->sym->type != STT_SECTION) {
463 WARN("unexpected relocation symbol type in %s", sec->name);
464 return -1;
465 }
466 insn = find_insn(file, reloc->sym->sec, reloc->addend);
467 if (insn)
468 insn = list_prev_entry(insn, list);
469 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
470 insn = find_last_insn(file, reloc->sym->sec);
471 if (!insn) {
472 WARN("can't find unreachable insn at %s+0x%" PRIx64,
473 reloc->sym->sec->name, reloc->addend);
474 return -1;
475 }
476 } else {
477 WARN("can't find unreachable insn at %s+0x%" PRIx64,
478 reloc->sym->sec->name, reloc->addend);
479 return -1;
480 }
481
482 insn->dead_end = true;
483 }
484
485 reachable:
486 /*
487 * These manually annotated reachable checks are needed for GCC 4.4,
488 * where the Linux unreachable() macro isn't supported. In that case
489 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
490 * not a dead end.
491 */
492 sec = find_section_by_name(file->elf, ".rela.discard.reachable");
493 if (!sec)
494 return 0;
495
496 list_for_each_entry(reloc, &sec->reloc_list, list) {
497 if (reloc->sym->type != STT_SECTION) {
498 WARN("unexpected relocation symbol type in %s", sec->name);
499 return -1;
500 }
501 insn = find_insn(file, reloc->sym->sec, reloc->addend);
502 if (insn)
503 insn = list_prev_entry(insn, list);
504 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
505 insn = find_last_insn(file, reloc->sym->sec);
506 if (!insn) {
507 WARN("can't find reachable insn at %s+0x%" PRIx64,
508 reloc->sym->sec->name, reloc->addend);
509 return -1;
510 }
511 } else {
512 WARN("can't find reachable insn at %s+0x%" PRIx64,
513 reloc->sym->sec->name, reloc->addend);
514 return -1;
515 }
516
517 insn->dead_end = false;
518 }
519
520 return 0;
521 }
522
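/*
 * Each .static_call_sites entry is a struct static_call_site, which
 * static_call_types.h defines as a pair of 32-bit PC-relative offsets:
 *
 *	struct static_call_site {
 *		s32 addr;	// the call site
 *		s32 key;	// the struct static_call_key
 *	};
 *
 * hence the two relocations below, at entry offsets +0 and +4. The low
 * bits of 'key' double as flags, e.g. STATIC_CALL_SITE_TAIL for tail
 * calls.
 */
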
523 static int create_static_call_sections(struct objtool_file *file)
524 {
525 struct section *sec;
526 struct static_call_site *site;
527 struct instruction *insn;
528 struct symbol *key_sym;
529 char *key_name, *tmp;
530 int idx;
531
532 sec = find_section_by_name(file->elf, ".static_call_sites");
533 if (sec) {
534 INIT_LIST_HEAD(&file->static_call_list);
535 WARN("file already has .static_call_sites section, skipping");
536 return 0;
537 }
538
539 if (list_empty(&file->static_call_list))
540 return 0;
541
542 idx = 0;
543 list_for_each_entry(insn, &file->static_call_list, call_node)
544 idx++;
545
546 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
547 sizeof(struct static_call_site), idx);
548 if (!sec)
549 return -1;
550
551 idx = 0;
552 list_for_each_entry(insn, &file->static_call_list, call_node) {
553
554 site = (struct static_call_site *)sec->data->d_buf + idx;
555 memset(site, 0, sizeof(struct static_call_site));
556
557 /* populate reloc for 'addr' */
558 if (elf_add_reloc_to_insn(file->elf, sec,
559 idx * sizeof(struct static_call_site),
560 R_X86_64_PC32,
561 insn->sec, insn->offset))
562 return -1;
563
564 /* find key symbol */
565 key_name = strdup(insn->call_dest->name);
566 if (!key_name) {
567 perror("strdup");
568 return -1;
569 }
570 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
571 STATIC_CALL_TRAMP_PREFIX_LEN)) {
572 WARN("static_call: trampoline name malformed: %s", key_name);
573 return -1;
574 }
575 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
576 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
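/*
 * e.g. "__SCT__foo" is rewritten in place to "__SCK__foo" (assuming
 * the usual "__SCT__" / "__SCK__" prefixes from static_call_types.h);
 * the offset math above also copes with prefixes of unequal length.
 */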
577
578 key_sym = find_symbol_by_name(file->elf, tmp);
579 if (!key_sym) {
580 if (!module) {
581 WARN("static_call: can't find static_call_key symbol: %s", tmp);
582 return -1;
583 }
584
585 /*
586 * For modules, the key might not be exported, which
587 * means the module can make static calls but isn't
588 * allowed to change them.
589 *
590 * In that case we temporarily set the key to be the
591 * trampoline address. This is fixed up in
592 * static_call_add_module().
593 */
594 key_sym = insn->call_dest;
595 }
596 free(key_name);
597
598 /* populate reloc for 'key' */
599 if (elf_add_reloc(file->elf, sec,
600 idx * sizeof(struct static_call_site) + 4,
601 R_X86_64_PC32, key_sym,
602 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
603 return -1;
604
605 idx++;
606 }
607
608 return 0;
609 }
610
611 static int create_retpoline_sites_sections(struct objtool_file *file)
612 {
613 struct instruction *insn;
614 struct section *sec;
615 int idx;
616
617 sec = find_section_by_name(file->elf, ".retpoline_sites");
618 if (sec) {
619 WARN("file already has .retpoline_sites, skipping");
620 return 0;
621 }
622
623 idx = 0;
624 list_for_each_entry(insn, &file->retpoline_call_list, call_node)
625 idx++;
626
627 if (!idx)
628 return 0;
629
630 sec = elf_create_section(file->elf, ".retpoline_sites", 0,
631 sizeof(int), idx);
632 if (!sec) {
633 WARN("elf_create_section: .retpoline_sites");
634 return -1;
635 }
636
637 idx = 0;
638 list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
639
640 int *site = (int *)sec->data->d_buf + idx;
641 *site = 0;
642
643 if (elf_add_reloc_to_insn(file->elf, sec,
644 idx * sizeof(int),
645 R_X86_64_PC32,
646 insn->sec, insn->offset)) {
647 WARN("elf_add_reloc_to_insn: .retpoline_sites");
648 return -1;
649 }
650
651 idx++;
652 }
653
654 return 0;
655 }
656
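/*
 * .return_sites mirrors .retpoline_sites above: an array of 32-bit
 * PC-relative offsets, one per compiler-generated jump to the return
 * thunk. At boot (or module load) the x86 kernel walks this array and
 * rewrites each site with a plain return sequence when the thunk is
 * not needed; see apply_returns() in arch/x86/kernel/alternative.c.
 */
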
657 static int create_return_sites_sections(struct objtool_file *file)
658 {
659 struct instruction *insn;
660 struct section *sec;
661 int idx;
662
663 sec = find_section_by_name(file->elf, ".return_sites");
664 if (sec) {
665 WARN("file already has .return_sites, skipping");
666 return 0;
667 }
668
669 idx = 0;
670 list_for_each_entry(insn, &file->return_thunk_list, call_node)
671 idx++;
672
673 if (!idx)
674 return 0;
675
676 sec = elf_create_section(file->elf, ".return_sites", 0,
677 sizeof(int), idx);
678 if (!sec) {
679 WARN("elf_create_section: .return_sites");
680 return -1;
681 }
682
683 idx = 0;
684 list_for_each_entry(insn, &file->return_thunk_list, call_node) {
685
686 int *site = (int *)sec->data->d_buf + idx;
687 *site = 0;
688
689 if (elf_add_reloc_to_insn(file->elf, sec,
690 idx * sizeof(int),
691 R_X86_64_PC32,
692 insn->sec, insn->offset)) {
693 WARN("elf_add_reloc_to_insn: .return_sites");
694 return -1;
695 }
696
697 idx++;
698 }
699
700 return 0;
701 }
702
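/*
 * __mcount_loc is consumed by ftrace at boot: an array of
 * unsigned-long-sized slots, one per __fentry__ call site (the sites
 * themselves get NOPed out in annotate_call_site()). Only zeroed slots
 * plus R_X86_64_64 relocations are emitted here; the linker fills in
 * the addresses.
 */
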
703 static int create_mcount_loc_sections(struct objtool_file *file)
704 {
705 struct section *sec;
706 unsigned long *loc;
707 struct instruction *insn;
708 int idx;
709
710 sec = find_section_by_name(file->elf, "__mcount_loc");
711 if (sec) {
712 INIT_LIST_HEAD(&file->mcount_loc_list);
713 WARN("file already has __mcount_loc section, skipping");
714 return 0;
715 }
716
717 if (list_empty(&file->mcount_loc_list))
718 return 0;
719
720 idx = 0;
721 list_for_each_entry(insn, &file->mcount_loc_list, call_node)
722 idx++;
723
724 sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
725 if (!sec)
726 return -1;
727
728 idx = 0;
729 list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
730
731 loc = (unsigned long *)sec->data->d_buf + idx;
732 memset(loc, 0, sizeof(unsigned long));
733
734 if (elf_add_reloc_to_insn(file->elf, sec,
735 idx * sizeof(unsigned long),
736 R_X86_64_64,
737 insn->sec, insn->offset))
738 return -1;
739
740 idx++;
741 }
742
743 return 0;
744 }
745
746 /*
747 * Warnings shouldn't be reported for ignored functions.
748 */
749 static void add_ignores(struct objtool_file *file)
750 {
751 struct instruction *insn;
752 struct section *sec;
753 struct symbol *func;
754 struct reloc *reloc;
755
756 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
757 if (!sec)
758 return;
759
760 list_for_each_entry(reloc, &sec->reloc_list, list) {
761 switch (reloc->sym->type) {
762 case STT_FUNC:
763 func = reloc->sym;
764 break;
765
766 case STT_SECTION:
767 func = find_func_by_offset(reloc->sym->sec, reloc->addend);
768 if (!func)
769 continue;
770 break;
771
772 default:
773 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
774 continue;
775 }
776
777 func_for_each_insn(file, func, insn)
778 insn->ignore = true;
779 }
780 }
781
782 /*
783 * This is a whitelist of functions that are allowed to be called with AC set.
784 * The list is meant to be minimal and only contains compiler instrumentation
785 * ABI and a few functions used to implement *_{to,from}_user() functions.
786 *
787 * These functions must not directly change AC, but may PUSHF/POPF.
788 */
789 static const char *uaccess_safe_builtin[] = {
790 /* KASAN */
791 "kasan_report",
792 "kasan_check_range",
793 /* KASAN out-of-line */
794 "__asan_loadN_noabort",
795 "__asan_load1_noabort",
796 "__asan_load2_noabort",
797 "__asan_load4_noabort",
798 "__asan_load8_noabort",
799 "__asan_load16_noabort",
800 "__asan_storeN_noabort",
801 "__asan_store1_noabort",
802 "__asan_store2_noabort",
803 "__asan_store4_noabort",
804 "__asan_store8_noabort",
805 "__asan_store16_noabort",
806 "__kasan_check_read",
807 "__kasan_check_write",
808 /* KASAN in-line */
809 "__asan_report_load_n_noabort",
810 "__asan_report_load1_noabort",
811 "__asan_report_load2_noabort",
812 "__asan_report_load4_noabort",
813 "__asan_report_load8_noabort",
814 "__asan_report_load16_noabort",
815 "__asan_report_store_n_noabort",
816 "__asan_report_store1_noabort",
817 "__asan_report_store2_noabort",
818 "__asan_report_store4_noabort",
819 "__asan_report_store8_noabort",
820 "__asan_report_store16_noabort",
821 /* KCSAN */
822 "__kcsan_check_access",
823 "kcsan_found_watchpoint",
824 "kcsan_setup_watchpoint",
825 "kcsan_check_scoped_accesses",
826 "kcsan_disable_current",
827 "kcsan_enable_current_nowarn",
828 /* KCSAN/TSAN */
829 "__tsan_func_entry",
830 "__tsan_func_exit",
831 "__tsan_read_range",
832 "__tsan_write_range",
833 "__tsan_read1",
834 "__tsan_read2",
835 "__tsan_read4",
836 "__tsan_read8",
837 "__tsan_read16",
838 "__tsan_write1",
839 "__tsan_write2",
840 "__tsan_write4",
841 "__tsan_write8",
842 "__tsan_write16",
843 "__tsan_read_write1",
844 "__tsan_read_write2",
845 "__tsan_read_write4",
846 "__tsan_read_write8",
847 "__tsan_read_write16",
848 "__tsan_atomic8_load",
849 "__tsan_atomic16_load",
850 "__tsan_atomic32_load",
851 "__tsan_atomic64_load",
852 "__tsan_atomic8_store",
853 "__tsan_atomic16_store",
854 "__tsan_atomic32_store",
855 "__tsan_atomic64_store",
856 "__tsan_atomic8_exchange",
857 "__tsan_atomic16_exchange",
858 "__tsan_atomic32_exchange",
859 "__tsan_atomic64_exchange",
860 "__tsan_atomic8_fetch_add",
861 "__tsan_atomic16_fetch_add",
862 "__tsan_atomic32_fetch_add",
863 "__tsan_atomic64_fetch_add",
864 "__tsan_atomic8_fetch_sub",
865 "__tsan_atomic16_fetch_sub",
866 "__tsan_atomic32_fetch_sub",
867 "__tsan_atomic64_fetch_sub",
868 "__tsan_atomic8_fetch_and",
869 "__tsan_atomic16_fetch_and",
870 "__tsan_atomic32_fetch_and",
871 "__tsan_atomic64_fetch_and",
872 "__tsan_atomic8_fetch_or",
873 "__tsan_atomic16_fetch_or",
874 "__tsan_atomic32_fetch_or",
875 "__tsan_atomic64_fetch_or",
876 "__tsan_atomic8_fetch_xor",
877 "__tsan_atomic16_fetch_xor",
878 "__tsan_atomic32_fetch_xor",
879 "__tsan_atomic64_fetch_xor",
880 "__tsan_atomic8_fetch_nand",
881 "__tsan_atomic16_fetch_nand",
882 "__tsan_atomic32_fetch_nand",
883 "__tsan_atomic64_fetch_nand",
884 "__tsan_atomic8_compare_exchange_strong",
885 "__tsan_atomic16_compare_exchange_strong",
886 "__tsan_atomic32_compare_exchange_strong",
887 "__tsan_atomic64_compare_exchange_strong",
888 "__tsan_atomic8_compare_exchange_weak",
889 "__tsan_atomic16_compare_exchange_weak",
890 "__tsan_atomic32_compare_exchange_weak",
891 "__tsan_atomic64_compare_exchange_weak",
892 "__tsan_atomic8_compare_exchange_val",
893 "__tsan_atomic16_compare_exchange_val",
894 "__tsan_atomic32_compare_exchange_val",
895 "__tsan_atomic64_compare_exchange_val",
896 "__tsan_atomic_thread_fence",
897 "__tsan_atomic_signal_fence",
898 /* KCOV */
899 "write_comp_data",
900 "check_kcov_mode",
901 "__sanitizer_cov_trace_pc",
902 "__sanitizer_cov_trace_const_cmp1",
903 "__sanitizer_cov_trace_const_cmp2",
904 "__sanitizer_cov_trace_const_cmp4",
905 "__sanitizer_cov_trace_const_cmp8",
906 "__sanitizer_cov_trace_cmp1",
907 "__sanitizer_cov_trace_cmp2",
908 "__sanitizer_cov_trace_cmp4",
909 "__sanitizer_cov_trace_cmp8",
910 "__sanitizer_cov_trace_switch",
911 /* UBSAN */
912 "ubsan_type_mismatch_common",
913 "__ubsan_handle_type_mismatch",
914 "__ubsan_handle_type_mismatch_v1",
915 "__ubsan_handle_shift_out_of_bounds",
916 /* misc */
917 "csum_partial_copy_generic",
918 "copy_mc_fragile",
919 "copy_mc_fragile_handle_tail",
920 "copy_mc_enhanced_fast_string",
921 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
922 NULL
923 };
924
925 static void add_uaccess_safe(struct objtool_file *file)
926 {
927 struct symbol *func;
928 const char **name;
929
930 if (!uaccess)
931 return;
932
933 for (name = uaccess_safe_builtin; *name; name++) {
934 func = find_symbol_by_name(file->elf, *name);
935 if (!func)
936 continue;
937
938 func->uaccess_safe = true;
939 }
940 }
941
942 /*
943 * FIXME: For now, just ignore any alternatives which add retpolines. This is
944 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
945 * But it at least allows objtool to understand the control flow *around* the
946 * retpoline.
947 */
948 static int add_ignore_alternatives(struct objtool_file *file)
949 {
950 struct section *sec;
951 struct reloc *reloc;
952 struct instruction *insn;
953
954 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
955 if (!sec)
956 return 0;
957
958 list_for_each_entry(reloc, &sec->reloc_list, list) {
959 if (reloc->sym->type != STT_SECTION) {
960 WARN("unexpected relocation symbol type in %s", sec->name);
961 return -1;
962 }
963
964 insn = find_insn(file, reloc->sym->sec, reloc->addend);
965 if (!insn) {
966 WARN("bad .discard.ignore_alts entry");
967 return -1;
968 }
969
970 insn->ignore_alts = true;
971 }
972
973 return 0;
974 }
975
976 __weak bool arch_is_retpoline(struct symbol *sym)
977 {
978 return false;
979 }
980
981 __weak bool arch_is_rethunk(struct symbol *sym)
982 {
983 return false;
984 }
985
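/*
 * Cached "no reloc here" sentinel for insn->reloc, so insn_reloc()
 * only has to search the reloc tables once per instruction.
 */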
986 #define NEGATIVE_RELOC ((void *)-1L)
987
988 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
989 {
990 if (insn->reloc == NEGATIVE_RELOC)
991 return NULL;
992
993 if (!insn->reloc) {
994 insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
995 insn->offset, insn->len);
996 if (!insn->reloc) {
997 insn->reloc = NEGATIVE_RELOC;
998 return NULL;
999 }
1000 }
1001
1002 return insn->reloc;
1003 }
1004
1005 static void remove_insn_ops(struct instruction *insn)
1006 {
1007 struct stack_op *op, *tmp;
1008
1009 list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
1010 list_del(&op->list);
1011 free(op);
1012 }
1013 }
1014
1015 static void annotate_call_site(struct objtool_file *file,
1016 struct instruction *insn, bool sibling)
1017 {
1018 struct reloc *reloc = insn_reloc(file, insn);
1019 struct symbol *sym = insn->call_dest;
1020
1021 if (!sym)
1022 sym = reloc->sym;
1023
1024 /*
1025 * Alternative replacement code is just template code which is
1026 * sometimes copied to the original instruction. For now, don't
1027 * annotate it. (In the future we might consider annotating the
1028 * original instruction if/when it ever makes sense to do so.)
1029 */
1030 if (!strcmp(insn->sec->name, ".altinstr_replacement"))
1031 return;
1032
1033 if (sym->static_call_tramp) {
1034 list_add_tail(&insn->call_node, &file->static_call_list);
1035 return;
1036 }
1037
1038 if (sym->retpoline_thunk) {
1039 list_add_tail(&insn->call_node, &file->retpoline_call_list);
1040 return;
1041 }
1042
1043 /*
1044 * Many compilers cannot disable KCOV with a function attribute,
1045 * so they need a little help: NOP out any KCOV calls from noinstr
1046 * text.
1047 */
1048 if (insn->sec->noinstr && sym->kcov) {
1049 if (reloc) {
1050 reloc->type = R_NONE;
1051 elf_write_reloc(file->elf, reloc);
1052 }
1053
1054 elf_write_insn(file->elf, insn->sec,
1055 insn->offset, insn->len,
1056 sibling ? arch_ret_insn(insn->len)
1057 : arch_nop_insn(insn->len));
1058
1059 insn->type = sibling ? INSN_RETURN : INSN_NOP;
1060
1061 if (sibling) {
1062 /*
1063 * We've replaced the tail-call JMP insn with two new
1064 * insns (RET; INT3), but we only have a single struct
1065 * insn here. Mark it retpoline_safe to avoid the SLS
1066 * warning instead of adding another insn.
1067 */
1068 insn->retpoline_safe = true;
1069 }
1070
1071 return;
1072 }
1073
1074 if (mcount && sym->fentry) {
1075 if (sibling)
1076 WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);
1077
1078 if (reloc) {
1079 reloc->type = R_NONE;
1080 elf_write_reloc(file->elf, reloc);
1081 }
1082
1083 elf_write_insn(file->elf, insn->sec,
1084 insn->offset, insn->len,
1085 arch_nop_insn(insn->len));
1086
1087 insn->type = INSN_NOP;
1088
1089 list_add_tail(&insn->call_node, &file->mcount_loc_list);
1090 return;
1091 }
1092 }
1093
1094 static void add_call_dest(struct objtool_file *file, struct instruction *insn,
1095 struct symbol *dest, bool sibling)
1096 {
1097 insn->call_dest = dest;
1098 if (!dest)
1099 return;
1100
1101 /*
1102 * Whatever stack impact regular CALLs have, should be undone
1103 * by the RETURN of the called function.
1104 *
1105 * Annotated intra-function calls retain the stack_ops but
1106 * are converted to JUMP, see read_intra_function_calls().
1107 */
1108 remove_insn_ops(insn);
1109
1110 annotate_call_site(file, insn, sibling);
1111 }
1112
1113 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1114 {
1115 /*
1116 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1117 * so convert them accordingly.
1118 */
1119 switch (insn->type) {
1120 case INSN_CALL:
1121 insn->type = INSN_CALL_DYNAMIC;
1122 break;
1123 case INSN_JUMP_UNCONDITIONAL:
1124 insn->type = INSN_JUMP_DYNAMIC;
1125 break;
1126 case INSN_JUMP_CONDITIONAL:
1127 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1128 break;
1129 default:
1130 return;
1131 }
1132
1133 insn->retpoline_safe = true;
1134
1135 /*
1136 * Whatever stack impact regular CALLs have, should be undone
1137 * by the RETURN of the called function.
1138 *
1139 * Annotated intra-function calls retain the stack_ops but
1140 * are converted to JUMP, see read_intra_function_calls().
1141 */
1142 remove_insn_ops(insn);
1143
1144 annotate_call_site(file, insn, false);
1145 }
1146
1147 static void add_return_call(struct objtool_file *file, struct instruction *insn)
1148 {
1149 /*
1150 * Return thunk tail calls are really just returns in disguise,
1151 * so convert them accordingly.
1152 */
1153 insn->type = INSN_RETURN;
1154 insn->retpoline_safe = true;
1155
1156 list_add_tail(&insn->call_node, &file->return_thunk_list);
1157 }
1158
1159 /*
1160 * Find the destination instructions for all jumps.
1161 */
1162 static int add_jump_destinations(struct objtool_file *file)
1163 {
1164 struct instruction *insn;
1165 struct reloc *reloc;
1166 struct section *dest_sec;
1167 unsigned long dest_off;
1168
1169 for_each_insn(file, insn) {
1170 if (!is_static_jump(insn))
1171 continue;
1172
1173 reloc = insn_reloc(file, insn);
1174 if (!reloc) {
1175 dest_sec = insn->sec;
1176 dest_off = arch_jump_destination(insn);
1177 } else if (reloc->sym->type == STT_SECTION) {
1178 dest_sec = reloc->sym->sec;
1179 dest_off = arch_dest_reloc_offset(reloc->addend);
1180 } else if (reloc->sym->retpoline_thunk) {
1181 add_retpoline_call(file, insn);
1182 continue;
1183 } else if (reloc->sym->return_thunk) {
1184 add_return_call(file, insn);
1185 continue;
1186 } else if (insn->func) {
1187 /* internal or external sibling call (with reloc) */
1188 add_call_dest(file, insn, reloc->sym, true);
1189 continue;
1190 } else if (reloc->sym->sec->idx) {
1191 dest_sec = reloc->sym->sec;
1192 dest_off = reloc->sym->sym.st_value +
1193 arch_dest_reloc_offset(reloc->addend);
1194 } else {
1195 /* non-func asm code jumping to another file */
1196 continue;
1197 }
1198
1199 insn->jump_dest = find_insn(file, dest_sec, dest_off);
1200 if (!insn->jump_dest) {
1201
1202 /*
1203 * This is a special case where an alt instruction
1204 * jumps past the end of the section. These are
1205 * handled later in handle_group_alt().
1206 */
1207 if (!strcmp(insn->sec->name, ".altinstr_replacement"))
1208 continue;
1209
1210 WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
1211 insn->sec, insn->offset, dest_sec->name,
1212 dest_off);
1213 return -1;
1214 }
1215
1216 /*
1217 * Cross-function jump.
1218 */
1219 if (insn->func && insn->jump_dest->func &&
1220 insn->func != insn->jump_dest->func) {
1221
1222 /*
1223 * For GCC 8+, create parent/child links for any cold
1224 * subfunctions. This is _mostly_ redundant with a
1225 * similar initialization in read_symbols().
1226 *
1227 * If a function has aliases, we want the *first* such
1228 * function in the symbol table to be the subfunction's
1229 * parent. In that case we overwrite the
1230 * initialization done in read_symbols().
1231 *
1232 * However this code can't completely replace the
1233 * read_symbols() code because this doesn't detect the
1234 * case where the parent function's only reference to a
1235 * subfunction is through a jump table.
1236 */
1237 if (!strstr(insn->func->name, ".cold") &&
1238 strstr(insn->jump_dest->func->name, ".cold")) {
1239 insn->func->cfunc = insn->jump_dest->func;
1240 insn->jump_dest->func->pfunc = insn->func;
1241
1242 } else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
1243 insn->jump_dest->offset == insn->jump_dest->func->offset) {
1244 /* internal sibling call (without reloc) */
1245 add_call_dest(file, insn, insn->jump_dest->func, true);
1246 }
1247 }
1248 }
1249
1250 return 0;
1251 }
1252
1253 static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
1254 {
1255 struct symbol *call_dest;
1256
1257 call_dest = find_func_by_offset(sec, offset);
1258 if (!call_dest)
1259 call_dest = find_symbol_by_offset(sec, offset);
1260
1261 return call_dest;
1262 }
1263
1264 /*
1265 * Find the destination instructions for all calls.
1266 */
1267 static int add_call_destinations(struct objtool_file *file)
1268 {
1269 struct instruction *insn;
1270 unsigned long dest_off;
1271 struct symbol *dest;
1272 struct reloc *reloc;
1273
1274 for_each_insn(file, insn) {
1275 if (insn->type != INSN_CALL)
1276 continue;
1277
1278 reloc = insn_reloc(file, insn);
1279 if (!reloc) {
1280 dest_off = arch_jump_destination(insn);
1281 dest = find_call_destination(insn->sec, dest_off);
1282
1283 add_call_dest(file, insn, dest, false);
1284
1285 if (insn->ignore)
1286 continue;
1287
1288 if (!insn->call_dest) {
1289 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
1290 return -1;
1291 }
1292
1293 if (insn->func && insn->call_dest->type != STT_FUNC) {
1294 WARN_FUNC("unsupported call to non-function",
1295 insn->sec, insn->offset);
1296 return -1;
1297 }
1298
1299 } else if (reloc->sym->type == STT_SECTION) {
1300 dest_off = arch_dest_reloc_offset(reloc->addend);
1301 dest = find_call_destination(reloc->sym->sec, dest_off);
1302 if (!dest) {
1303 WARN_FUNC("can't find call dest symbol at %s+0x%lx",
1304 insn->sec, insn->offset,
1305 reloc->sym->sec->name,
1306 dest_off);
1307 return -1;
1308 }
1309
1310 add_call_dest(file, insn, dest, false);
1311
1312 } else if (reloc->sym->retpoline_thunk) {
1313 add_retpoline_call(file, insn);
1314
1315 } else
1316 add_call_dest(file, insn, reloc->sym, false);
1317 }
1318
1319 return 0;
1320 }
1321
1322 /*
1323 * The .alternatives section requires some extra special care over and above
1324 * other special sections because alternatives are patched in place.
1325 */
1326 static int handle_group_alt(struct objtool_file *file,
1327 struct special_alt *special_alt,
1328 struct instruction *orig_insn,
1329 struct instruction **new_insn)
1330 {
1331 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
1332 struct alt_group *orig_alt_group, *new_alt_group;
1333 unsigned long dest_off;
1334
1335
1336 orig_alt_group = malloc(sizeof(*orig_alt_group));
1337 if (!orig_alt_group) {
1338 WARN("malloc failed");
1339 return -1;
1340 }
1341 orig_alt_group->cfi = calloc(special_alt->orig_len,
1342 sizeof(struct cfi_state *));
1343 if (!orig_alt_group->cfi) {
1344 WARN("calloc failed");
1345 return -1;
1346 }
1347
1348 last_orig_insn = NULL;
1349 insn = orig_insn;
1350 sec_for_each_insn_from(file, insn) {
1351 if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1352 break;
1353
1354 insn->alt_group = orig_alt_group;
1355 last_orig_insn = insn;
1356 }
1357 orig_alt_group->orig_group = NULL;
1358 orig_alt_group->first_insn = orig_insn;
1359 orig_alt_group->last_insn = last_orig_insn;
1360
1361
1362 new_alt_group = malloc(sizeof(*new_alt_group));
1363 if (!new_alt_group) {
1364 WARN("malloc failed");
1365 return -1;
1366 }
1367
1368 if (special_alt->new_len < special_alt->orig_len) {
1369 /*
1370 * Insert a fake nop at the end to make the replacement
1371 * alt_group the same size as the original. This is needed to
1372 * allow propagate_alt_cfi() to do its magic. When the last
1373 * instruction affects the stack, the instruction after it (the
1374 * nop) will propagate the new state to the shared CFI array.
1375 */
1376 nop = malloc(sizeof(*nop));
1377 if (!nop) {
1378 WARN("malloc failed");
1379 return -1;
1380 }
1381 memset(nop, 0, sizeof(*nop));
1382 INIT_LIST_HEAD(&nop->alts);
1383 INIT_LIST_HEAD(&nop->stack_ops);
1384
1385 nop->sec = special_alt->new_sec;
1386 nop->offset = special_alt->new_off + special_alt->new_len;
1387 nop->len = special_alt->orig_len - special_alt->new_len;
1388 nop->type = INSN_NOP;
1389 nop->func = orig_insn->func;
1390 nop->alt_group = new_alt_group;
1391 nop->ignore = orig_insn->ignore_alts;
1392 }
1393
1394 if (!special_alt->new_len) {
1395 *new_insn = nop;
1396 goto end;
1397 }
1398
1399 insn = *new_insn;
1400 sec_for_each_insn_from(file, insn) {
1401 struct reloc *alt_reloc;
1402
1403 if (insn->offset >= special_alt->new_off + special_alt->new_len)
1404 break;
1405
1406 last_new_insn = insn;
1407
1408 insn->ignore = orig_insn->ignore_alts;
1409 insn->func = orig_insn->func;
1410 insn->alt_group = new_alt_group;
1411
1412 /*
1413 * Since alternative replacement code is copy/pasted by the
1414 * kernel after applying relocations, generally such code can't
1415 * have relative-address relocation references to outside the
1416 * .altinstr_replacement section, unless the arch's
1417 * alternatives code can adjust the relative offsets
1418 * accordingly.
1419 */
1420 alt_reloc = insn_reloc(file, insn);
1421 if (alt_reloc &&
1422 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1423
1424 WARN_FUNC("unsupported relocation in alternatives section",
1425 insn->sec, insn->offset);
1426 return -1;
1427 }
1428
1429 if (!is_static_jump(insn))
1430 continue;
1431
1432 if (!insn->immediate)
1433 continue;
1434
1435 dest_off = arch_jump_destination(insn);
1436 if (dest_off == special_alt->new_off + special_alt->new_len)
1437 insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
1438
1439 if (!insn->jump_dest) {
1440 WARN_FUNC("can't find alternative jump destination",
1441 insn->sec, insn->offset);
1442 return -1;
1443 }
1444 }
1445
1446 if (!last_new_insn) {
1447 WARN_FUNC("can't find last new alternative instruction",
1448 special_alt->new_sec, special_alt->new_off);
1449 return -1;
1450 }
1451
1452 if (nop)
1453 list_add(&nop->list, &last_new_insn->list);
1454 end:
1455 new_alt_group->orig_group = orig_alt_group;
1456 new_alt_group->first_insn = *new_insn;
1457 new_alt_group->last_insn = nop ? : last_new_insn;
1458 new_alt_group->cfi = orig_alt_group->cfi;
1459 return 0;
1460 }
1461
1462 /*
1463 * A jump label entry can either convert a nop to a jump or a jump to a nop.
1464 * If the original instruction is a jump, make the alt entry an effective nop
1465 * by just skipping the original instruction.
1466 */
1467 static int handle_jump_alt(struct objtool_file *file,
1468 struct special_alt *special_alt,
1469 struct instruction *orig_insn,
1470 struct instruction **new_insn)
1471 {
1472 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1473 orig_insn->type != INSN_NOP) {
1474
1475 WARN_FUNC("unsupported instruction at jump label",
1476 orig_insn->sec, orig_insn->offset);
1477 return -1;
1478 }
1479
1480 if (special_alt->key_addend & 2) {
1481 struct reloc *reloc = insn_reloc(file, orig_insn);
1482
1483 if (reloc) {
1484 reloc->type = R_NONE;
1485 elf_write_reloc(file->elf, reloc);
1486 }
1487 elf_write_insn(file->elf, orig_insn->sec,
1488 orig_insn->offset, orig_insn->len,
1489 arch_nop_insn(orig_insn->len));
1490 orig_insn->type = INSN_NOP;
1491 }
1492
1493 if (orig_insn->type == INSN_NOP) {
1494 if (orig_insn->len == 2)
1495 file->jl_nop_short++;
1496 else
1497 file->jl_nop_long++;
1498
1499 return 0;
1500 }
1501
1502 if (orig_insn->len == 2)
1503 file->jl_short++;
1504 else
1505 file->jl_long++;
1506
1507 *new_insn = list_next_entry(orig_insn, list);
1508 return 0;
1509 }
1510
1511 /*
1512 * Read all the special sections which have alternate instructions which can be
1513 * patched in or redirected to at runtime. Each instruction having alternate
1514 * instruction(s) has them added to its insn->alts list, which will be
1515 * traversed in validate_branch().
1516 */
1517 static int add_special_section_alts(struct objtool_file *file)
1518 {
1519 struct list_head special_alts;
1520 struct instruction *orig_insn, *new_insn;
1521 struct special_alt *special_alt, *tmp;
1522 struct alternative *alt;
1523 int ret;
1524
1525 ret = special_get_alts(file->elf, &special_alts);
1526 if (ret)
1527 return ret;
1528
1529 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1530
1531 orig_insn = find_insn(file, special_alt->orig_sec,
1532 special_alt->orig_off);
1533 if (!orig_insn) {
1534 WARN_FUNC("special: can't find orig instruction",
1535 special_alt->orig_sec, special_alt->orig_off);
1536 ret = -1;
1537 goto out;
1538 }
1539
1540 new_insn = NULL;
1541 if (!special_alt->group || special_alt->new_len) {
1542 new_insn = find_insn(file, special_alt->new_sec,
1543 special_alt->new_off);
1544 if (!new_insn) {
1545 WARN_FUNC("special: can't find new instruction",
1546 special_alt->new_sec,
1547 special_alt->new_off);
1548 ret = -1;
1549 goto out;
1550 }
1551 }
1552
1553 if (special_alt->group) {
1554 if (!special_alt->orig_len) {
1555 WARN_FUNC("empty alternative entry",
1556 orig_insn->sec, orig_insn->offset);
1557 continue;
1558 }
1559
1560 ret = handle_group_alt(file, special_alt, orig_insn,
1561 &new_insn);
1562 if (ret)
1563 goto out;
1564 } else if (special_alt->jump_or_nop) {
1565 ret = handle_jump_alt(file, special_alt, orig_insn,
1566 &new_insn);
1567 if (ret)
1568 goto out;
1569 }
1570
1571 alt = malloc(sizeof(*alt));
1572 if (!alt) {
1573 WARN("malloc failed");
1574 ret = -1;
1575 goto out;
1576 }
1577
1578 alt->insn = new_insn;
1579 alt->skip_orig = special_alt->skip_orig;
1580 orig_insn->ignore_alts |= special_alt->skip_alt;
1581 list_add_tail(&alt->list, &orig_insn->alts);
1582
1583 list_del(&special_alt->list);
1584 free(special_alt);
1585 }
1586
1587 if (stats) {
1588 printf("jl\\\tNOP\tJMP\n");
1589 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
1590 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
1591 }
1592
1593 out:
1594 return ret;
1595 }
1596
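/*
 * A typical GCC switch jump table in .rodata looks like (sketch):
 *
 *	.section .rodata
 *	.L4:
 *		.quad	.L7	# reloc: func+0x10
 *		.quad	.L9	# reloc: func+0x1c
 *		.quad	.L9	# reloc: func+0x1c
 *		...
 *
 * i.e. consecutive 8-byte entries, each with a relocation into the
 * owning function, which is what the checks below rely on.
 */
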
1597 static int add_jump_table(struct objtool_file *file, struct instruction *insn,
1598 struct reloc *table)
1599 {
1600 struct reloc *reloc = table;
1601 struct instruction *dest_insn;
1602 struct alternative *alt;
1603 struct symbol *pfunc = insn->func->pfunc;
1604 unsigned int prev_offset = 0;
1605
1606 /*
1607 * Each @reloc is a switch table relocation which points to the target
1608 * instruction.
1609 */
1610 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {
1611
1612 /* Check for the end of the table: */
1613 if (reloc != table && reloc->jump_table_start)
1614 break;
1615
1616 /* Make sure the table entries are consecutive: */
1617 if (prev_offset && reloc->offset != prev_offset + 8)
1618 break;
1619
1620 /* Detect function pointers from contiguous objects: */
1621 if (reloc->sym->sec == pfunc->sec &&
1622 reloc->addend == pfunc->offset)
1623 break;
1624
1625 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
1626 if (!dest_insn)
1627 break;
1628
1629 /* Make sure the destination is in the same function: */
1630 if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
1631 break;
1632
1633 alt = malloc(sizeof(*alt));
1634 if (!alt) {
1635 WARN("malloc failed");
1636 return -1;
1637 }
1638
1639 alt->insn = dest_insn;
1640 list_add_tail(&alt->list, &insn->alts);
1641 prev_offset = reloc->offset;
1642 }
1643
1644 if (!prev_offset) {
1645 WARN_FUNC("can't find switch jump table",
1646 insn->sec, insn->offset);
1647 return -1;
1648 }
1649
1650 return 0;
1651 }
1652
1653 /*
1654 * find_jump_table() - Given a dynamic jump, find the switch jump table
1655 * associated with it.
1656 */
1657 static struct reloc *find_jump_table(struct objtool_file *file,
1658 struct symbol *func,
1659 struct instruction *insn)
1660 {
1661 struct reloc *table_reloc;
1662 struct instruction *dest_insn, *orig_insn = insn;
1663
1664 /*
1665 * Backward search using the @first_jump_src links; these help us
1666 * skip over much of the 'in between' code, which could otherwise
1667 * confuse us.
1668 */
1669 for (;
1670 insn && insn->func && insn->func->pfunc == func;
1671 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
1672
1673 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
1674 break;
1675
1676 /* allow small jumps within the range */
1677 if (insn->type == INSN_JUMP_UNCONDITIONAL &&
1678 insn->jump_dest &&
1679 (insn->jump_dest->offset <= insn->offset ||
1680 insn->jump_dest->offset > orig_insn->offset))
1681 break;
1682
1683 table_reloc = arch_find_switch_table(file, insn);
1684 if (!table_reloc)
1685 continue;
1686 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
1687 if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
1688 continue;
1689
1690 return table_reloc;
1691 }
1692
1693 return NULL;
1694 }
1695
1696 /*
1697 * First pass: Mark the head of each jump table so that in the next pass,
1698 * we know when a given jump table ends and the next one starts.
1699 */
1700 static void mark_func_jump_tables(struct objtool_file *file,
1701 struct symbol *func)
1702 {
1703 struct instruction *insn, *last = NULL;
1704 struct reloc *reloc;
1705
1706 func_for_each_insn(file, func, insn) {
1707 if (!last)
1708 last = insn;
1709
1710 /*
1711 * Store back-pointers for unconditional forward jumps such
1712 * that find_jump_table() can back-track using those and
1713 * avoid some potentially confusing code.
1714 */
1715 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
1716 insn->offset > last->offset &&
1717 insn->jump_dest->offset > insn->offset &&
1718 !insn->jump_dest->first_jump_src) {
1719
1720 insn->jump_dest->first_jump_src = insn;
1721 last = insn->jump_dest;
1722 }
1723
1724 if (insn->type != INSN_JUMP_DYNAMIC)
1725 continue;
1726
1727 reloc = find_jump_table(file, func, insn);
1728 if (reloc) {
1729 reloc->jump_table_start = true;
1730 insn->jump_table = reloc;
1731 }
1732 }
1733 }
1734
1735 static int add_func_jump_tables(struct objtool_file *file,
1736 struct symbol *func)
1737 {
1738 struct instruction *insn;
1739 int ret;
1740
1741 func_for_each_insn(file, func, insn) {
1742 if (!insn->jump_table)
1743 continue;
1744
1745 ret = add_jump_table(file, insn, insn->jump_table);
1746 if (ret)
1747 return ret;
1748 }
1749
1750 return 0;
1751 }
1752
1753 /*
1754 * For some switch statements, gcc generates a jump table in the .rodata
1755 * section which contains a list of addresses within the function to jump to.
1756 * This finds these jump tables and adds them to the insn->alts lists.
1757 */
1758 static int add_jump_table_alts(struct objtool_file *file)
1759 {
1760 struct section *sec;
1761 struct symbol *func;
1762 int ret;
1763
1764 if (!file->rodata)
1765 return 0;
1766
1767 for_each_sec(file, sec) {
1768 list_for_each_entry(func, &sec->symbol_list, list) {
1769 if (func->type != STT_FUNC)
1770 continue;
1771
1772 mark_func_jump_tables(file, func);
1773 ret = add_func_jump_tables(file, func);
1774 if (ret)
1775 return ret;
1776 }
1777 }
1778
1779 return 0;
1780 }
1781
1782 static void set_func_state(struct cfi_state *state)
1783 {
1784 state->cfa = initial_func_cfi.cfa;
1785 memcpy(&state->regs, &initial_func_cfi.regs,
1786 CFI_NUM_REGS * sizeof(struct cfi_reg));
1787 state->stack_size = initial_func_cfi.cfa.offset;
1788 }
1789
1790 static int read_unwind_hints(struct objtool_file *file)
1791 {
1792 struct cfi_state cfi = init_cfi;
1793 struct section *sec, *relocsec;
1794 struct unwind_hint *hint;
1795 struct instruction *insn;
1796 struct reloc *reloc;
1797 int i;
1798
1799 sec = find_section_by_name(file->elf, ".discard.unwind_hints");
1800 if (!sec)
1801 return 0;
1802
1803 relocsec = sec->reloc;
1804 if (!relocsec) {
1805 WARN("missing .rela.discard.unwind_hints section");
1806 return -1;
1807 }
1808
1809 if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
1810 WARN("struct unwind_hint size mismatch");
1811 return -1;
1812 }
1813
1814 file->hints = true;
1815
1816 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
1817 hint = (struct unwind_hint *)sec->data->d_buf + i;
1818
1819 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
1820 if (!reloc) {
1821 WARN("can't find reloc for unwind_hints[%d]", i);
1822 return -1;
1823 }
1824
1825 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1826 if (!insn) {
1827 WARN("can't find insn for unwind_hints[%d]", i);
1828 return -1;
1829 }
1830
1831 insn->hint = true;
1832
1833 if (hint->type == UNWIND_HINT_TYPE_FUNC) {
1834 insn->cfi = &func_cfi;
1835 continue;
1836 }
1837
1838 if (insn->cfi)
1839 cfi = *(insn->cfi);
1840
1841 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
1842 WARN_FUNC("unsupported unwind_hint sp base reg %d",
1843 insn->sec, insn->offset, hint->sp_reg);
1844 return -1;
1845 }
1846
1847 cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
1848 cfi.type = hint->type;
1849 cfi.end = hint->end;
1850
1851 insn->cfi = cfi_hash_find_or_add(&cfi);
1852 }
1853
1854 return 0;
1855 }
1856
1857 static int read_retpoline_hints(struct objtool_file *file)
1858 {
1859 struct section *sec;
1860 struct instruction *insn;
1861 struct reloc *reloc;
1862
1863 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
1864 if (!sec)
1865 return 0;
1866
1867 list_for_each_entry(reloc, &sec->reloc_list, list) {
1868 if (reloc->sym->type != STT_SECTION) {
1869 WARN("unexpected relocation symbol type in %s", sec->name);
1870 return -1;
1871 }
1872
1873 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1874 if (!insn) {
1875 WARN("bad .discard.retpoline_safe entry");
1876 return -1;
1877 }
1878
1879 if (insn->type != INSN_JUMP_DYNAMIC &&
1880 insn->type != INSN_CALL_DYNAMIC) {
1881 WARN_FUNC("retpoline_safe hint not an indirect jump/call",
1882 insn->sec, insn->offset);
1883 return -1;
1884 }
1885
1886 insn->retpoline_safe = true;
1887 }
1888
1889 return 0;
1890 }
1891
1892 static int read_instr_hints(struct objtool_file *file)
1893 {
1894 struct section *sec;
1895 struct instruction *insn;
1896 struct reloc *reloc;
1897
1898 sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
1899 if (!sec)
1900 return 0;
1901
1902 list_for_each_entry(reloc, &sec->reloc_list, list) {
1903 if (reloc->sym->type != STT_SECTION) {
1904 WARN("unexpected relocation symbol type in %s", sec->name);
1905 return -1;
1906 }
1907
1908 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1909 if (!insn) {
1910 WARN("bad .discard.instr_end entry");
1911 return -1;
1912 }
1913
1914 insn->instr--;
1915 }
1916
1917 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
1918 if (!sec)
1919 return 0;
1920
1921 list_for_each_entry(reloc, &sec->reloc_list, list) {
1922 if (reloc->sym->type != STT_SECTION) {
1923 WARN("unexpected relocation symbol type in %s", sec->name);
1924 return -1;
1925 }
1926
1927 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1928 if (!insn) {
1929 WARN("bad .discard.instr_begin entry");
1930 return -1;
1931 }
1932
1933 insn->instr++;
1934 }
1935
1936 return 0;
1937 }
1938
1939 static int read_intra_function_calls(struct objtool_file *file)
1940 {
1941 struct instruction *insn;
1942 struct section *sec;
1943 struct reloc *reloc;
1944
1945 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
1946 if (!sec)
1947 return 0;
1948
1949 list_for_each_entry(reloc, &sec->reloc_list, list) {
1950 unsigned long dest_off;
1951
1952 if (reloc->sym->type != STT_SECTION) {
1953 WARN("unexpected relocation symbol type in %s",
1954 sec->name);
1955 return -1;
1956 }
1957
1958 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1959 if (!insn) {
1960 WARN("bad .discard.intra_function_call entry");
1961 return -1;
1962 }
1963
1964 if (insn->type != INSN_CALL) {
1965 WARN_FUNC("intra_function_call not a direct call",
1966 insn->sec, insn->offset);
1967 return -1;
1968 }
1969
1970 /*
1971 * Treat intra-function CALLs as JMPs, but with a stack_op.
1972 * See add_call_destinations(), which strips stack_ops from
1973 * normal CALLs.
1974 */
1975 insn->type = INSN_JUMP_UNCONDITIONAL;
1976
1977 dest_off = insn->offset + insn->len + insn->immediate;
1978 insn->jump_dest = find_insn(file, insn->sec, dest_off);
1979 if (!insn->jump_dest) {
1980 WARN_FUNC("can't find call dest at %s+0x%lx",
1981 insn->sec, insn->offset,
1982 insn->sec->name, dest_off);
1983 return -1;
1984 }
1985 }
1986
1987 return 0;
1988 }
1989
1990 static int classify_symbols(struct objtool_file *file)
1991 {
1992 struct section *sec;
1993 struct symbol *func;
1994
1995 for_each_sec(file, sec) {
1996 list_for_each_entry(func, &sec->symbol_list, list) {
1997 if (func->bind != STB_GLOBAL)
1998 continue;
1999
2000 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2001 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2002 func->static_call_tramp = true;
2003
2004 if (arch_is_retpoline(func))
2005 func->retpoline_thunk = true;
2006
2007 if (arch_is_rethunk(func))
2008 func->return_thunk = true;
2009
2010 if (!strcmp(func->name, "__fentry__"))
2011 func->fentry = true;
2012
2013 if (!strncmp(func->name, "__sanitizer_cov_", 16))
2014 func->kcov = true;
2015 }
2016 }
2017
2018 return 0;
2019 }
2020
2021 static void mark_rodata(struct objtool_file *file)
2022 {
2023 struct section *sec;
2024 bool found = false;
2025
2026 /*
2027 * Search for the following rodata sections, each of which can
2028 * potentially contain jump tables:
2029 *
2030 * - .rodata: can contain GCC switch tables
2031 * - .rodata.<func>: same, if -fdata-sections is being used
2032 * - .rodata..c_jump_table: contains C annotated jump tables
2033 *
2034 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2035 */
2036 for_each_sec(file, sec) {
2037 if (!strncmp(sec->name, ".rodata", 7) &&
2038 !strstr(sec->name, ".str1.")) {
2039 sec->rodata = true;
2040 found = true;
2041 }
2042 }
2043
2044 file->rodata = found;
2045 }
2046
2047 static int decode_sections(struct objtool_file *file)
2048 {
2049 int ret;
2050
2051 mark_rodata(file);
2052
2053 ret = decode_instructions(file);
2054 if (ret)
2055 return ret;
2056
2057 ret = add_dead_ends(file);
2058 if (ret)
2059 return ret;
2060
2061 add_ignores(file);
2062 add_uaccess_safe(file);
2063
2064 ret = add_ignore_alternatives(file);
2065 if (ret)
2066 return ret;
2067
2068 /*
2069 * Must be before add_{jump,call}_destinations().
2070 */
2071 ret = classify_symbols(file);
2072 if (ret)
2073 return ret;
2074
2075 /*
2076 * Must be before add_special_section_alts() as that depends on
2077 * jump_dest being set.
2078 */
2079 ret = add_jump_destinations(file);
2080 if (ret)
2081 return ret;
2082
2083 ret = add_special_section_alts(file);
2084 if (ret)
2085 return ret;
2086
2087 /*
2088 * Must be before add_call_destinations(); it changes INSN_CALL to
2089 * INSN_JUMP_UNCONDITIONAL.
2090 */
2091 ret = read_intra_function_calls(file);
2092 if (ret)
2093 return ret;
2094
2095 ret = add_call_destinations(file);
2096 if (ret)
2097 return ret;
2098
2099 ret = add_jump_table_alts(file);
2100 if (ret)
2101 return ret;
2102
2103 ret = read_unwind_hints(file);
2104 if (ret)
2105 return ret;
2106
2107 ret = read_retpoline_hints(file);
2108 if (ret)
2109 return ret;
2110
2111 ret = read_instr_hints(file);
2112 if (ret)
2113 return ret;
2114
2115 return 0;
2116 }
2117
2118 static bool is_fentry_call(struct instruction *insn)
2119 {
2120 if (insn->type == INSN_CALL &&
2121 insn->call_dest &&
2122 insn->call_dest->fentry)
2123 return true;
2124
2125 return false;
2126 }
2127
2128 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2129 {
2130 struct cfi_state *cfi = &state->cfi;
2131 int i;
2132
2133 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2134 return true;
2135
2136 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2137 return true;
2138
2139 if (cfi->stack_size != initial_func_cfi.cfa.offset)
2140 return true;
2141
2142 for (i = 0; i < CFI_NUM_REGS; i++) {
2143 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2144 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2145 return true;
2146 }
2147
2148 return false;
2149 }
2150
2151 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2152 int expected_offset)
2153 {
2154 return reg->base == CFI_CFA &&
2155 reg->offset == expected_offset;
2156 }
2157
2158 static bool has_valid_stack_frame(struct insn_state *state)
2159 {
2160 struct cfi_state *cfi = &state->cfi;
2161
2162 if (cfi->cfa.base == CFI_BP &&
2163 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2164 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2165 return true;
2166
2167 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2168 return true;
2169
2170 return false;
2171 }
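
/*
 * Worked example (illustrative numbers): after "push %rbp; mov
 * %rsp,%rbp" the CFA is %rbp+16, the saved %rbp sits at CFA-16 and the
 * return address at CFA-8, satisfying the first check above.
 */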
2172
2173 static int update_cfi_state_regs(struct instruction *insn,
2174 struct cfi_state *cfi,
2175 struct stack_op *op)
2176 {
2177 struct cfi_reg *cfa = &cfi->cfa;
2178
2179 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2180 return 0;
2181
2182 /* push */
2183 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2184 cfa->offset += 8;
2185
2186 /* pop */
2187 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2188 cfa->offset -= 8;
2189
2190 /* add immediate to sp */
2191 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2192 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2193 cfa->offset -= op->src.offset;
2194
2195 return 0;
2196 }
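
/*
 * Worked example (illustrative): in an UNWIND_HINT_REGS region with
 * cfa = (SP, 8), a "push %rax" bumps cfa.offset to 16 and a matching
 * "pop %rax" restores it to 8; only the CFA is tracked here, so
 * callee-saved register bookkeeping is skipped.
 */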
2197
2198 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2199 {
2200 if (arch_callee_saved_reg(reg) &&
2201 cfi->regs[reg].base == CFI_UNDEFINED) {
2202 cfi->regs[reg].base = base;
2203 cfi->regs[reg].offset = offset;
2204 }
2205 }
2206
2207 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2208 {
2209 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2210 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2211 }
2212
2213 /*
2214 * A note about DRAP stack alignment:
2215 *
2216 * GCC has the concept of a DRAP register, which is used to help keep track of
2217 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2218 * register. The typical DRAP pattern is:
2219 *
2220 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2221 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2222 * 41 ff 72 f8 pushq -0x8(%r10)
2223 * 55 push %rbp
2224 * 48 89 e5 mov %rsp,%rbp
2225 * (more pushes)
2226 * 41 52 push %r10
2227 * ...
2228 * 41 5a pop %r10
2229 * (more pops)
2230 * 5d pop %rbp
2231 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2232 * c3 retq
2233 *
2234 * There are some variations in the epilogues, like:
2235 *
2236 * 5b pop %rbx
2237 * 41 5a pop %r10
2238 * 41 5c pop %r12
2239 * 41 5d pop %r13
2240 * 41 5e pop %r14
2241 * c9 leaveq
2242 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2243 * c3 retq
2244 *
2245 * and:
2246 *
2247 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2248 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2249 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2250 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2251 * c9 leaveq
2252 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2253 * c3 retq
2254 *
2255 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2256 * restored beforehand:
2257 *
2258 * 41 55 push %r13
2259 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2260 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2261 * ...
2262 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2263 * 41 5d pop %r13
2264 * c3 retq
2265 */
2266 static int update_cfi_state(struct instruction *insn,
2267 struct instruction *next_insn,
2268 struct cfi_state *cfi, struct stack_op *op)
2269 {
2270 struct cfi_reg *cfa = &cfi->cfa;
2271 struct cfi_reg *regs = cfi->regs;
2272
2273 /* stack operations don't make sense with an undefined CFA */
2274 if (cfa->base == CFI_UNDEFINED) {
2275 if (insn->func) {
2276 WARN_FUNC("undefined stack state", insn->sec, insn->offset);
2277 return -1;
2278 }
2279 return 0;
2280 }
2281
2282 if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2283 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2284 return update_cfi_state_regs(insn, cfi, op);
2285
2286 switch (op->dest.type) {
2287
2288 case OP_DEST_REG:
2289 switch (op->src.type) {
2290
2291 case OP_SRC_REG:
2292 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2293 cfa->base == CFI_SP &&
2294 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2295
2296 /* mov %rsp, %rbp */
2297 cfa->base = op->dest.reg;
2298 cfi->bp_scratch = false;
2299 }
2300
2301 else if (op->src.reg == CFI_SP &&
2302 op->dest.reg == CFI_BP && cfi->drap) {
2303
2304 /* drap: mov %rsp, %rbp */
2305 regs[CFI_BP].base = CFI_BP;
2306 regs[CFI_BP].offset = -cfi->stack_size;
2307 cfi->bp_scratch = false;
2308 }
2309
2310 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2311
2312 /*
2313 * mov %rsp, %reg
2314 *
2315 * This is needed for the rare case where GCC
2316 * does:
2317 *
2318 * mov %rsp, %rax
2319 * ...
2320 * mov %rax, %rsp
2321 */
2322 cfi->vals[op->dest.reg].base = CFI_CFA;
2323 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2324 }
2325
2326 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2327 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2328
2329 /*
2330 * mov %rbp, %rsp
2331 *
2332 * Restore the original stack pointer (Clang).
2333 */
2334 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2335 }
2336
2337 else if (op->dest.reg == cfa->base) {
2338
2339 /* mov %reg, %rsp */
2340 if (cfa->base == CFI_SP &&
2341 cfi->vals[op->src.reg].base == CFI_CFA) {
2342
2343 /*
2344 * This is needed for the rare case
2345 * where GCC does something dumb like:
2346 *
2347 * lea 0x8(%rsp), %rcx
2348 * ...
2349 * mov %rcx, %rsp
2350 */
2351 cfa->offset = -cfi->vals[op->src.reg].offset;
2352 cfi->stack_size = cfa->offset;
2353
2354 } else if (cfa->base == CFI_SP &&
2355 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2356 cfi->vals[op->src.reg].offset == cfa->offset) {
2357
2358 /*
2359 * Stack swizzle:
2360 *
2361 * 1: mov %rsp, (%[tos])
2362 * 2: mov %[tos], %rsp
2363 * ...
2364 * 3: pop %rsp
2365 *
2366 * Where:
2367 *
2368 * 1 - places a pointer to the previous
2369 * stack at the Top-of-Stack of the
2370 * new stack.
2371 *
2372 * 2 - switches to the new stack.
2373 *
2374 * 3 - pops the Top-of-Stack to restore
2375 * the original stack.
2376 *
2377 * Note: we set base to SP_INDIRECT
2378 * here and preserve offset. Therefore
2379 * when the unwinder reaches ToS it
2380 * will dereference SP and then add the
2381 * offset to find the next frame, IOW:
2382 * (%rsp) + offset.
2383 */
2384 cfa->base = CFI_SP_INDIRECT;
2385
2386 } else {
2387 cfa->base = CFI_UNDEFINED;
2388 cfa->offset = 0;
2389 }
2390 }
2391
2392 else if (op->dest.reg == CFI_SP &&
2393 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2394 cfi->vals[op->src.reg].offset == cfa->offset) {
2395
2396 /*
2397 * The same stack swizzle case 2) as above. But
2398 * because we can't change cfa->base, case 3)
2399 * will become a regular POP. Pretend we're a
2400 * PUSH so things don't go unbalanced.
2401 */
2402 cfi->stack_size += 8;
2403 }
2404
2406 break;
2407
2408 case OP_SRC_ADD:
2409 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2410
2411 /* add imm, %rsp */
2412 cfi->stack_size -= op->src.offset;
2413 if (cfa->base == CFI_SP)
2414 cfa->offset -= op->src.offset;
2415 break;
2416 }
2417
2418 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2419
2420 /* lea disp(%rbp), %rsp */
2421 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2422 break;
2423 }
2424
2425 if (!cfi->drap && op->src.reg == CFI_SP &&
2426 op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
2427 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
2428
2429 /* lea disp(%rsp), %rbp */
2430 cfa->base = CFI_BP;
2431 cfa->offset -= op->src.offset;
2432 cfi->bp_scratch = false;
2433 break;
2434 }
2435
2436 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2437
2438 /* drap: lea disp(%rsp), %drap */
2439 cfi->drap_reg = op->dest.reg;
2440
2441 /*
2442 * lea disp(%rsp), %reg
2443 *
2444 * This is needed for the rare case where GCC
2445 * does something dumb like:
2446 *
2447 * lea 0x8(%rsp), %rcx
2448 * ...
2449 * mov %rcx, %rsp
2450 */
2451 cfi->vals[op->dest.reg].base = CFI_CFA;
2452 cfi->vals[op->dest.reg].offset =
2453 -cfi->stack_size + op->src.offset;
2454
2455 break;
2456 }
2457
2458 if (cfi->drap && op->dest.reg == CFI_SP &&
2459 op->src.reg == cfi->drap_reg) {
2460
2461 /* drap: lea disp(%drap), %rsp */
2462 cfa->base = CFI_SP;
2463 cfa->offset = cfi->stack_size = -op->src.offset;
2464 cfi->drap_reg = CFI_UNDEFINED;
2465 cfi->drap = false;
2466 break;
2467 }
2468
2469 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
2470 WARN_FUNC("unsupported stack register modification",
2471 insn->sec, insn->offset);
2472 return -1;
2473 }
2474
2475 break;
2476
2477 case OP_SRC_AND:
2478 if (op->dest.reg != CFI_SP ||
2479 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
2480 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2481 WARN_FUNC("unsupported stack pointer realignment",
2482 insn->sec, insn->offset);
2483 return -1;
2484 }
2485
2486 if (cfi->drap_reg != CFI_UNDEFINED) {
2487 /* drap: and imm, %rsp */
2488 cfa->base = cfi->drap_reg;
2489 cfa->offset = cfi->stack_size = 0;
2490 cfi->drap = true;
2491 }
2492
2493 /*
2494 * Older versions of GCC (4.8ish) realign the stack
2495 * without DRAP, with a frame pointer.
2496 */
2497
2498 break;
2499
2500 case OP_SRC_POP:
2501 case OP_SRC_POPF:
2502 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
2503
2504 /* pop %rsp; # restore from a stack swizzle */
2505 cfa->base = CFI_SP;
2506 break;
2507 }
2508
2509 if (!cfi->drap && op->dest.reg == cfa->base) {
2510
2511 /* pop %rbp */
2512 cfa->base = CFI_SP;
2513 }
2514
2515 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
2516 op->dest.reg == cfi->drap_reg &&
2517 cfi->drap_offset == -cfi->stack_size) {
2518
2519 /* drap: pop %drap */
2520 cfa->base = cfi->drap_reg;
2521 cfa->offset = 0;
2522 cfi->drap_offset = -1;
2523
2524 } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
2525
2526 /* pop %reg */
2527 restore_reg(cfi, op->dest.reg);
2528 }
2529
2530 cfi->stack_size -= 8;
2531 if (cfa->base == CFI_SP)
2532 cfa->offset -= 8;
2533
2534 break;
2535
2536 case OP_SRC_REG_INDIRECT:
2537 if (!cfi->drap && op->dest.reg == cfa->base &&
2538 op->dest.reg == CFI_BP) {
2539
2540 /* mov disp(%rsp), %rbp */
2541 cfa->base = CFI_SP;
2542 cfa->offset = cfi->stack_size;
2543 }
2544
2545 if (cfi->drap && op->src.reg == CFI_BP &&
2546 op->src.offset == cfi->drap_offset) {
2547
2548 /* drap: mov disp(%rbp), %drap */
2549 cfa->base = cfi->drap_reg;
2550 cfa->offset = 0;
2551 cfi->drap_offset = -1;
2552 }
2553
2554 if (cfi->drap && op->src.reg == CFI_BP &&
2555 op->src.offset == regs[op->dest.reg].offset) {
2556
2557 /* drap: mov disp(%rbp), %reg */
2558 restore_reg(cfi, op->dest.reg);
2559
2560 } else if (op->src.reg == cfa->base &&
2561 op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
2562
2563 /* mov disp(%rbp), %reg */
2564 /* mov disp(%rsp), %reg */
2565 restore_reg(cfi, op->dest.reg);
2566
2567 } else if (op->src.reg == CFI_SP &&
2568 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
2569
2570 /* mov disp(%rsp), %reg */
2571 restore_reg(cfi, op->dest.reg);
2572 }
2573
2574 break;
2575
2576 default:
2577 WARN_FUNC("unknown stack-related instruction",
2578 insn->sec, insn->offset);
2579 return -1;
2580 }
2581
2582 break;
2583
2584 case OP_DEST_PUSH:
2585 case OP_DEST_PUSHF:
2586 cfi->stack_size += 8;
2587 if (cfa->base == CFI_SP)
2588 cfa->offset += 8;
2589
2590 if (op->src.type != OP_SRC_REG)
2591 break;
2592
2593 if (cfi->drap) {
2594 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2595
2596 /* drap: push %drap */
2597 cfa->base = CFI_BP_INDIRECT;
2598 cfa->offset = -cfi->stack_size;
2599
2600 /* save drap so we know when to restore it */
2601 cfi->drap_offset = -cfi->stack_size;
2602
2603 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
2604
2605 /* drap: push %rbp */
2606 cfi->stack_size = 0;
2607
2608 } else {
2609
2610 /* drap: push %reg */
2611 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
2612 }
2613
2614 } else {
2615
2616 /* push %reg */
2617 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
2618 }
2619
2620 /* detect when asm code uses rbp as a scratch register */
2621 if (!no_fp && insn->func && op->src.reg == CFI_BP &&
2622 cfa->base != CFI_BP)
2623 cfi->bp_scratch = true;
2624 break;
2625
2626 case OP_DEST_REG_INDIRECT:
2627
2628 if (cfi->drap) {
2629 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2630
2631 /* drap: mov %drap, disp(%rbp) */
2632 cfa->base = CFI_BP_INDIRECT;
2633 cfa->offset = op->dest.offset;
2634
2635 /* save drap offset so we know when to restore it */
2636 cfi->drap_offset = op->dest.offset;
2637 } else {
2638
2639 /* drap: mov reg, disp(%rbp) */
2640 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
2641 }
2642
2643 } else if (op->dest.reg == cfa->base) {
2644
2645 /* mov reg, disp(%rbp) */
2646 /* mov reg, disp(%rsp) */
2647 save_reg(cfi, op->src.reg, CFI_CFA,
2648 op->dest.offset - cfi->cfa.offset);
2649
2650 } else if (op->dest.reg == CFI_SP) {
2651
2652 /* mov reg, disp(%rsp) */
2653 save_reg(cfi, op->src.reg, CFI_CFA,
2654 op->dest.offset - cfi->stack_size);
2655
2656 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
2657
2658 /* mov %rsp, (%reg); # setup a stack swizzle. */
2659 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
2660 cfi->vals[op->dest.reg].offset = cfa->offset;
2661 }
2662
2663 break;
2664
2665 case OP_DEST_MEM:
2666 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
2667 WARN_FUNC("unknown stack-related memory operation",
2668 insn->sec, insn->offset);
2669 return -1;
2670 }
2671
2672 /* pop mem */
2673 cfi->stack_size -= 8;
2674 if (cfa->base == CFI_SP)
2675 cfa->offset -= 8;
2676
2677 break;
2678
2679 default:
2680 WARN_FUNC("unknown stack-related instruction",
2681 insn->sec, insn->offset);
2682 return -1;
2683 }
2684
2685 return 0;
2686 }
2687
2688 /*
2689 * The stack layouts of alternative instruction streams can sometimes diverge
2690 * when they contain stack modifications. That's fine as long as the potential stack
2691 * layouts don't conflict at any given potential instruction boundary.
2692 *
2693 * Flatten the CFIs of the different alternative code streams (both original
2694 * and replacement) into a single shared CFI array which can be used to detect
2695 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
2696 */
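/*
 * Illustrative layout (hypothetical offsets): for an alt_group whose
 * first_insn is at +0, an original stream with insns at +0/+5/+10 and a
 * replacement stream with insns at +0/+7 each store their CFI at those
 * byte offsets in the shared array; a cficmp() mismatch at any shared
 * offset is reported below as a stack layout conflict.
 */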
2697 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
2698 {
2699 struct cfi_state **alt_cfi;
2700 int group_off;
2701
2702 if (!insn->alt_group)
2703 return 0;
2704
2705 if (!insn->cfi) {
2706 WARN("CFI missing");
2707 return -1;
2708 }
2709
2710 alt_cfi = insn->alt_group->cfi;
2711 group_off = insn->offset - insn->alt_group->first_insn->offset;
2712
2713 if (!alt_cfi[group_off]) {
2714 alt_cfi[group_off] = insn->cfi;
2715 } else {
2716 if (cficmp(alt_cfi[group_off], insn->cfi)) {
2717 WARN_FUNC("stack layout conflict in alternatives",
2718 insn->sec, insn->offset);
2719 return -1;
2720 }
2721 }
2722
2723 return 0;
2724 }
2725
2726 static int handle_insn_ops(struct instruction *insn,
2727 struct instruction *next_insn,
2728 struct insn_state *state)
2729 {
2730 struct stack_op *op;
2731
2732 list_for_each_entry(op, &insn->stack_ops, list) {
2733
2734 if (update_cfi_state(insn, next_insn, &state->cfi, op))
2735 return 1;
2736
2737 if (!insn->alt_group)
2738 continue;
2739
2740 if (op->dest.type == OP_DEST_PUSHF) {
2741 if (!state->uaccess_stack) {
2742 state->uaccess_stack = 1;
2743 } else if (state->uaccess_stack >> 31) {
2744 WARN_FUNC("PUSHF stack exhausted",
2745 insn->sec, insn->offset);
2746 return 1;
2747 }
2748 state->uaccess_stack <<= 1;
2749 state->uaccess_stack |= state->uaccess;
2750 }
2751
2752 if (op->src.type == OP_SRC_POPF) {
2753 if (state->uaccess_stack) {
2754 state->uaccess = state->uaccess_stack & 1;
2755 state->uaccess_stack >>= 1;
2756 if (state->uaccess_stack == 1)
2757 state->uaccess_stack = 0;
2758 }
2759 }
2760 }
2761
2762 return 0;
2763 }
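
/*
 * Illustrative trace of the uaccess shift register above (hypothetical
 * values): starting with uaccess_stack == 0 and uaccess == 1, a PUSHF
 * yields 0b11 (sentinel bit plus the saved flag); the matching POPF
 * restores uaccess = 1 and, with only the sentinel left, resets the
 * stack to 0.
 */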
2764
2765 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
2766 {
2767 struct cfi_state *cfi1 = insn->cfi;
2768 int i;
2769
2770 if (!cfi1) {
2771 WARN("CFI missing");
2772 return false;
2773 }
2774
2775 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
2776
2777 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
2778 insn->sec, insn->offset,
2779 cfi1->cfa.base, cfi1->cfa.offset,
2780 cfi2->cfa.base, cfi2->cfa.offset);
2781
2782 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
2783 for (i = 0; i < CFI_NUM_REGS; i++) {
2784 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
2785 sizeof(struct cfi_reg)))
2786 continue;
2787
2788 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
2789 insn->sec, insn->offset,
2790 i, cfi1->regs[i].base, cfi1->regs[i].offset,
2791 i, cfi2->regs[i].base, cfi2->regs[i].offset);
2792 break;
2793 }
2794
2795 } else if (cfi1->type != cfi2->type) {
2796
2797 WARN_FUNC("stack state mismatch: type1=%d type2=%d",
2798 insn->sec, insn->offset, cfi1->type, cfi2->type);
2799
2800 } else if (cfi1->drap != cfi2->drap ||
2801 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
2802 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
2803
2804 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
2805 insn->sec, insn->offset,
2806 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
2807 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
2808
2809 } else
2810 return true;
2811
2812 return false;
2813 }
2814
2815 static inline bool func_uaccess_safe(struct symbol *func)
2816 {
2817 if (func)
2818 return func->uaccess_safe;
2819
2820 return false;
2821 }
2822
2823 static inline const char *call_dest_name(struct instruction *insn)
2824 {
2825 if (insn->call_dest)
2826 return insn->call_dest->name;
2827
2828 return "{dynamic}";
2829 }
2830
2831 static inline bool noinstr_call_dest(struct symbol *func)
2832 {
2833 /*
2834 * We can't deal with indirect function calls at present;
2835 * assume they're instrumented.
2836 */
2837 if (!func)
2838 return false;
2839
2840 /*
2841 * If the symbol is from a noinstr section, we're good.
2842 */
2843 if (func->sec->noinstr)
2844 return true;
2845
2846 /*
2847 * The __ubsan_handle_*() calls are like WARN(): they only happen when
2848 * something 'BAD' happened. At the risk of taking the machine down,
2849 * let them proceed to get the message out.
2850 */
2851 if (!strncmp(func->name, "__ubsan_handle_", 15))
2852 return true;
2853
2854 return false;
2855 }
2856
2857 static int validate_call(struct instruction *insn, struct insn_state *state)
2858 {
2859 if (state->noinstr && state->instr <= 0 &&
2860 !noinstr_call_dest(insn->call_dest)) {
2861 WARN_FUNC("call to %s() leaves .noinstr.text section",
2862 insn->sec, insn->offset, call_dest_name(insn));
2863 return 1;
2864 }
2865
2866 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
2867 WARN_FUNC("call to %s() with UACCESS enabled",
2868 insn->sec, insn->offset, call_dest_name(insn));
2869 return 1;
2870 }
2871
2872 if (state->df) {
2873 WARN_FUNC("call to %s() with DF set",
2874 insn->sec, insn->offset, call_dest_name(insn));
2875 return 1;
2876 }
2877
2878 return 0;
2879 }
2880
2881 static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
2882 {
2883 if (has_modified_stack_frame(insn, state)) {
2884 WARN_FUNC("sibling call from callable instruction with modified stack frame",
2885 insn->sec, insn->offset);
2886 return 1;
2887 }
2888
2889 return validate_call(insn, state);
2890 }
2891
2892 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
2893 {
2894 if (state->noinstr && state->instr > 0) {
2895 WARN_FUNC("return with instrumentation enabled",
2896 insn->sec, insn->offset);
2897 return 1;
2898 }
2899
2900 if (state->uaccess && !func_uaccess_safe(func)) {
2901 WARN_FUNC("return with UACCESS enabled",
2902 insn->sec, insn->offset);
2903 return 1;
2904 }
2905
2906 if (!state->uaccess && func_uaccess_safe(func)) {
2907 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
2908 insn->sec, insn->offset);
2909 return 1;
2910 }
2911
2912 if (state->df) {
2913 WARN_FUNC("return with DF set",
2914 insn->sec, insn->offset);
2915 return 1;
2916 }
2917
2918 if (func && has_modified_stack_frame(insn, state)) {
2919 WARN_FUNC("return with modified stack frame",
2920 insn->sec, insn->offset);
2921 return 1;
2922 }
2923
2924 if (state->cfi.bp_scratch) {
2925 WARN_FUNC("BP used as a scratch register",
2926 insn->sec, insn->offset);
2927 return 1;
2928 }
2929
2930 return 0;
2931 }
2932
2933 static struct instruction *next_insn_to_validate(struct objtool_file *file,
2934 struct instruction *insn)
2935 {
2936 struct alt_group *alt_group = insn->alt_group;
2937
2938 /*
2939 * Simulate the fact that alternatives are patched in-place. When the
2940 * end of a replacement alt_group is reached, redirect objtool flow to
2941 * the end of the original alt_group.
2942 */
2943 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
2944 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
2945
2946 return next_insn_same_sec(file, insn);
2947 }
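
/*
 * Illustration (hypothetical offsets): if a replacement group's last
 * insn is at .altinstr_replacement+0x10 and its original group ends at
 * .text+0x40, validation resumes at the instruction after .text+0x40,
 * mimicking the patched-in-place layout.
 */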
2948
2949 /*
2950 * Follow the branch starting at the given instruction, and recursively follow
2951 * any other branches (jumps). Meanwhile, track the frame pointer state at
2952 * each instruction and validate all the rules described in
2953 * tools/objtool/Documentation/stack-validation.txt.
2954 */
2955 static int validate_branch(struct objtool_file *file, struct symbol *func,
2956 struct instruction *insn, struct insn_state state)
2957 {
2958 struct alternative *alt;
2959 struct instruction *next_insn, *prev_insn = NULL;
2960 struct section *sec;
2961 u8 visited;
2962 int ret;
2963
2964 sec = insn->sec;
2965
2966 while (1) {
2967 next_insn = next_insn_to_validate(file, insn);
2968
2969 if (file->c_file && func && insn->func && func != insn->func->pfunc) {
2970 WARN("%s() falls through to next function %s()",
2971 func->name, insn->func->name);
2972 return 1;
2973 }
2974
2975 if (func && insn->ignore) {
2976 WARN_FUNC("BUG: why am I validating an ignored function?",
2977 sec, insn->offset);
2978 return 1;
2979 }
2980
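/*
 * Visits are tracked per uaccess state (bit 0: uaccess off, bit 1:
 * uaccess on) so that every path is validated in both contexts.
 */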
2981 visited = 1 << state.uaccess;
2982 if (insn->visited) {
2983 if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
2984 return 1;
2985
2986 if (insn->visited & visited)
2987 return 0;
2988 } else {
2989 nr_insns_visited++;
2990 }
2991
2992 if (state.noinstr)
2993 state.instr += insn->instr;
2994
2995 if (insn->hint) {
2996 state.cfi = *insn->cfi;
2997 } else {
2998 /* XXX track if we actually changed state.cfi */
2999
3000 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3001 insn->cfi = prev_insn->cfi;
3002 nr_cfi_reused++;
3003 } else {
3004 insn->cfi = cfi_hash_find_or_add(&state.cfi);
3005 }
3006 }
3007
3008 insn->visited |= visited;
3009
3010 if (propagate_alt_cfi(file, insn))
3011 return 1;
3012
3013 if (!insn->ignore_alts && !list_empty(&insn->alts)) {
3014 bool skip_orig = false;
3015
3016 list_for_each_entry(alt, &insn->alts, list) {
3017 if (alt->skip_orig)
3018 skip_orig = true;
3019
3020 ret = validate_branch(file, func, alt->insn, state);
3021 if (ret) {
3022 if (backtrace)
3023 BT_FUNC("(alt)", insn);
3024 return ret;
3025 }
3026 }
3027
3028 if (skip_orig)
3029 return 0;
3030 }
3031
3032 if (handle_insn_ops(insn, next_insn, &state))
3033 return 1;
3034
3035 switch (insn->type) {
3036
3037 case INSN_RETURN:
3038 if (sls && !insn->retpoline_safe &&
3039 next_insn && next_insn->type != INSN_TRAP) {
3040 WARN_FUNC("missing int3 after ret",
3041 insn->sec, insn->offset);
3042 }
3043 return validate_return(func, insn, &state);
3044
3045 case INSN_CALL:
3046 case INSN_CALL_DYNAMIC:
3047 ret = validate_call(insn, &state);
3048 if (ret)
3049 return ret;
3050
3051 if (!no_fp && func && !is_fentry_call(insn) &&
3052 !has_valid_stack_frame(&state)) {
3053 WARN_FUNC("call without frame pointer save/setup",
3054 sec, insn->offset);
3055 return 1;
3056 }
3057
3058 if (dead_end_function(file, insn->call_dest))
3059 return 0;
3060
3061 break;
3062
3063 case INSN_JUMP_CONDITIONAL:
3064 case INSN_JUMP_UNCONDITIONAL:
3065 if (is_sibling_call(insn)) {
3066 ret = validate_sibling_call(insn, &state);
3067 if (ret)
3068 return ret;
3069
3070 } else if (insn->jump_dest) {
3071 ret = validate_branch(file, func,
3072 insn->jump_dest, state);
3073 if (ret) {
3074 if (backtrace)
3075 BT_FUNC("(branch)", insn);
3076 return ret;
3077 }
3078 }
3079
3080 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3081 return 0;
3082
3083 break;
3084
3085 case INSN_JUMP_DYNAMIC:
3086 if (sls && !insn->retpoline_safe &&
3087 next_insn && next_insn->type != INSN_TRAP) {
3088 WARN_FUNC("missing int3 after indirect jump",
3089 insn->sec, insn->offset);
3090 }
3091
3092 /* fallthrough */
3093 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3094 if (is_sibling_call(insn)) {
3095 ret = validate_sibling_call(insn, &state);
3096 if (ret)
3097 return ret;
3098 }
3099
3100 if (insn->type == INSN_JUMP_DYNAMIC)
3101 return 0;
3102
3103 break;
3104
3105 case INSN_CONTEXT_SWITCH:
3106 if (func && (!next_insn || !next_insn->hint)) {
3107 WARN_FUNC("unsupported instruction in callable function",
3108 sec, insn->offset);
3109 return 1;
3110 }
3111 return 0;
3112
3113 case INSN_STAC:
3114 if (state.uaccess) {
3115 WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
3116 return 1;
3117 }
3118
3119 state.uaccess = true;
3120 break;
3121
3122 case INSN_CLAC:
3123 if (!state.uaccess && func) {
3124 WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
3125 return 1;
3126 }
3127
3128 if (func_uaccess_safe(func) && !state.uaccess_stack) {
3129 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
3130 return 1;
3131 }
3132
3133 state.uaccess = false;
3134 break;
3135
3136 case INSN_STD:
3137 if (state.df) {
3138 WARN_FUNC("recursive STD", sec, insn->offset);
3139 return 1;
3140 }
3141
3142 state.df = true;
3143 break;
3144
3145 case INSN_CLD:
3146 if (!state.df && func) {
3147 WARN_FUNC("redundant CLD", sec, insn->offset);
3148 return 1;
3149 }
3150
3151 state.df = false;
3152 break;
3153
3154 default:
3155 break;
3156 }
3157
3158 if (insn->dead_end)
3159 return 0;
3160
3161 if (!next_insn) {
3162 if (state.cfi.cfa.base == CFI_UNDEFINED)
3163 return 0;
3164 WARN("%s: unexpected end of section", sec->name);
3165 return 1;
3166 }
3167
3168 prev_insn = insn;
3169 insn = next_insn;
3170 }
3171
3172 return 0;
3173 }
3174
3175 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3176 {
3177 struct instruction *insn;
3178 struct insn_state state;
3179 int ret, warnings = 0;
3180
3181 if (!file->hints)
3182 return 0;
3183
3184 init_insn_state(&state, sec);
3185
3186 if (sec) {
3187 insn = find_insn(file, sec, 0);
3188 if (!insn)
3189 return 0;
3190 } else {
3191 insn = list_first_entry(&file->insn_list, typeof(*insn), list);
3192 }
3193
3194 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
3195 if (insn->hint && !insn->visited) {
3196 ret = validate_branch(file, insn->func, insn, state);
3197 if (ret && backtrace)
3198 BT_FUNC("<=== (hint)", insn);
3199 warnings += ret;
3200 }
3201
3202 insn = list_next_entry(insn, list);
3203 }
3204
3205 return warnings;
3206 }
3207
3208 static int validate_retpoline(struct objtool_file *file)
3209 {
3210 struct instruction *insn;
3211 int warnings = 0;
3212
3213 for_each_insn(file, insn) {
3214 if (insn->type != INSN_JUMP_DYNAMIC &&
3215 insn->type != INSN_CALL_DYNAMIC)
3216 continue;
3217
3218 if (insn->retpoline_safe)
3219 continue;
3220
3221 /*
3222 * .init.text code is run before userspace and thus doesn't
3223 * strictly need retpolines. Modules are the exception: they
3224 * are loaded late, so their .init.text very much does need
3225 * retpolines.
3226 */
3227 if (!strcmp(insn->sec->name, ".init.text") && !module)
3228 continue;
3229
3230 WARN_FUNC("indirect %s found in RETPOLINE build",
3231 insn->sec, insn->offset,
3232 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
3233
3234 warnings++;
3235 }
3236
3237 return warnings;
3238 }
3239
3240 static bool is_kasan_insn(struct instruction *insn)
3241 {
3242 return (insn->type == INSN_CALL &&
3243 !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
3244 }
3245
3246 static bool is_ubsan_insn(struct instruction *insn)
3247 {
3248 return (insn->type == INSN_CALL &&
3249 !strcmp(insn->call_dest->name,
3250 "__ubsan_handle_builtin_unreachable"));
3251 }
3252
3253 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
3254 {
3255 int i;
3256 struct instruction *prev_insn;
3257
3258 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
3259 return true;
3260
3261 /*
3262 * Ignore any unused exceptions. This can happen when a whitelisted
3263 * function has an exception table entry.
3264 *
3265 * Also ignore alternative replacement instructions. This can happen
3266 * when a whitelisted function uses one of the ALTERNATIVE macros.
3267 */
3268 if (!strcmp(insn->sec->name, ".fixup") ||
3269 !strcmp(insn->sec->name, ".altinstr_replacement") ||
3270 !strcmp(insn->sec->name, ".altinstr_aux"))
3271 return true;
3272
3273 if (!insn->func)
3274 return false;
3275
3276 /*
3277 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
3278 * __builtin_unreachable(). The BUG() macro has an unreachable() after
3279 * the UD2, which causes GCC's undefined trap logic to emit another UD2
3280 * (or occasionally a JMP to UD2).
3281 *
3282 * It may also insert a UD2 after calling a __noreturn function.
3283 */
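/*
 * e.g. (illustrative):
 *
 *   call do_something_noreturn
 *   ud2            # INSN_BUG, unreachable by design
 */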
3284 prev_insn = list_prev_entry(insn, list);
3285 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
3286 (insn->type == INSN_BUG ||
3287 (insn->type == INSN_JUMP_UNCONDITIONAL &&
3288 insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
3289 return true;
3290
3291 /*
3292 * Check if this (or a subsequent) instruction is related to
3293 * CONFIG_UBSAN or CONFIG_KASAN.
3294 *
3295 * End the search at 5 instructions to avoid going into the weeds.
3296 */
3297 for (i = 0; i < 5; i++) {
3298
3299 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
3300 return true;
3301
3302 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
3303 if (insn->jump_dest &&
3304 insn->jump_dest->func == insn->func) {
3305 insn = insn->jump_dest;
3306 continue;
3307 }
3308
3309 break;
3310 }
3311
3312 if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
3313 break;
3314
3315 insn = list_next_entry(insn, list);
3316 }
3317
3318 return false;
3319 }
3320
3321 static int validate_symbol(struct objtool_file *file, struct section *sec,
3322 struct symbol *sym, struct insn_state *state)
3323 {
3324 struct instruction *insn;
3325 int ret;
3326
3327 if (!sym->len) {
3328 WARN("%s() is missing an ELF size annotation", sym->name);
3329 return 1;
3330 }
3331
3332 if (sym->pfunc != sym || sym->alias != sym)
3333 return 0;
3334
3335 insn = find_insn(file, sec, sym->offset);
3336 if (!insn || insn->ignore || insn->visited)
3337 return 0;
3338
3339 state->uaccess = sym->uaccess_safe;
3340
3341 ret = validate_branch(file, insn->func, insn, *state);
3342 if (ret && backtrace)
3343 BT_FUNC("<=== (sym)", insn);
3344 return ret;
3345 }
3346
3347 static int validate_section(struct objtool_file *file, struct section *sec)
3348 {
3349 struct insn_state state;
3350 struct symbol *func;
3351 int warnings = 0;
3352
3353 list_for_each_entry(func, &sec->symbol_list, list) {
3354 if (func->type != STT_FUNC)
3355 continue;
3356
3357 init_insn_state(&state, sec);
3358 set_func_state(&state.cfi);
3359
3360 warnings += validate_symbol(file, sec, func, &state);
3361 }
3362
3363 return warnings;
3364 }
3365
3366 static int validate_vmlinux_functions(struct objtool_file *file)
3367 {
3368 struct section *sec;
3369 int warnings = 0;
3370
3371 sec = find_section_by_name(file->elf, ".noinstr.text");
3372 if (sec) {
3373 warnings += validate_section(file, sec);
3374 warnings += validate_unwind_hints(file, sec);
3375 }
3376
3377 sec = find_section_by_name(file->elf, ".entry.text");
3378 if (sec) {
3379 warnings += validate_section(file, sec);
3380 warnings += validate_unwind_hints(file, sec);
3381 }
3382
3383 return warnings;
3384 }
3385
3386 static int validate_functions(struct objtool_file *file)
3387 {
3388 struct section *sec;
3389 int warnings = 0;
3390
3391 for_each_sec(file, sec) {
3392 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
3393 continue;
3394
3395 warnings += validate_section(file, sec);
3396 }
3397
3398 return warnings;
3399 }
3400
3401 static int validate_reachable_instructions(struct objtool_file *file)
3402 {
3403 struct instruction *insn;
3404
3405 if (file->ignore_unreachables)
3406 return 0;
3407
3408 for_each_insn(file, insn) {
3409 if (insn->visited || ignore_unreachable_insn(file, insn))
3410 continue;
3411
3412 WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
3413 return 1;
3414 }
3415
3416 return 0;
3417 }
3418
3419 int check(struct objtool_file *file)
3420 {
3421 int ret, warnings = 0;
3422
3423 arch_initial_func_cfi_state(&initial_func_cfi);
3424 init_cfi_state(&init_cfi);
3425 init_cfi_state(&func_cfi);
3426 set_func_state(&func_cfi);
3427
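/* Size the CFI hash at roughly an eighth of the symbol count. */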
3428 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
3429 goto out;
3430
3431 cfi_hash_add(&init_cfi);
3432 cfi_hash_add(&func_cfi);
3433
3434 ret = decode_sections(file);
3435 if (ret < 0)
3436 goto out;
3437
3438 warnings += ret;
3439
3440 if (list_empty(&file->insn_list))
3441 goto out;
3442
3443 if (vmlinux && !validate_dup) {
3444 ret = validate_vmlinux_functions(file);
3445 if (ret < 0)
3446 goto out;
3447
3448 warnings += ret;
3449 goto out;
3450 }
3451
3452 if (retpoline) {
3453 ret = validate_retpoline(file);
3454 if (ret < 0)
3455 goto out;
3456 warnings += ret;
3457 }
3458
3459 ret = validate_functions(file);
3460 if (ret < 0)
3461 goto out;
3462 warnings += ret;
3463
3464 ret = validate_unwind_hints(file, NULL);
3465 if (ret < 0)
3466 goto out;
3467 warnings += ret;
3468
3469 if (!warnings) {
3470 ret = validate_reachable_instructions(file);
3471 if (ret < 0)
3472 goto out;
3473 warnings += ret;
3474 }
3475
3476 ret = create_static_call_sections(file);
3477 if (ret < 0)
3478 goto out;
3479 warnings += ret;
3480
3481 if (retpoline) {
3482 ret = create_retpoline_sites_sections(file);
3483 if (ret < 0)
3484 goto out;
3485 warnings += ret;
3486
3487 ret = create_return_sites_sections(file);
3488 if (ret < 0)
3489 goto out;
3490 warnings += ret;
3491 }
3492
3493 if (mcount) {
3494 ret = create_mcount_loc_sections(file);
3495 if (ret < 0)
3496 goto out;
3497 warnings += ret;
3498 }
3499
3500 if (stats) {
3501 printf("nr_insns_visited: %ld\n", nr_insns_visited);
3502 printf("nr_cfi: %ld\n", nr_cfi);
3503 printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
3504 printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
3505 }
3506
3507 out:
3508 /*
3509 * For now, don't fail the kernel build on fatal warnings. These
3510 * errors are still fairly common due to the growing matrix of
3511 * supported toolchains and their recent pace of change.
3512 */
3513 return 0;
3514 }