1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
3
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/types.h>
6 #include <linux/seq_file.h>
7 #include <linux/compiler.h>
8 #include <linux/ctype.h>
9 #include <linux/errno.h>
10 #include <linux/slab.h>
11 #include <linux/anon_inodes.h>
12 #include <linux/file.h>
13 #include <linux/uaccess.h>
14 #include <linux/kernel.h>
15 #include <linux/idr.h>
16 #include <linux/sort.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/btf.h>
19
20 /* BTF (BPF Type Format) is the metadata format which describes
21 * the data types of BPF programs/maps. Hence, it basically focuses
22 * on the C programming language, which modern BPF primarily
23 * uses.
24 *
25 * ELF Section:
26 * ~~~~~~~~~~~
27 * The BTF data is stored under the ".BTF" ELF section
28 *
29 * struct btf_type:
30 * ~~~~~~~~~~~~~~~
31 * Each 'struct btf_type' object describes a C data type.
32 * Depending on the type it is describing, a 'struct btf_type'
33 * object may be followed by more data. E.g.
34 * to describe an array, 'struct btf_type' is followed by
35 * 'struct btf_array'.
36 *
37 * 'struct btf_type' and any extra data following it are
38 * 4 bytes aligned.
39 *
40 * Type section:
41 * ~~~~~~~~~~~~~
42 * The BTF type section contains a list of 'struct btf_type' objects.
43 * Each one describes a C type. Recall from the above section
44 * that a 'struct btf_type' object could be immediately followed by extra
45 * data in order to describe some particular C types.
46 *
47 * type_id:
48 * ~~~~~~~
49 * Each btf_type object is identified by a type_id. The type_id
50 * is implied by the location of the btf_type object in
51 * the BTF type section. The first one has type_id 1. The second
52 * one has type_id 2...etc. Hence, an earlier btf_type has
53 * a smaller type_id.
54 *
55 * A btf_type object may refer to another btf_type object by using
56 * type_id (i.e. the "type" in the "struct btf_type").
57 *
58 * NOTE that we cannot assume any reference-order.
59 * A btf_type object can refer to an earlier btf_type object
60 * but it can also refer to a later btf_type object.
61 *
62 * For example, to describe "const void *", a btf_type
63 * object describing "const" may refer to another btf_type
64 * object describing "void *". This type-reference is done
65 * by specifying type_id:
66 *
67 * [1] CONST (anon) type_id=2
68 * [2] PTR (anon) type_id=0
69 *
70 * The above is the btf_verifier debug log:
71 * - Each line starting with "[?]" is a btf_type object
72 * - [?] is the type_id of the btf_type object.
73 * - CONST/PTR is the BTF_KIND_XXX
74 * - "(anon)" is the name of the type. It just
75 * happens that CONST and PTR have no name.
76 * - type_id=XXX is the 'u32 type' in btf_type
77 *
78 * NOTE: "void" has type_id 0
79 *
80 * String section:
81 * ~~~~~~~~~~~~~~
82 * The BTF string section contains the names used by the type section.
83 * Each string is referred to by an "offset" from the beginning of the
84 * string section.
85 *
86 * Each string is '\0' terminated.
87 *
88 * The first character in the string section must be '\0'
89 * which is used to mean 'anonymous'. Some btf_type may not
90 * have a name.
91 */
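/* For illustration, consider how the C type "const int *" could be
 * laid out, assuming "int" is encoded as a 4-byte signed INT:
 *
 *	[1] INT   "int"    size=4, followed by a u32 of int_data with
 *	                   BTF_INT_BITS() == 32 and BTF_INT_OFFSET() == 0
 *	[2] PTR   (anon)   type_id=3
 *	[3] CONST (anon)   type_id=1
 *
 * and the string section could simply be "\0int\0", i.e. "int" lives
 * at offset 1 because offset 0 is reserved for the empty (anonymous)
 * name. Note that [2] refers forward to [3], which is why no
 * reference-order can be assumed.
 */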
92
93 /* BTF verification:
94 *
95 * To verify BTF data, two passes are needed.
96 *
97 * Pass #1
98 * ~~~~~~~
99 * The first pass is to collect all btf_type objects to
100 * an array: "btf->types".
101 *
102 * Depending on the C type that a btf_type is describing,
103 * a btf_type may be followed by extra data. We don't know
104 * how many btf_types there are, and more importantly we don't
105 * know where each btf_type is located in the type section.
106 *
107 * Without knowing the location of each type_id, most verifications
108 * cannot be done. e.g. an earlier btf_type may refer to a later
109 * btf_type (recall the "const void *" above), so we cannot
110 * check this type-reference in the first pass.
111 *
112 * The first pass still does some verifications (e.g.
113 * checking that the name is a valid offset into the string section).
114 *
115 * Pass #2
116 * ~~~~~~~
117 * The main focus is to resolve a btf_type that is referring
118 * to another type.
119 *
120 * We have to ensure the type being referred to:
121 * 1) does exist in the BTF (i.e. in btf->types[])
122 * 2) does not cause a loop:
123 * struct A {
124 * struct B b;
125 * };
126 *
127 * struct B {
128 * struct A a;
129 * };
130 *
131 * btf_type_needs_resolve() decides if a btf_type needs
132 * to be resolved.
133 *
134 * The needs_resolve type implements the "resolve()" ops which
135 * essentially does a DFS and detects back edges.
136 *
137 * During resolve (or DFS), different C types have different
138 * "RESOLVED" conditions.
139 *
140 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
141 * members because a member is always referring to another
142 * type. A struct's member can be treated as "RESOLVED" if
143 * it is referring to a BTF_KIND_PTR. Otherwise, the
144 * following valid C struct would be rejected:
145 *
146 * struct A {
147 * int m;
148 * struct A *a;
149 * };
150 *
151 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
152 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
153 * detect a pointer loop, e.g.:
154 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
155 * ^ |
156 * +-----------------------------------------+
157 *
158 */
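/* A rough sketch of the resolve pass (the actual logic lives in
 * btf_resolve() and the per-kind resolve() ops below):
 *
 *	push the needs_resolve type		(NOT_VISITED -> VISITED)
 *	while the stack is not empty:
 *		look at the type on top and follow its reference(s)
 *		if a referenced type is already VISITED
 *			-> back edge, i.e. a loop: reject
 *		else if it still needs resolving and is not a sink
 *			-> push it (depth capped at MAX_RESOLVE_DEPTH)
 *		else
 *			-> pop the top as RESOLVED and record its
 *			   resolved type_id and size
 */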
159
160 #define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
161 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
162 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
163 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
164 #define BITS_ROUNDUP_BYTES(bits) \
165 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
166
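/* For example, for a 12-bit member starting at bit offset 21:
 *	BITS_ROUNDDOWN_BYTES(21)	== 2	(byte holding the first bit)
 *	BITS_PER_BYTE_MASKED(21)	== 5	(bit offset within that byte)
 *	BITS_ROUNDUP_BYTES(12 + 5)	== 3	(bytes needed to cover all bits)
 */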
167 #define BTF_INFO_MASK 0x0f00ffff
168 #define BTF_INT_MASK 0x0fffffff
169 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
170 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
171
172 /* 16MB for 64k structs, each with 16 members, and
173 * a few MB of space for the string section.
174 * The hard limit is S32_MAX.
175 */
176 #define BTF_MAX_SIZE (16 * 1024 * 1024)
177
178 #define for_each_member(i, struct_type, member) \
179 for (i = 0, member = btf_type_member(struct_type); \
180 i < btf_type_vlen(struct_type); \
181 i++, member++)
182
183 #define for_each_member_from(i, from, struct_type, member) \
184 for (i = from, member = btf_type_member(struct_type) + from; \
185 i < btf_type_vlen(struct_type); \
186 i++, member++)
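/* Hypothetical usage sketch of the iterators above, e.g. to walk the
 * members of a STRUCT/UNION btf_type "t":
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member(i, t, member)
 *		pr_debug("member#%u: name_off=%u type_id=%u bits_off=%u\n",
 *			 i, member->name_off, member->type, member->offset);
 */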
187
188 static DEFINE_IDR(btf_idr);
189 static DEFINE_SPINLOCK(btf_idr_lock);
190
191 struct btf {
192 void *data;
193 struct btf_type **types;
194 u32 *resolved_ids;
195 u32 *resolved_sizes;
196 const char *strings;
197 void *nohdr_data;
198 struct btf_header hdr;
199 u32 nr_types;
200 u32 types_size;
201 u32 data_size;
202 refcount_t refcnt;
203 u32 id;
204 struct rcu_head rcu;
205 };
206
207 enum verifier_phase {
208 CHECK_META,
209 CHECK_TYPE,
210 };
211
212 struct resolve_vertex {
213 const struct btf_type *t;
214 u32 type_id;
215 u16 next_member;
216 };
217
218 enum visit_state {
219 NOT_VISITED,
220 VISITED,
221 RESOLVED,
222 };
223
224 enum resolve_mode {
225 RESOLVE_TBD, /* To Be Determined */
226 RESOLVE_PTR, /* Resolving for Pointer */
227 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
228 * or array
229 */
230 };
231
232 #define MAX_RESOLVE_DEPTH 32
233
234 struct btf_sec_info {
235 u32 off;
236 u32 len;
237 };
238
239 struct btf_verifier_env {
240 struct btf *btf;
241 u8 *visit_states;
242 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
243 struct bpf_verifier_log log;
244 u32 log_type_id;
245 u32 top_stack;
246 enum verifier_phase phase;
247 enum resolve_mode resolve_mode;
248 };
249
250 static const char * const btf_kind_str[NR_BTF_KINDS] = {
251 [BTF_KIND_UNKN] = "UNKNOWN",
252 [BTF_KIND_INT] = "INT",
253 [BTF_KIND_PTR] = "PTR",
254 [BTF_KIND_ARRAY] = "ARRAY",
255 [BTF_KIND_STRUCT] = "STRUCT",
256 [BTF_KIND_UNION] = "UNION",
257 [BTF_KIND_ENUM] = "ENUM",
258 [BTF_KIND_FWD] = "FWD",
259 [BTF_KIND_TYPEDEF] = "TYPEDEF",
260 [BTF_KIND_VOLATILE] = "VOLATILE",
261 [BTF_KIND_CONST] = "CONST",
262 [BTF_KIND_RESTRICT] = "RESTRICT",
263 [BTF_KIND_FUNC] = "FUNC",
264 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
265 };
266
267 struct btf_kind_operations {
268 s32 (*check_meta)(struct btf_verifier_env *env,
269 const struct btf_type *t,
270 u32 meta_left);
271 int (*resolve)(struct btf_verifier_env *env,
272 const struct resolve_vertex *v);
273 int (*check_member)(struct btf_verifier_env *env,
274 const struct btf_type *struct_type,
275 const struct btf_member *member,
276 const struct btf_type *member_type);
277 void (*log_details)(struct btf_verifier_env *env,
278 const struct btf_type *t);
279 void (*seq_show)(const struct btf *btf, const struct btf_type *t,
280 u32 type_id, void *data, u8 bits_offsets,
281 struct seq_file *m);
282 };
283
284 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
285 static struct btf_type btf_void;
286
287 static int btf_resolve(struct btf_verifier_env *env,
288 const struct btf_type *t, u32 type_id);
289
290 static bool btf_type_is_modifier(const struct btf_type *t)
291 {
292 /* Some of them are not strictly C modifiers
293 * but they are grouped into the same bucket
294 * as far as BTF is concerned:
295 * A type (t) that refers to another
296 * type through t->type AND its size cannot
297 * be determined without following the t->type.
298 *
299 * ptr does not fall into this bucket
300 * because its size is always sizeof(void *).
301 */
302 switch (BTF_INFO_KIND(t->info)) {
303 case BTF_KIND_TYPEDEF:
304 case BTF_KIND_VOLATILE:
305 case BTF_KIND_CONST:
306 case BTF_KIND_RESTRICT:
307 return true;
308 }
309
310 return false;
311 }
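/* For example, given
 *	typedef struct foo foo_t;
 * the size of "foo_t" can only be known by following the TYPEDEF's
 * t->type to "struct foo", so TYPEDEF is treated as a modifier here.
 * A PTR such as "struct foo *" is not: its size is sizeof(void *)
 * regardless of what it points to.
 */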
312
313 static bool btf_type_is_void(const struct btf_type *t)
314 {
315 return t == &btf_void;
316 }
317
318 static bool btf_type_is_fwd(const struct btf_type *t)
319 {
320 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
321 }
322
323 static bool btf_type_is_func(const struct btf_type *t)
324 {
325 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
326 }
327
328 static bool btf_type_is_func_proto(const struct btf_type *t)
329 {
330 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
331 }
332
333 static bool btf_type_nosize(const struct btf_type *t)
334 {
335 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
336 btf_type_is_func(t) || btf_type_is_func_proto(t);
337 }
338
339 static bool btf_type_nosize_or_null(const struct btf_type *t)
340 {
341 return !t || btf_type_nosize(t);
342 }
343
344 /* union is only a special case of struct:
345 * all its offsetof(member) == 0
346 */
347 static bool btf_type_is_struct(const struct btf_type *t)
348 {
349 u8 kind = BTF_INFO_KIND(t->info);
350
351 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
352 }
353
354 static bool btf_type_is_array(const struct btf_type *t)
355 {
356 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
357 }
358
359 static bool btf_type_is_ptr(const struct btf_type *t)
360 {
361 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
362 }
363
364 static bool btf_type_is_int(const struct btf_type *t)
365 {
366 return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
367 }
368
369 /* What types need to be resolved?
370 *
371 * btf_type_is_modifier() is an obvious one.
372 *
373 * btf_type_is_struct() because its member refers to
374 * another type (through member->type).
375  *
376 * btf_type_is_array() because its element (array->type)
377 * refers to another type. An array can be thought of as a
378 * special case of struct where the same member-type is
379 * repeated array->nelems times.
380 */
381 static bool btf_type_needs_resolve(const struct btf_type *t)
382 {
383 return btf_type_is_modifier(t) ||
384 btf_type_is_ptr(t) ||
385 btf_type_is_struct(t) ||
386 btf_type_is_array(t);
387 }
388
389 /* t->size can be used */
390 static bool btf_type_has_size(const struct btf_type *t)
391 {
392 switch (BTF_INFO_KIND(t->info)) {
393 case BTF_KIND_INT:
394 case BTF_KIND_STRUCT:
395 case BTF_KIND_UNION:
396 case BTF_KIND_ENUM:
397 return true;
398 }
399
400 return false;
401 }
402
403 static const char *btf_int_encoding_str(u8 encoding)
404 {
405 if (encoding == 0)
406 return "(none)";
407 else if (encoding == BTF_INT_SIGNED)
408 return "SIGNED";
409 else if (encoding == BTF_INT_CHAR)
410 return "CHAR";
411 else if (encoding == BTF_INT_BOOL)
412 return "BOOL";
413 else
414 return "UNKN";
415 }
416
417 static u16 btf_type_vlen(const struct btf_type *t)
418 {
419 return BTF_INFO_VLEN(t->info);
420 }
421
422 static u32 btf_type_int(const struct btf_type *t)
423 {
424 return *(u32 *)(t + 1);
425 }
426
427 static const struct btf_array *btf_type_array(const struct btf_type *t)
428 {
429 return (const struct btf_array *)(t + 1);
430 }
431
432 static const struct btf_member *btf_type_member(const struct btf_type *t)
433 {
434 return (const struct btf_member *)(t + 1);
435 }
436
437 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
438 {
439 return (const struct btf_enum *)(t + 1);
440 }
441
442 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
443 {
444 return kind_ops[BTF_INFO_KIND(t->info)];
445 }
446
447 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
448 {
449 return BTF_STR_OFFSET_VALID(offset) &&
450 offset < btf->hdr.str_len;
451 }
452
453 /* Only C-style identifier is permitted. This can be relaxed if
454 * necessary.
455 */
456 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
457 {
458 /* offset must be valid */
459 const char *src = &btf->strings[offset];
460 const char *src_limit;
461
462 if (!isalpha(*src) && *src != '_')
463 return false;
464
465 /* set a limit on identifier length */
466 src_limit = src + KSYM_NAME_LEN;
467 src++;
468 while (*src && src < src_limit) {
469 if (!isalnum(*src) && *src != '_')
470 return false;
471 src++;
472 }
473
474 return !*src;
475 }
476
477 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
478 {
479 if (!offset)
480 return "(anon)";
481 else if (offset < btf->hdr.str_len)
482 return &btf->strings[offset];
483 else
484 return "(invalid-name-offset)";
485 }
486
487 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
488 {
489 if (type_id > btf->nr_types)
490 return NULL;
491
492 return btf->types[type_id];
493 }
494
495 /*
496 * A regular int is not a bit field and it must be either
497 * u8/u16/u32/u64.
498 */
499 static bool btf_type_int_is_regular(const struct btf_type *t)
500 {
501 u8 nr_bits, nr_bytes;
502 u32 int_data;
503
504 int_data = btf_type_int(t);
505 nr_bits = BTF_INT_BITS(int_data);
506 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
507 if (BITS_PER_BYTE_MASKED(nr_bits) ||
508 BTF_INT_OFFSET(int_data) ||
509 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
510 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
511 return false;
512 }
513
514 return true;
515 }
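/* e.g. int_data describing a plain 32-bit int (BTF_INT_BITS() == 32,
 * BTF_INT_OFFSET() == 0) is regular and rounds up to 4 bytes, while a
 * 12-bit or bit-shifted int (as used for bitfield members) is not.
 */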
516
517 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
518 const char *fmt, ...)
519 {
520 va_list args;
521
522 va_start(args, fmt);
523 bpf_verifier_vlog(log, fmt, args);
524 va_end(args);
525 }
526
527 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
528 const char *fmt, ...)
529 {
530 struct bpf_verifier_log *log = &env->log;
531 va_list args;
532
533 if (!bpf_verifier_log_needed(log))
534 return;
535
536 va_start(args, fmt);
537 bpf_verifier_vlog(log, fmt, args);
538 va_end(args);
539 }
540
541 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
542 const struct btf_type *t,
543 bool log_details,
544 const char *fmt, ...)
545 {
546 struct bpf_verifier_log *log = &env->log;
547 u8 kind = BTF_INFO_KIND(t->info);
548 struct btf *btf = env->btf;
549 va_list args;
550
551 if (!bpf_verifier_log_needed(log))
552 return;
553
554 __btf_verifier_log(log, "[%u] %s %s%s",
555 env->log_type_id,
556 btf_kind_str[kind],
557 btf_name_by_offset(btf, t->name_off),
558 log_details ? " " : "");
559
560 if (log_details)
561 btf_type_ops(t)->log_details(env, t);
562
563 if (fmt && *fmt) {
564 __btf_verifier_log(log, " ");
565 va_start(args, fmt);
566 bpf_verifier_vlog(log, fmt, args);
567 va_end(args);
568 }
569
570 __btf_verifier_log(log, "\n");
571 }
572
573 #define btf_verifier_log_type(env, t, ...) \
574 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
575 #define btf_verifier_log_basic(env, t, ...) \
576 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
577
578 __printf(4, 5)
579 static void btf_verifier_log_member(struct btf_verifier_env *env,
580 const struct btf_type *struct_type,
581 const struct btf_member *member,
582 const char *fmt, ...)
583 {
584 struct bpf_verifier_log *log = &env->log;
585 struct btf *btf = env->btf;
586 va_list args;
587
588 if (!bpf_verifier_log_needed(log))
589 return;
590
591 /* The CHECK_META phase already did a btf dump.
592 *
593 * If member is logged again, it must have hit an error in
594 * parsing this member. It is useful to print out which
595 * struct this member belongs to.
596 */
597 if (env->phase != CHECK_META)
598 btf_verifier_log_type(env, struct_type, NULL);
599
600 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
601 btf_name_by_offset(btf, member->name_off),
602 member->type, member->offset);
603
604 if (fmt && *fmt) {
605 __btf_verifier_log(log, " ");
606 va_start(args, fmt);
607 bpf_verifier_vlog(log, fmt, args);
608 va_end(args);
609 }
610
611 __btf_verifier_log(log, "\n");
612 }
613
614 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
615 u32 btf_data_size)
616 {
617 struct bpf_verifier_log *log = &env->log;
618 const struct btf *btf = env->btf;
619 const struct btf_header *hdr;
620
621 if (!bpf_verifier_log_needed(log))
622 return;
623
624 hdr = &btf->hdr;
625 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
626 __btf_verifier_log(log, "version: %u\n", hdr->version);
627 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
628 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
629 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
630 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
631 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
632 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
633 __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
634 }
635
636 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
637 {
638 struct btf *btf = env->btf;
639
640 /* < 2 because +1 for btf_void which is always in btf->types[0].
641 * btf_void is not accounted for in btf->nr_types because btf_void
642 * does not come from the BTF file.
643 */
644 if (btf->types_size - btf->nr_types < 2) {
645 /* Expand 'types' array */
646
647 struct btf_type **new_types;
648 u32 expand_by, new_size;
649
650 if (btf->types_size == BTF_MAX_TYPE) {
651 btf_verifier_log(env, "Exceeded max num of types");
652 return -E2BIG;
653 }
654
655 expand_by = max_t(u32, btf->types_size >> 2, 16);
656 new_size = min_t(u32, BTF_MAX_TYPE,
657 btf->types_size + expand_by);
658
659 new_types = kvcalloc(new_size, sizeof(*new_types),
660 GFP_KERNEL | __GFP_NOWARN);
661 if (!new_types)
662 return -ENOMEM;
663
664 if (btf->nr_types == 0)
665 new_types[0] = &btf_void;
666 else
667 memcpy(new_types, btf->types,
668 sizeof(*btf->types) * (btf->nr_types + 1));
669
670 kvfree(btf->types);
671 btf->types = new_types;
672 btf->types_size = new_size;
673 }
674
675 btf->types[++(btf->nr_types)] = t;
676
677 return 0;
678 }
679
680 static int btf_alloc_id(struct btf *btf)
681 {
682 int id;
683
684 idr_preload(GFP_KERNEL);
685 spin_lock_bh(&btf_idr_lock);
686 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
687 if (id > 0)
688 btf->id = id;
689 spin_unlock_bh(&btf_idr_lock);
690 idr_preload_end();
691
692 if (WARN_ON_ONCE(!id))
693 return -ENOSPC;
694
695 return id > 0 ? 0 : id;
696 }
697
698 static void btf_free_id(struct btf *btf)
699 {
700 unsigned long flags;
701
702 /*
703 * In map-in-map, calling map_delete_elem() on the outer
704 * map will call bpf_map_put on the inner map.
705 * It will then eventually call btf_free_id()
706 * on the inner map. Some of the map_delete_elem()
707 * implementations may have irqs disabled, so
708 * we need to use the _irqsave() version instead
709 * of the _bh() version.
710 */
711 spin_lock_irqsave(&btf_idr_lock, flags);
712 idr_remove(&btf_idr, btf->id);
713 spin_unlock_irqrestore(&btf_idr_lock, flags);
714 }
715
716 static void btf_free(struct btf *btf)
717 {
718 kvfree(btf->types);
719 kvfree(btf->resolved_sizes);
720 kvfree(btf->resolved_ids);
721 kvfree(btf->data);
722 kfree(btf);
723 }
724
725 static void btf_free_rcu(struct rcu_head *rcu)
726 {
727 struct btf *btf = container_of(rcu, struct btf, rcu);
728
729 btf_free(btf);
730 }
731
732 void btf_put(struct btf *btf)
733 {
734 if (btf && refcount_dec_and_test(&btf->refcnt)) {
735 btf_free_id(btf);
736 call_rcu(&btf->rcu, btf_free_rcu);
737 }
738 }
739
740 static int env_resolve_init(struct btf_verifier_env *env)
741 {
742 struct btf *btf = env->btf;
743 u32 nr_types = btf->nr_types;
744 u32 *resolved_sizes = NULL;
745 u32 *resolved_ids = NULL;
746 u8 *visit_states = NULL;
747
748 /* +1 for btf_void */
749 resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
750 GFP_KERNEL | __GFP_NOWARN);
751 if (!resolved_sizes)
752 goto nomem;
753
754 resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
755 GFP_KERNEL | __GFP_NOWARN);
756 if (!resolved_ids)
757 goto nomem;
758
759 visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
760 GFP_KERNEL | __GFP_NOWARN);
761 if (!visit_states)
762 goto nomem;
763
764 btf->resolved_sizes = resolved_sizes;
765 btf->resolved_ids = resolved_ids;
766 env->visit_states = visit_states;
767
768 return 0;
769
770 nomem:
771 kvfree(resolved_sizes);
772 kvfree(resolved_ids);
773 kvfree(visit_states);
774 return -ENOMEM;
775 }
776
777 static void btf_verifier_env_free(struct btf_verifier_env *env)
778 {
779 kvfree(env->visit_states);
780 kfree(env);
781 }
782
783 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
784 const struct btf_type *next_type)
785 {
786 switch (env->resolve_mode) {
787 case RESOLVE_TBD:
788 /* int, enum or void is a sink */
789 return !btf_type_needs_resolve(next_type);
790 case RESOLVE_PTR:
791 /* int, enum, void, struct, array, func or func_proto is a sink
792 * for ptr
793 */
794 return !btf_type_is_modifier(next_type) &&
795 !btf_type_is_ptr(next_type);
796 case RESOLVE_STRUCT_OR_ARRAY:
797 /* int, enum, void, ptr, func or func_proto is a sink
798 * for struct and array
799 */
800 return !btf_type_is_modifier(next_type) &&
801 !btf_type_is_array(next_type) &&
802 !btf_type_is_struct(next_type);
803 default:
804 BUG();
805 }
806 }
807
808 static bool env_type_is_resolved(const struct btf_verifier_env *env,
809 u32 type_id)
810 {
811 return env->visit_states[type_id] == RESOLVED;
812 }
813
814 static int env_stack_push(struct btf_verifier_env *env,
815 const struct btf_type *t, u32 type_id)
816 {
817 struct resolve_vertex *v;
818
819 if (env->top_stack == MAX_RESOLVE_DEPTH)
820 return -E2BIG;
821
822 if (env->visit_states[type_id] != NOT_VISITED)
823 return -EEXIST;
824
825 env->visit_states[type_id] = VISITED;
826
827 v = &env->stack[env->top_stack++];
828 v->t = t;
829 v->type_id = type_id;
830 v->next_member = 0;
831
832 if (env->resolve_mode == RESOLVE_TBD) {
833 if (btf_type_is_ptr(t))
834 env->resolve_mode = RESOLVE_PTR;
835 else if (btf_type_is_struct(t) || btf_type_is_array(t))
836 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
837 }
838
839 return 0;
840 }
841
842 static void env_stack_set_next_member(struct btf_verifier_env *env,
843 u16 next_member)
844 {
845 env->stack[env->top_stack - 1].next_member = next_member;
846 }
847
848 static void env_stack_pop_resolved(struct btf_verifier_env *env,
849 u32 resolved_type_id,
850 u32 resolved_size)
851 {
852 u32 type_id = env->stack[--(env->top_stack)].type_id;
853 struct btf *btf = env->btf;
854
855 btf->resolved_sizes[type_id] = resolved_size;
856 btf->resolved_ids[type_id] = resolved_type_id;
857 env->visit_states[type_id] = RESOLVED;
858 }
859
860 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
861 {
862 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
863 }
864
865 /* The input param "type_id" must point to a needs_resolve type */
866 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
867 u32 *type_id)
868 {
869 *type_id = btf->resolved_ids[*type_id];
870 return btf_type_by_id(btf, *type_id);
871 }
872
873 const struct btf_type *btf_type_id_size(const struct btf *btf,
874 u32 *type_id, u32 *ret_size)
875 {
876 const struct btf_type *size_type;
877 u32 size_type_id = *type_id;
878 u32 size = 0;
879
880 size_type = btf_type_by_id(btf, size_type_id);
881 if (btf_type_nosize_or_null(size_type))
882 return NULL;
883
884 if (btf_type_has_size(size_type)) {
885 size = size_type->size;
886 } else if (btf_type_is_array(size_type)) {
887 size = btf->resolved_sizes[size_type_id];
888 } else if (btf_type_is_ptr(size_type)) {
889 size = sizeof(void *);
890 } else {
891 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
892 return NULL;
893
894 size = btf->resolved_sizes[size_type_id];
895 size_type_id = btf->resolved_ids[size_type_id];
896 size_type = btf_type_by_id(btf, size_type_id);
897 if (btf_type_nosize_or_null(size_type))
898 return NULL;
899 }
900
901 *type_id = size_type_id;
902 if (ret_size)
903 *ret_size = size;
904
905 return size_type;
906 }
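/* Illustrative example, assuming the hypothetical types
 *	[1] INT "int" size=4
 *	[2] TYPEDEF "myint" type_id=1
 * have already been resolved: calling btf_type_id_size() with
 * *type_id == 2 returns the INT type, rewrites *type_id to 1 and, if
 * requested, sets *ret_size to 4, all taken from the resolved_ids[]
 * and resolved_sizes[] recorded during the resolve pass.
 */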
907
908 static int btf_df_check_member(struct btf_verifier_env *env,
909 const struct btf_type *struct_type,
910 const struct btf_member *member,
911 const struct btf_type *member_type)
912 {
913 btf_verifier_log_basic(env, struct_type,
914 "Unsupported check_member");
915 return -EINVAL;
916 }
917
918 static int btf_df_resolve(struct btf_verifier_env *env,
919 const struct resolve_vertex *v)
920 {
921 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
922 return -EINVAL;
923 }
924
925 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
926 u32 type_id, void *data, u8 bits_offsets,
927 struct seq_file *m)
928 {
929 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
930 }
931
932 static int btf_int_check_member(struct btf_verifier_env *env,
933 const struct btf_type *struct_type,
934 const struct btf_member *member,
935 const struct btf_type *member_type)
936 {
937 u32 int_data = btf_type_int(member_type);
938 u32 struct_bits_off = member->offset;
939 u32 struct_size = struct_type->size;
940 u32 nr_copy_bits;
941 u32 bytes_offset;
942
943 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
944 btf_verifier_log_member(env, struct_type, member,
945 "bits_offset exceeds U32_MAX");
946 return -EINVAL;
947 }
948
949 struct_bits_off += BTF_INT_OFFSET(int_data);
950 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
951 nr_copy_bits = BTF_INT_BITS(int_data) +
952 BITS_PER_BYTE_MASKED(struct_bits_off);
953
954 if (nr_copy_bits > BITS_PER_U64) {
955 btf_verifier_log_member(env, struct_type, member,
956 "nr_copy_bits exceeds 64");
957 return -EINVAL;
958 }
959
960 if (struct_size < bytes_offset ||
961 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
962 btf_verifier_log_member(env, struct_type, member,
963 "Member exceeds struct_size");
964 return -EINVAL;
965 }
966
967 return 0;
968 }
969
970 static s32 btf_int_check_meta(struct btf_verifier_env *env,
971 const struct btf_type *t,
972 u32 meta_left)
973 {
974 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
975 u16 encoding;
976
977 if (meta_left < meta_needed) {
978 btf_verifier_log_basic(env, t,
979 "meta_left:%u meta_needed:%u",
980 meta_left, meta_needed);
981 return -EINVAL;
982 }
983
984 if (btf_type_vlen(t)) {
985 btf_verifier_log_type(env, t, "vlen != 0");
986 return -EINVAL;
987 }
988
989 int_data = btf_type_int(t);
990 if (int_data & ~BTF_INT_MASK) {
991 btf_verifier_log_basic(env, t, "Invalid int_data:%x",
992 int_data);
993 return -EINVAL;
994 }
995
996 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
997
998 if (nr_bits > BITS_PER_U64) {
999 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
1000 BITS_PER_U64);
1001 return -EINVAL;
1002 }
1003
1004 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
1005 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
1006 return -EINVAL;
1007 }
1008
1009 /*
1010 * Only one of the encoding bits is allowed and it
1011 * should be sufficient for the pretty print purpose (i.e. decoding).
1012 * Multiple bits can be allowed later if it is found
1013 * to be insufficient.
1014 */
1015 encoding = BTF_INT_ENCODING(int_data);
1016 if (encoding &&
1017 encoding != BTF_INT_SIGNED &&
1018 encoding != BTF_INT_CHAR &&
1019 encoding != BTF_INT_BOOL) {
1020 btf_verifier_log_type(env, t, "Unsupported encoding");
1021 return -ENOTSUPP;
1022 }
1023
1024 btf_verifier_log_type(env, t, NULL);
1025
1026 return meta_needed;
1027 }
1028
1029 static void btf_int_log(struct btf_verifier_env *env,
1030 const struct btf_type *t)
1031 {
1032 int int_data = btf_type_int(t);
1033
1034 btf_verifier_log(env,
1035 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1036 t->size, BTF_INT_OFFSET(int_data),
1037 BTF_INT_BITS(int_data),
1038 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1039 }
1040
1041 static void btf_int_bits_seq_show(const struct btf *btf,
1042 const struct btf_type *t,
1043 void *data, u8 bits_offset,
1044 struct seq_file *m)
1045 {
1046 u16 left_shift_bits, right_shift_bits;
1047 u32 int_data = btf_type_int(t);
1048 u8 nr_bits = BTF_INT_BITS(int_data);
1049 u8 total_bits_offset;
1050 u8 nr_copy_bytes;
1051 u8 nr_copy_bits;
1052 u64 print_num;
1053
1054 /*
1055 * bits_offset is at most 7.
1056 * BTF_INT_OFFSET() cannot exceed 64 bits.
1057 */
1058 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1059 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1060 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1061 nr_copy_bits = nr_bits + bits_offset;
1062 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1063
1064 print_num = 0;
1065 memcpy(&print_num, data, nr_copy_bytes);
1066
1067 #ifdef __BIG_ENDIAN_BITFIELD
1068 left_shift_bits = bits_offset;
1069 #else
1070 left_shift_bits = BITS_PER_U64 - nr_copy_bits;
1071 #endif
1072 right_shift_bits = BITS_PER_U64 - nr_bits;
1073
1074 print_num <<= left_shift_bits;
1075 print_num >>= right_shift_bits;
1076
1077 seq_printf(m, "0x%llx", print_num);
1078 }
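/* Worked example on a little-endian kernel: a 4-bit member starting at
 * bit 2 within its byte (bits_offset == 2, nr_bits == 4) gives
 *	nr_copy_bits     = 6, nr_copy_bytes = 1
 *	left_shift_bits  = 64 - 6 = 58
 *	right_shift_bits = 64 - 4 = 60
 * so the left shift drops everything above the member's top bit and
 * the right shift then drops the 2 padding bits below it, leaving
 * only the 4-bit value in the low bits for printing.
 */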
1079
1080 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1081 u32 type_id, void *data, u8 bits_offset,
1082 struct seq_file *m)
1083 {
1084 u32 int_data = btf_type_int(t);
1085 u8 encoding = BTF_INT_ENCODING(int_data);
1086 bool sign = encoding & BTF_INT_SIGNED;
1087 u8 nr_bits = BTF_INT_BITS(int_data);
1088
1089 if (bits_offset || BTF_INT_OFFSET(int_data) ||
1090 BITS_PER_BYTE_MASKED(nr_bits)) {
1091 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1092 return;
1093 }
1094
1095 switch (nr_bits) {
1096 case 64:
1097 if (sign)
1098 seq_printf(m, "%lld", *(s64 *)data);
1099 else
1100 seq_printf(m, "%llu", *(u64 *)data);
1101 break;
1102 case 32:
1103 if (sign)
1104 seq_printf(m, "%d", *(s32 *)data);
1105 else
1106 seq_printf(m, "%u", *(u32 *)data);
1107 break;
1108 case 16:
1109 if (sign)
1110 seq_printf(m, "%d", *(s16 *)data);
1111 else
1112 seq_printf(m, "%u", *(u16 *)data);
1113 break;
1114 case 8:
1115 if (sign)
1116 seq_printf(m, "%d", *(s8 *)data);
1117 else
1118 seq_printf(m, "%u", *(u8 *)data);
1119 break;
1120 default:
1121 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1122 }
1123 }
1124
1125 static const struct btf_kind_operations int_ops = {
1126 .check_meta = btf_int_check_meta,
1127 .resolve = btf_df_resolve,
1128 .check_member = btf_int_check_member,
1129 .log_details = btf_int_log,
1130 .seq_show = btf_int_seq_show,
1131 };
1132
1133 static int btf_modifier_check_member(struct btf_verifier_env *env,
1134 const struct btf_type *struct_type,
1135 const struct btf_member *member,
1136 const struct btf_type *member_type)
1137 {
1138 const struct btf_type *resolved_type;
1139 u32 resolved_type_id = member->type;
1140 struct btf_member resolved_member;
1141 struct btf *btf = env->btf;
1142
1143 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1144 if (!resolved_type) {
1145 btf_verifier_log_member(env, struct_type, member,
1146 "Invalid member");
1147 return -EINVAL;
1148 }
1149
1150 resolved_member = *member;
1151 resolved_member.type = resolved_type_id;
1152
1153 return btf_type_ops(resolved_type)->check_member(env, struct_type,
1154 &resolved_member,
1155 resolved_type);
1156 }
1157
1158 static int btf_ptr_check_member(struct btf_verifier_env *env,
1159 const struct btf_type *struct_type,
1160 const struct btf_member *member,
1161 const struct btf_type *member_type)
1162 {
1163 u32 struct_size, struct_bits_off, bytes_offset;
1164
1165 struct_size = struct_type->size;
1166 struct_bits_off = member->offset;
1167 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1168
1169 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1170 btf_verifier_log_member(env, struct_type, member,
1171 "Member is not byte aligned");
1172 return -EINVAL;
1173 }
1174
1175 if (struct_size - bytes_offset < sizeof(void *)) {
1176 btf_verifier_log_member(env, struct_type, member,
1177 "Member exceeds struct_size");
1178 return -EINVAL;
1179 }
1180
1181 return 0;
1182 }
1183
1184 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1185 const struct btf_type *t,
1186 u32 meta_left)
1187 {
1188 if (btf_type_vlen(t)) {
1189 btf_verifier_log_type(env, t, "vlen != 0");
1190 return -EINVAL;
1191 }
1192
1193 if (!BTF_TYPE_ID_VALID(t->type)) {
1194 btf_verifier_log_type(env, t, "Invalid type_id");
1195 return -EINVAL;
1196 }
1197
1198 btf_verifier_log_type(env, t, NULL);
1199
1200 return 0;
1201 }
1202
1203 static int btf_modifier_resolve(struct btf_verifier_env *env,
1204 const struct resolve_vertex *v)
1205 {
1206 const struct btf_type *t = v->t;
1207 const struct btf_type *next_type;
1208 u32 next_type_id = t->type;
1209 struct btf *btf = env->btf;
1210 u32 next_type_size = 0;
1211
1212 next_type = btf_type_by_id(btf, next_type_id);
1213 if (!next_type) {
1214 btf_verifier_log_type(env, v->t, "Invalid type_id");
1215 return -EINVAL;
1216 }
1217
1218 if (!env_type_is_resolve_sink(env, next_type) &&
1219 !env_type_is_resolved(env, next_type_id))
1220 return env_stack_push(env, next_type, next_type_id);
1221
1222 /* Figure out the resolved next_type_id with size.
1223 * They will be stored in the current modifier's
1224 * resolved_ids and resolved_sizes such that they can
1225 * save us some type-following when we use them later (e.g. in
1226 * pretty print).
1227 */
1228 if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
1229 if (env_type_is_resolved(env, next_type_id))
1230 next_type = btf_type_id_resolve(btf, &next_type_id);
1231
1232 /* "typedef void new_void", "const void"...etc */
1233 if (!btf_type_is_void(next_type) &&
1234 !btf_type_is_fwd(next_type)) {
1235 btf_verifier_log_type(env, v->t, "Invalid type_id");
1236 return -EINVAL;
1237 }
1238 }
1239
1240 env_stack_pop_resolved(env, next_type_id, next_type_size);
1241
1242 return 0;
1243 }
1244
1245 static int btf_ptr_resolve(struct btf_verifier_env *env,
1246 const struct resolve_vertex *v)
1247 {
1248 const struct btf_type *next_type;
1249 const struct btf_type *t = v->t;
1250 u32 next_type_id = t->type;
1251 struct btf *btf = env->btf;
1252
1253 next_type = btf_type_by_id(btf, next_type_id);
1254 if (!next_type) {
1255 btf_verifier_log_type(env, v->t, "Invalid type_id");
1256 return -EINVAL;
1257 }
1258
1259 if (!env_type_is_resolve_sink(env, next_type) &&
1260 !env_type_is_resolved(env, next_type_id))
1261 return env_stack_push(env, next_type, next_type_id);
1262
1263 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1264 * the modifier may have stopped resolving when it was resolved
1265 * to a ptr (last-resolved-ptr).
1266 *
1267 * We now need to continue from the last-resolved-ptr to
1268 * ensure the last-resolved-ptr is not referring back to
1269 * the current ptr (t).
1270 */
1271 if (btf_type_is_modifier(next_type)) {
1272 const struct btf_type *resolved_type;
1273 u32 resolved_type_id;
1274
1275 resolved_type_id = next_type_id;
1276 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1277
1278 if (btf_type_is_ptr(resolved_type) &&
1279 !env_type_is_resolve_sink(env, resolved_type) &&
1280 !env_type_is_resolved(env, resolved_type_id))
1281 return env_stack_push(env, resolved_type,
1282 resolved_type_id);
1283 }
1284
1285 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1286 if (env_type_is_resolved(env, next_type_id))
1287 next_type = btf_type_id_resolve(btf, &next_type_id);
1288
1289 if (!btf_type_is_void(next_type) &&
1290 !btf_type_is_fwd(next_type) &&
1291 !btf_type_is_func_proto(next_type)) {
1292 btf_verifier_log_type(env, v->t, "Invalid type_id");
1293 return -EINVAL;
1294 }
1295 }
1296
1297 env_stack_pop_resolved(env, next_type_id, 0);
1298
1299 return 0;
1300 }
1301
1302 static void btf_modifier_seq_show(const struct btf *btf,
1303 const struct btf_type *t,
1304 u32 type_id, void *data,
1305 u8 bits_offset, struct seq_file *m)
1306 {
1307 t = btf_type_id_resolve(btf, &type_id);
1308
1309 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1310 }
1311
1312 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1313 u32 type_id, void *data, u8 bits_offset,
1314 struct seq_file *m)
1315 {
1316 /* It is a hashed value */
1317 seq_printf(m, "%p", *(void **)data);
1318 }
1319
1320 static void btf_ref_type_log(struct btf_verifier_env *env,
1321 const struct btf_type *t)
1322 {
1323 btf_verifier_log(env, "type_id=%u", t->type);
1324 }
1325
1326 static struct btf_kind_operations modifier_ops = {
1327 .check_meta = btf_ref_type_check_meta,
1328 .resolve = btf_modifier_resolve,
1329 .check_member = btf_modifier_check_member,
1330 .log_details = btf_ref_type_log,
1331 .seq_show = btf_modifier_seq_show,
1332 };
1333
1334 static struct btf_kind_operations ptr_ops = {
1335 .check_meta = btf_ref_type_check_meta,
1336 .resolve = btf_ptr_resolve,
1337 .check_member = btf_ptr_check_member,
1338 .log_details = btf_ref_type_log,
1339 .seq_show = btf_ptr_seq_show,
1340 };
1341
1342 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1343 const struct btf_type *t,
1344 u32 meta_left)
1345 {
1346 if (btf_type_vlen(t)) {
1347 btf_verifier_log_type(env, t, "vlen != 0");
1348 return -EINVAL;
1349 }
1350
1351 if (t->type) {
1352 btf_verifier_log_type(env, t, "type != 0");
1353 return -EINVAL;
1354 }
1355
1356 btf_verifier_log_type(env, t, NULL);
1357
1358 return 0;
1359 }
1360
1361 static struct btf_kind_operations fwd_ops = {
1362 .check_meta = btf_fwd_check_meta,
1363 .resolve = btf_df_resolve,
1364 .check_member = btf_df_check_member,
1365 .log_details = btf_ref_type_log,
1366 .seq_show = btf_df_seq_show,
1367 };
1368
1369 static int btf_array_check_member(struct btf_verifier_env *env,
1370 const struct btf_type *struct_type,
1371 const struct btf_member *member,
1372 const struct btf_type *member_type)
1373 {
1374 u32 struct_bits_off = member->offset;
1375 u32 struct_size, bytes_offset;
1376 u32 array_type_id, array_size;
1377 struct btf *btf = env->btf;
1378
1379 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1380 btf_verifier_log_member(env, struct_type, member,
1381 "Member is not byte aligned");
1382 return -EINVAL;
1383 }
1384
1385 array_type_id = member->type;
1386 btf_type_id_size(btf, &array_type_id, &array_size);
1387 struct_size = struct_type->size;
1388 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1389 if (struct_size - bytes_offset < array_size) {
1390 btf_verifier_log_member(env, struct_type, member,
1391 "Member exceeds struct_size");
1392 return -EINVAL;
1393 }
1394
1395 return 0;
1396 }
1397
1398 static s32 btf_array_check_meta(struct btf_verifier_env *env,
1399 const struct btf_type *t,
1400 u32 meta_left)
1401 {
1402 const struct btf_array *array = btf_type_array(t);
1403 u32 meta_needed = sizeof(*array);
1404
1405 if (meta_left < meta_needed) {
1406 btf_verifier_log_basic(env, t,
1407 "meta_left:%u meta_needed:%u",
1408 meta_left, meta_needed);
1409 return -EINVAL;
1410 }
1411
1412 if (btf_type_vlen(t)) {
1413 btf_verifier_log_type(env, t, "vlen != 0");
1414 return -EINVAL;
1415 }
1416
1417 if (t->size) {
1418 btf_verifier_log_type(env, t, "size != 0");
1419 return -EINVAL;
1420 }
1421
1422 /* Array elem type and index type cannot be in type void,
1423 * so !array->type and !array->index_type are not allowed.
1424 */
1425 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
1426 btf_verifier_log_type(env, t, "Invalid elem");
1427 return -EINVAL;
1428 }
1429
1430 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
1431 btf_verifier_log_type(env, t, "Invalid index");
1432 return -EINVAL;
1433 }
1434
1435 btf_verifier_log_type(env, t, NULL);
1436
1437 return meta_needed;
1438 }
1439
1440 static int btf_array_resolve(struct btf_verifier_env *env,
1441 const struct resolve_vertex *v)
1442 {
1443 const struct btf_array *array = btf_type_array(v->t);
1444 const struct btf_type *elem_type, *index_type;
1445 u32 elem_type_id, index_type_id;
1446 struct btf *btf = env->btf;
1447 u32 elem_size;
1448
1449 /* Check array->index_type */
1450 index_type_id = array->index_type;
1451 index_type = btf_type_by_id(btf, index_type_id);
1452 if (btf_type_nosize_or_null(index_type)) {
1453 btf_verifier_log_type(env, v->t, "Invalid index");
1454 return -EINVAL;
1455 }
1456
1457 if (!env_type_is_resolve_sink(env, index_type) &&
1458 !env_type_is_resolved(env, index_type_id))
1459 return env_stack_push(env, index_type, index_type_id);
1460
1461 index_type = btf_type_id_size(btf, &index_type_id, NULL);
1462 if (!index_type || !btf_type_is_int(index_type) ||
1463 !btf_type_int_is_regular(index_type)) {
1464 btf_verifier_log_type(env, v->t, "Invalid index");
1465 return -EINVAL;
1466 }
1467
1468 /* Check array->type */
1469 elem_type_id = array->type;
1470 elem_type = btf_type_by_id(btf, elem_type_id);
1471 if (btf_type_nosize_or_null(elem_type)) {
1472 btf_verifier_log_type(env, v->t,
1473 "Invalid elem");
1474 return -EINVAL;
1475 }
1476
1477 if (!env_type_is_resolve_sink(env, elem_type) &&
1478 !env_type_is_resolved(env, elem_type_id))
1479 return env_stack_push(env, elem_type, elem_type_id);
1480
1481 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1482 if (!elem_type) {
1483 btf_verifier_log_type(env, v->t, "Invalid elem");
1484 return -EINVAL;
1485 }
1486
1487 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
1488 btf_verifier_log_type(env, v->t, "Invalid array of int");
1489 return -EINVAL;
1490 }
1491
1492 if (array->nelems && elem_size > U32_MAX / array->nelems) {
1493 btf_verifier_log_type(env, v->t,
1494 "Array size overflows U32_MAX");
1495 return -EINVAL;
1496 }
1497
1498 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
1499
1500 return 0;
1501 }
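/* e.g. for "int a[8]" with a regular 4-byte int as the element type,
 * the array resolves with elem_size == 4 and a recorded size of
 * 4 * 8 == 32 bytes, well below the U32_MAX overflow check above.
 */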
1502
1503 static void btf_array_log(struct btf_verifier_env *env,
1504 const struct btf_type *t)
1505 {
1506 const struct btf_array *array = btf_type_array(t);
1507
1508 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1509 array->type, array->index_type, array->nelems);
1510 }
1511
1512 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1513 u32 type_id, void *data, u8 bits_offset,
1514 struct seq_file *m)
1515 {
1516 const struct btf_array *array = btf_type_array(t);
1517 const struct btf_kind_operations *elem_ops;
1518 const struct btf_type *elem_type;
1519 u32 i, elem_size, elem_type_id;
1520
1521 elem_type_id = array->type;
1522 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1523 elem_ops = btf_type_ops(elem_type);
1524 seq_puts(m, "[");
1525 for (i = 0; i < array->nelems; i++) {
1526 if (i)
1527 seq_puts(m, ",");
1528
1529 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
1530 bits_offset, m);
1531 data += elem_size;
1532 }
1533 seq_puts(m, "]");
1534 }
1535
1536 static struct btf_kind_operations array_ops = {
1537 .check_meta = btf_array_check_meta,
1538 .resolve = btf_array_resolve,
1539 .check_member = btf_array_check_member,
1540 .log_details = btf_array_log,
1541 .seq_show = btf_array_seq_show,
1542 };
1543
1544 static int btf_struct_check_member(struct btf_verifier_env *env,
1545 const struct btf_type *struct_type,
1546 const struct btf_member *member,
1547 const struct btf_type *member_type)
1548 {
1549 u32 struct_bits_off = member->offset;
1550 u32 struct_size, bytes_offset;
1551
1552 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1553 btf_verifier_log_member(env, struct_type, member,
1554 "Member is not byte aligned");
1555 return -EINVAL;
1556 }
1557
1558 struct_size = struct_type->size;
1559 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1560 if (struct_size - bytes_offset < member_type->size) {
1561 btf_verifier_log_member(env, struct_type, member,
1562 "Member exceeds struct_size");
1563 return -EINVAL;
1564 }
1565
1566 return 0;
1567 }
1568
1569 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1570 const struct btf_type *t,
1571 u32 meta_left)
1572 {
1573 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1574 const struct btf_member *member;
1575 u32 meta_needed, last_offset;
1576 struct btf *btf = env->btf;
1577 u32 struct_size = t->size;
1578 u16 i;
1579
1580 meta_needed = btf_type_vlen(t) * sizeof(*member);
1581 if (meta_left < meta_needed) {
1582 btf_verifier_log_basic(env, t,
1583 "meta_left:%u meta_needed:%u",
1584 meta_left, meta_needed);
1585 return -EINVAL;
1586 }
1587
1588 btf_verifier_log_type(env, t, NULL);
1589
1590 last_offset = 0;
1591 for_each_member(i, t, member) {
1592 if (!btf_name_offset_valid(btf, member->name_off)) {
1593 btf_verifier_log_member(env, t, member,
1594 "Invalid member name_offset:%u",
1595 member->name_off);
1596 return -EINVAL;
1597 }
1598
1599 /* A member cannot be in type void */
1600 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
1601 btf_verifier_log_member(env, t, member,
1602 "Invalid type_id");
1603 return -EINVAL;
1604 }
1605
1606 if (is_union && member->offset) {
1607 btf_verifier_log_member(env, t, member,
1608 "Invalid member bits_offset");
1609 return -EINVAL;
1610 }
1611
1612 /*
1613 * ">" instead of ">=" because the last member could be
1614 * "char a[0];"
1615 */
1616 if (last_offset > member->offset) {
1617 btf_verifier_log_member(env, t, member,
1618 "Invalid member bits_offset");
1619 return -EINVAL;
1620 }
1621
1622 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
1623 btf_verifier_log_member(env, t, member,
1624 "Member bits_offset exceeds its struct size");
1625 return -EINVAL;
1626 }
1627
1628 btf_verifier_log_member(env, t, member, NULL);
1629 last_offset = member->offset;
1630 }
1631
1632 return meta_needed;
1633 }
1634
1635 static int btf_struct_resolve(struct btf_verifier_env *env,
1636 const struct resolve_vertex *v)
1637 {
1638 const struct btf_member *member;
1639 int err;
1640 u16 i;
1641
1642 /* Before continuing to resolve the next_member,
1643 * ensure the last member is indeed resolved to a
1644 * type with size info.
1645 */
1646 if (v->next_member) {
1647 const struct btf_type *last_member_type;
1648 const struct btf_member *last_member;
1649 u16 last_member_type_id;
1650
1651 last_member = btf_type_member(v->t) + v->next_member - 1;
1652 last_member_type_id = last_member->type;
1653 if (WARN_ON_ONCE(!env_type_is_resolved(env,
1654 last_member_type_id)))
1655 return -EINVAL;
1656
1657 last_member_type = btf_type_by_id(env->btf,
1658 last_member_type_id);
1659 err = btf_type_ops(last_member_type)->check_member(env, v->t,
1660 last_member,
1661 last_member_type);
1662 if (err)
1663 return err;
1664 }
1665
1666 for_each_member_from(i, v->next_member, v->t, member) {
1667 u32 member_type_id = member->type;
1668 const struct btf_type *member_type = btf_type_by_id(env->btf,
1669 member_type_id);
1670
1671 if (btf_type_nosize_or_null(member_type)) {
1672 btf_verifier_log_member(env, v->t, member,
1673 "Invalid member");
1674 return -EINVAL;
1675 }
1676
1677 if (!env_type_is_resolve_sink(env, member_type) &&
1678 !env_type_is_resolved(env, member_type_id)) {
1679 env_stack_set_next_member(env, i + 1);
1680 return env_stack_push(env, member_type, member_type_id);
1681 }
1682
1683 err = btf_type_ops(member_type)->check_member(env, v->t,
1684 member,
1685 member_type);
1686 if (err)
1687 return err;
1688 }
1689
1690 env_stack_pop_resolved(env, 0, 0);
1691
1692 return 0;
1693 }
1694
1695 static void btf_struct_log(struct btf_verifier_env *env,
1696 const struct btf_type *t)
1697 {
1698 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1699 }
1700
1701 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
1702 u32 type_id, void *data, u8 bits_offset,
1703 struct seq_file *m)
1704 {
1705 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
1706 const struct btf_member *member;
1707 u32 i;
1708
1709 seq_puts(m, "{");
1710 for_each_member(i, t, member) {
1711 const struct btf_type *member_type = btf_type_by_id(btf,
1712 member->type);
1713 u32 member_offset = member->offset;
1714 u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
1715 u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
1716 const struct btf_kind_operations *ops;
1717
1718 if (i)
1719 seq_puts(m, seq);
1720
1721 ops = btf_type_ops(member_type);
1722 ops->seq_show(btf, member_type, member->type,
1723 data + bytes_offset, bits8_offset, m);
1724 }
1725 seq_puts(m, "}");
1726 }
1727
1728 static struct btf_kind_operations struct_ops = {
1729 .check_meta = btf_struct_check_meta,
1730 .resolve = btf_struct_resolve,
1731 .check_member = btf_struct_check_member,
1732 .log_details = btf_struct_log,
1733 .seq_show = btf_struct_seq_show,
1734 };
1735
1736 static int btf_enum_check_member(struct btf_verifier_env *env,
1737 const struct btf_type *struct_type,
1738 const struct btf_member *member,
1739 const struct btf_type *member_type)
1740 {
1741 u32 struct_bits_off = member->offset;
1742 u32 struct_size, bytes_offset;
1743
1744 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1745 btf_verifier_log_member(env, struct_type, member,
1746 "Member is not byte aligned");
1747 return -EINVAL;
1748 }
1749
1750 struct_size = struct_type->size;
1751 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1752 if (struct_size - bytes_offset < sizeof(int)) {
1753 btf_verifier_log_member(env, struct_type, member,
1754 "Member exceeds struct_size");
1755 return -EINVAL;
1756 }
1757
1758 return 0;
1759 }
1760
1761 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
1762 const struct btf_type *t,
1763 u32 meta_left)
1764 {
1765 const struct btf_enum *enums = btf_type_enum(t);
1766 struct btf *btf = env->btf;
1767 u16 i, nr_enums;
1768 u32 meta_needed;
1769
1770 nr_enums = btf_type_vlen(t);
1771 meta_needed = nr_enums * sizeof(*enums);
1772
1773 if (meta_left < meta_needed) {
1774 btf_verifier_log_basic(env, t,
1775 "meta_left:%u meta_needed:%u",
1776 meta_left, meta_needed);
1777 return -EINVAL;
1778 }
1779
1780 if (t->size != sizeof(int)) {
1781 btf_verifier_log_type(env, t, "Expected size:%zu",
1782 sizeof(int));
1783 return -EINVAL;
1784 }
1785
1786 btf_verifier_log_type(env, t, NULL);
1787
1788 for (i = 0; i < nr_enums; i++) {
1789 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
1790 btf_verifier_log(env, "\tInvalid name_offset:%u",
1791 enums[i].name_off);
1792 return -EINVAL;
1793 }
1794
1795 btf_verifier_log(env, "\t%s val=%d\n",
1796 btf_name_by_offset(btf, enums[i].name_off),
1797 enums[i].val);
1798 }
1799
1800 return meta_needed;
1801 }
1802
1803 static void btf_enum_log(struct btf_verifier_env *env,
1804 const struct btf_type *t)
1805 {
1806 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1807 }
1808
1809 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
1810 u32 type_id, void *data, u8 bits_offset,
1811 struct seq_file *m)
1812 {
1813 const struct btf_enum *enums = btf_type_enum(t);
1814 u32 i, nr_enums = btf_type_vlen(t);
1815 int v = *(int *)data;
1816
1817 for (i = 0; i < nr_enums; i++) {
1818 if (v == enums[i].val) {
1819 seq_printf(m, "%s",
1820 btf_name_by_offset(btf, enums[i].name_off));
1821 return;
1822 }
1823 }
1824
1825 seq_printf(m, "%d", v);
1826 }
1827
1828 static struct btf_kind_operations enum_ops = {
1829 .check_meta = btf_enum_check_meta,
1830 .resolve = btf_df_resolve,
1831 .check_member = btf_enum_check_member,
1832 .log_details = btf_enum_log,
1833 .seq_show = btf_enum_seq_show,
1834 };
1835
1836 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
1837 const struct btf_type *t,
1838 u32 meta_left)
1839 {
1840 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
1841
1842 if (meta_left < meta_needed) {
1843 btf_verifier_log_basic(env, t,
1844 "meta_left:%u meta_needed:%u",
1845 meta_left, meta_needed);
1846 return -EINVAL;
1847 }
1848
1849 if (t->name_off) {
1850 btf_verifier_log_type(env, t, "Invalid name");
1851 return -EINVAL;
1852 }
1853
1854 btf_verifier_log_type(env, t, NULL);
1855
1856 return meta_needed;
1857 }
1858
1859 static void btf_func_proto_log(struct btf_verifier_env *env,
1860 const struct btf_type *t)
1861 {
1862 const struct btf_param *args = (const struct btf_param *)(t + 1);
1863 u16 nr_args = btf_type_vlen(t), i;
1864
1865 btf_verifier_log(env, "return=%u args=(", t->type);
1866 if (!nr_args) {
1867 btf_verifier_log(env, "void");
1868 goto done;
1869 }
1870
1871 if (nr_args == 1 && !args[0].type) {
1872 /* Only one vararg */
1873 btf_verifier_log(env, "vararg");
1874 goto done;
1875 }
1876
1877 btf_verifier_log(env, "%u %s", args[0].type,
1878 btf_name_by_offset(env->btf,
1879 args[0].name_off));
1880 for (i = 1; i < nr_args - 1; i++)
1881 btf_verifier_log(env, ", %u %s", args[i].type,
1882 btf_name_by_offset(env->btf,
1883 args[i].name_off));
1884
1885 if (nr_args > 1) {
1886 const struct btf_param *last_arg = &args[nr_args - 1];
1887
1888 if (last_arg->type)
1889 btf_verifier_log(env, ", %u %s", last_arg->type,
1890 btf_name_by_offset(env->btf,
1891 last_arg->name_off));
1892 else
1893 btf_verifier_log(env, ", vararg");
1894 }
1895
1896 done:
1897 btf_verifier_log(env, ")");
1898 }
1899
1900 static struct btf_kind_operations func_proto_ops = {
1901 .check_meta = btf_func_proto_check_meta,
1902 .resolve = btf_df_resolve,
1903 /*
1904 * BTF_KIND_FUNC_PROTO cannot be directly referred by
1905 * a struct's member.
1906 *
1907 * It should be a function pointer instead.
1908 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
1909 *
1910 * Hence, there is no btf_func_check_member().
1911 */
1912 .check_member = btf_df_check_member,
1913 .log_details = btf_func_proto_log,
1914 .seq_show = btf_df_seq_show,
1915 };
1916
1917 static s32 btf_func_check_meta(struct btf_verifier_env *env,
1918 const struct btf_type *t,
1919 u32 meta_left)
1920 {
1921 if (!t->name_off ||
1922 !btf_name_valid_identifier(env->btf, t->name_off)) {
1923 btf_verifier_log_type(env, t, "Invalid name");
1924 return -EINVAL;
1925 }
1926
1927 if (btf_type_vlen(t)) {
1928 btf_verifier_log_type(env, t, "vlen != 0");
1929 return -EINVAL;
1930 }
1931
1932 btf_verifier_log_type(env, t, NULL);
1933
1934 return 0;
1935 }
1936
1937 static struct btf_kind_operations func_ops = {
1938 .check_meta = btf_func_check_meta,
1939 .resolve = btf_df_resolve,
1940 .check_member = btf_df_check_member,
1941 .log_details = btf_ref_type_log,
1942 .seq_show = btf_df_seq_show,
1943 };
1944
1945 static int btf_func_proto_check(struct btf_verifier_env *env,
1946 const struct btf_type *t)
1947 {
1948 const struct btf_type *ret_type;
1949 const struct btf_param *args;
1950 const struct btf *btf;
1951 u16 nr_args, i;
1952 int err;
1953
1954 btf = env->btf;
1955 args = (const struct btf_param *)(t + 1);
1956 nr_args = btf_type_vlen(t);
1957
1958 /* Check func return type which could be "void" (t->type == 0) */
1959 if (t->type) {
1960 u32 ret_type_id = t->type;
1961
1962 ret_type = btf_type_by_id(btf, ret_type_id);
1963 if (!ret_type) {
1964 btf_verifier_log_type(env, t, "Invalid return type");
1965 return -EINVAL;
1966 }
1967
1968 if (btf_type_needs_resolve(ret_type) &&
1969 !env_type_is_resolved(env, ret_type_id)) {
1970 err = btf_resolve(env, ret_type, ret_type_id);
1971 if (err)
1972 return err;
1973 }
1974
1975 /* Ensure the return type is a type that has a size */
1976 if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
1977 btf_verifier_log_type(env, t, "Invalid return type");
1978 return -EINVAL;
1979 }
1980 }
1981
1982 if (!nr_args)
1983 return 0;
1984
1985 /* Last func arg type_id could be 0 if it is a vararg; see the sketch after this function */
1986 if (!args[nr_args - 1].type) {
1987 if (args[nr_args - 1].name_off) {
1988 btf_verifier_log_type(env, t, "Invalid arg#%u",
1989 nr_args);
1990 return -EINVAL;
1991 }
1992 nr_args--;
1993 }
1994
1995 err = 0;
1996 for (i = 0; i < nr_args; i++) {
1997 const struct btf_type *arg_type;
1998 u32 arg_type_id;
1999
2000 arg_type_id = args[i].type;
2001 arg_type = btf_type_by_id(btf, arg_type_id);
2002 if (!arg_type) {
2003 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2004 err = -EINVAL;
2005 break;
2006 }
2007
2008 if (args[i].name_off &&
2009 (!btf_name_offset_valid(btf, args[i].name_off) ||
2010 !btf_name_valid_identifier(btf, args[i].name_off))) {
2011 btf_verifier_log_type(env, t,
2012 "Invalid arg#%u", i + 1);
2013 err = -EINVAL;
2014 break;
2015 }
2016
2017 if (btf_type_needs_resolve(arg_type) &&
2018 !env_type_is_resolved(env, arg_type_id)) {
2019 err = btf_resolve(env, arg_type, arg_type_id);
2020 if (err)
2021 break;
2022 }
2023
2024 if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
2025 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2026 err = -EINVAL;
2027 break;
2028 }
2029 }
2030
2031 return err;
2032 }
2033
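/*
 * Hypothetical sketch of the vararg convention checked above: for a
 * prototype like "int example(const char *fmt, ...)", the trailing "..."
 * is encoded as a final btf_param with both fields zeroed. The name_off
 * and type values below are made up for illustration only.
 */
static const struct btf_param btf_example_vararg_params[] __maybe_unused = {
{ .name_off = 1, .type = 2 },	/* "fmt"; made-up string/type ids */
{ .name_off = 0, .type = 0 },	/* the trailing "..." vararg sentinel */
};
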
2034 static int btf_func_check(struct btf_verifier_env *env,
2035 const struct btf_type *t)
2036 {
2037 const struct btf_type *proto_type;
2038 const struct btf_param *args;
2039 const struct btf *btf;
2040 u16 nr_args, i;
2041
2042 btf = env->btf;
2043 proto_type = btf_type_by_id(btf, t->type);
2044
2045 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
2046 btf_verifier_log_type(env, t, "Invalid type_id");
2047 return -EINVAL;
2048 }
2049
2050 args = (const struct btf_param *)(proto_type + 1);
2051 nr_args = btf_type_vlen(proto_type);
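/*
 * A named FUNC requires every non-vararg argument of its FUNC_PROTO
 * to have a name; btf_func_proto_check() alone does not require this.
 */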
2052 for (i = 0; i < nr_args; i++) {
2053 if (!args[i].name_off && args[i].type) {
2054 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2055 return -EINVAL;
2056 }
2057 }
2058
2059 return 0;
2060 }
2061
2062 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
2063 [BTF_KIND_INT] = &int_ops,
2064 [BTF_KIND_PTR] = &ptr_ops,
2065 [BTF_KIND_ARRAY] = &array_ops,
2066 [BTF_KIND_STRUCT] = &struct_ops,
2067 [BTF_KIND_UNION] = &struct_ops,
2068 [BTF_KIND_ENUM] = &enum_ops,
2069 [BTF_KIND_FWD] = &fwd_ops,
2070 [BTF_KIND_TYPEDEF] = &modifier_ops,
2071 [BTF_KIND_VOLATILE] = &modifier_ops,
2072 [BTF_KIND_CONST] = &modifier_ops,
2073 [BTF_KIND_RESTRICT] = &modifier_ops,
2074 [BTF_KIND_FUNC] = &func_ops,
2075 [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
2076 };
2077
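/*
 * Check the common btf_type header, then the kind-specific meta data.
 * Returns the total number of meta bytes consumed (which
 * btf_check_all_metas() uses to advance to the next type) or a
 * negative error.
 */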
2078 static s32 btf_check_meta(struct btf_verifier_env *env,
2079 const struct btf_type *t,
2080 u32 meta_left)
2081 {
2082 u32 saved_meta_left = meta_left;
2083 s32 var_meta_size;
2084
2085 if (meta_left < sizeof(*t)) {
2086 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
2087 env->log_type_id, meta_left, sizeof(*t));
2088 return -EINVAL;
2089 }
2090 meta_left -= sizeof(*t);
2091
2092 if (t->info & ~BTF_INFO_MASK) {
2093 btf_verifier_log(env, "[%u] Invalid btf_info:%x",
2094 env->log_type_id, t->info);
2095 return -EINVAL;
2096 }
2097
2098 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
2099 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
2100 btf_verifier_log(env, "[%u] Invalid kind:%u",
2101 env->log_type_id, BTF_INFO_KIND(t->info));
2102 return -EINVAL;
2103 }
2104
2105 if (!btf_name_offset_valid(env->btf, t->name_off)) {
2106 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
2107 env->log_type_id, t->name_off);
2108 return -EINVAL;
2109 }
2110
2111 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
2112 if (var_meta_size < 0)
2113 return var_meta_size;
2114
2115 meta_left -= var_meta_size;
2116
2117 return saved_meta_left - meta_left;
2118 }
2119
2120 static int btf_check_all_metas(struct btf_verifier_env *env)
2121 {
2122 struct btf *btf = env->btf;
2123 struct btf_header *hdr;
2124 void *cur, *end;
2125
2126 hdr = &btf->hdr;
2127 cur = btf->nohdr_data + hdr->type_off;
2128 end = cur + hdr->type_len;
2129
2130 env->log_type_id = 1;
2131 while (cur < end) {
2132 struct btf_type *t = cur;
2133 s32 meta_size;
2134
2135 meta_size = btf_check_meta(env, t, end - cur);
2136 if (meta_size < 0)
2137 return meta_size;
2138
2139 btf_add_type(env, t);
2140 cur += meta_size;
2141 env->log_type_id++;
2142 }
2143
2144 return 0;
2145 }
2146
2147 static bool btf_resolve_valid(struct btf_verifier_env *env,
2148 const struct btf_type *t,
2149 u32 type_id)
2150 {
2151 struct btf *btf = env->btf;
2152
2153 if (!env_type_is_resolved(env, type_id))
2154 return false;
2155
2156 if (btf_type_is_struct(t))
2157 return !btf->resolved_ids[type_id] &&
2158 !btf->resolved_sizes[type_id];
2159
2160 if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
2161 t = btf_type_id_resolve(btf, &type_id);
2162 return t && !btf_type_is_modifier(t);
2163 }
2164
2165 if (btf_type_is_array(t)) {
2166 const struct btf_array *array = btf_type_array(t);
2167 const struct btf_type *elem_type;
2168 u32 elem_type_id = array->type;
2169 u32 elem_size;
2170
2171 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2172 return elem_type && !btf_type_is_modifier(elem_type) &&
2173 (array->nelems * elem_size ==
2174 btf->resolved_sizes[type_id]);
2175 }
2176
2177 return false;
2178 }
2179
2180 static int btf_resolve(struct btf_verifier_env *env,
2181 const struct btf_type *t, u32 type_id)
2182 {
2183 u32 save_log_type_id = env->log_type_id;
2184 const struct resolve_vertex *v;
2185 int err = 0;
2186
2187 env->resolve_mode = RESOLVE_TBD;
2188 env_stack_push(env, t, type_id);
2189 while (!err && (v = env_stack_peak(env))) {
2190 env->log_type_id = v->type_id;
2191 err = btf_type_ops(v->t)->resolve(env, v);
2192 }
2193
2194 env->log_type_id = type_id;
2195 if (err == -E2BIG) {
2196 btf_verifier_log_type(env, t,
2197 "Exceeded max resolving depth:%u",
2198 MAX_RESOLVE_DEPTH);
2199 } else if (err == -EEXIST) {
2200 btf_verifier_log_type(env, t, "Loop detected");
2201 }
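/*
 * e.g. a (hypothetical, malformed) reference cycle such as
 * CONST [1] -> TYPEDEF [2] -> CONST [1] can never be resolved and is
 * rejected through the -EEXIST case above.
 */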
2202
2203 /* Final sanity check */
2204 if (!err && !btf_resolve_valid(env, t, type_id)) {
2205 btf_verifier_log_type(env, t, "Invalid resolve state");
2206 err = -EINVAL;
2207 }
2208
2209 env->log_type_id = save_log_type_id;
2210 return err;
2211 }
2212
2213 static int btf_check_all_types(struct btf_verifier_env *env)
2214 {
2215 struct btf *btf = env->btf;
2216 u32 type_id;
2217 int err;
2218
2219 err = env_resolve_init(env);
2220 if (err)
2221 return err;
2222
2223 env->phase++;
2224 for (type_id = 1; type_id <= btf->nr_types; type_id++) {
2225 const struct btf_type *t = btf_type_by_id(btf, type_id);
2226
2227 env->log_type_id = type_id;
2228 if (btf_type_needs_resolve(t) &&
2229 !env_type_is_resolved(env, type_id)) {
2230 err = btf_resolve(env, t, type_id);
2231 if (err)
2232 return err;
2233 }
2234
2235 if (btf_type_is_func_proto(t)) {
2236 err = btf_func_proto_check(env, t);
2237 if (err)
2238 return err;
2239 }
2240
2241 if (btf_type_is_func(t)) {
2242 err = btf_func_check(env, t);
2243 if (err)
2244 return err;
2245 }
2246 }
2247
2248 return 0;
2249 }
2250
2251 static int btf_parse_type_sec(struct btf_verifier_env *env)
2252 {
2253 const struct btf_header *hdr = &env->btf->hdr;
2254 int err;
2255
2256 /* Type section must align to 4 bytes */
2257 if (hdr->type_off & (sizeof(u32) - 1)) {
2258 btf_verifier_log(env, "Unaligned type_off");
2259 return -EINVAL;
2260 }
2261
2262 if (!hdr->type_len) {
2263 btf_verifier_log(env, "No type found");
2264 return -EINVAL;
2265 }
2266
2267 err = btf_check_all_metas(env);
2268 if (err)
2269 return err;
2270
2271 return btf_check_all_types(env);
2272 }
2273
2274 static int btf_parse_str_sec(struct btf_verifier_env *env)
2275 {
2276 const struct btf_header *hdr;
2277 struct btf *btf = env->btf;
2278 const char *start, *end;
2279
2280 hdr = &btf->hdr;
2281 start = btf->nohdr_data + hdr->str_off;
2282 end = start + hdr->str_len;
2283
2284 if (end != btf->data + btf->data_size) {
2285 btf_verifier_log(env, "String section is not at the end");
2286 return -EINVAL;
2287 }
2288
2289 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
2290 start[0] || end[-1]) {
2291 btf_verifier_log(env, "Invalid string section");
2292 return -EINVAL;
2293 }
2294
2295 btf->strings = start;
2296
2297 return 0;
2298 }
2299
2300 static const size_t btf_sec_info_offset[] = {
2301 offsetof(struct btf_header, type_off),
2302 offsetof(struct btf_header, str_off),
2303 };
2304
2305 static int btf_sec_info_cmp(const void *a, const void *b)
2306 {
2307 const struct btf_sec_info *x = a;
2308 const struct btf_sec_info *y = b;
2309
2310 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
2311 }
2312
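/*
 * Hypothetical example of an accepted layout: with hdr_len == 24 and
 * btf_data_size == 124, expected_total is 100; a type section at
 * off 0, len 60 immediately followed by a string section at off 60,
 * len 40 leaves no gap, no overlap and no trailing bytes.
 */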
2313 static int btf_check_sec_info(struct btf_verifier_env *env,
2314 u32 btf_data_size)
2315 {
2316 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
2317 u32 total, expected_total, i;
2318 const struct btf_header *hdr;
2319 const struct btf *btf;
2320
2321 btf = env->btf;
2322 hdr = &btf->hdr;
2323
2324 /* Populate the secs from hdr */
2325 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
2326 secs[i] = *(struct btf_sec_info *)((void *)hdr +
2327 btf_sec_info_offset[i]);
2328
2329 sort(secs, ARRAY_SIZE(btf_sec_info_offset),
2330 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
2331
2332 /* Check for gaps and overlap among sections */
2333 total = 0;
2334 expected_total = btf_data_size - hdr->hdr_len;
2335 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
2336 if (expected_total < secs[i].off) {
2337 btf_verifier_log(env, "Invalid section offset");
2338 return -EINVAL;
2339 }
2340 if (total < secs[i].off) {
2341 /* gap */
2342 btf_verifier_log(env, "Unsupported section found");
2343 return -EINVAL;
2344 }
2345 if (total > secs[i].off) {
2346 btf_verifier_log(env, "Section overlap found");
2347 return -EINVAL;
2348 }
2349 if (expected_total - total < secs[i].len) {
2350 btf_verifier_log(env,
2351 "Total section length too long");
2352 return -EINVAL;
2353 }
2354 total += secs[i].len;
2355 }
2356
2357 /* There is data other than hdr and known sections */
2358 if (expected_total != total) {
2359 btf_verifier_log(env, "Unsupported section found");
2360 return -EINVAL;
2361 }
2362
2363 return 0;
2364 }
2365
2366 static int btf_parse_hdr(struct btf_verifier_env *env)
2367 {
2368 u32 hdr_len, hdr_copy, btf_data_size;
2369 const struct btf_header *hdr;
2370 struct btf *btf;
2371 int err;
2372
2373 btf = env->btf;
2374 btf_data_size = btf->data_size;
2375
2376 if (btf_data_size <
2377 offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
2378 btf_verifier_log(env, "hdr_len not found");
2379 return -EINVAL;
2380 }
2381
2382 hdr = btf->data;
2383 hdr_len = hdr->hdr_len;
2384 if (btf_data_size < hdr_len) {
2385 btf_verifier_log(env, "btf_header not found");
2386 return -EINVAL;
2387 }
2388
2389 /* Ensure the unsupported header fields are zero */
2390 if (hdr_len > sizeof(btf->hdr)) {
2391 u8 *expected_zero = btf->data + sizeof(btf->hdr);
2392 u8 *end = btf->data + hdr_len;
2393
2394 for (; expected_zero < end; expected_zero++) {
2395 if (*expected_zero) {
2396 btf_verifier_log(env, "Unsupported btf_header");
2397 return -E2BIG;
2398 }
2399 }
2400 }
2401
2402 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
2403 memcpy(&btf->hdr, btf->data, hdr_copy);
2404
2405 hdr = &btf->hdr;
2406
2407 btf_verifier_log_hdr(env, btf_data_size);
2408
2409 if (hdr->magic != BTF_MAGIC) {
2410 btf_verifier_log(env, "Invalid magic");
2411 return -EINVAL;
2412 }
2413
2414 if (hdr->version != BTF_VERSION) {
2415 btf_verifier_log(env, "Unsupported version");
2416 return -ENOTSUPP;
2417 }
2418
2419 if (hdr->flags) {
2420 btf_verifier_log(env, "Unsupported flags");
2421 return -ENOTSUPP;
2422 }
2423
2424 if (btf_data_size == hdr->hdr_len) {
2425 btf_verifier_log(env, "No data");
2426 return -EINVAL;
2427 }
2428
2429 err = btf_check_sec_info(env, btf_data_size);
2430 if (err)
2431 return err;
2432
2433 return 0;
2434 }
2435
2436 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
2437 u32 log_level, char __user *log_ubuf, u32 log_size)
2438 {
2439 struct btf_verifier_env *env = NULL;
2440 struct bpf_verifier_log *log;
2441 struct btf *btf = NULL;
2442 u8 *data;
2443 int err;
2444
2445 if (btf_data_size > BTF_MAX_SIZE)
2446 return ERR_PTR(-E2BIG);
2447
2448 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
2449 if (!env)
2450 return ERR_PTR(-ENOMEM);
2451
2452 log = &env->log;
2453 if (log_level || log_ubuf || log_size) {
2454 /* user requested verbose verifier output
2455 * and supplied buffer to store the verification trace
2456 */
2457 log->level = log_level;
2458 log->ubuf = log_ubuf;
2459 log->len_total = log_size;
2460
2461 /* log attributes have to be sane */
2462 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
2463 !log->level || !log->ubuf) {
2464 err = -EINVAL;
2465 goto errout;
2466 }
2467 }
2468
2469 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
2470 if (!btf) {
2471 err = -ENOMEM;
2472 goto errout;
2473 }
2474 env->btf = btf;
2475
2476 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
2477 if (!data) {
2478 err = -ENOMEM;
2479 goto errout;
2480 }
2481
2482 btf->data = data;
2483 btf->data_size = btf_data_size;
2484
2485 if (copy_from_user(data, btf_data, btf_data_size)) {
2486 err = -EFAULT;
2487 goto errout;
2488 }
2489
2490 err = btf_parse_hdr(env);
2491 if (err)
2492 goto errout;
2493
2494 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
2495
2496 err = btf_parse_str_sec(env);
2497 if (err)
2498 goto errout;
2499
2500 err = btf_parse_type_sec(env);
2501 if (err)
2502 goto errout;
2503
2504 if (log->level && bpf_verifier_log_full(log)) {
2505 err = -ENOSPC;
2506 goto errout;
2507 }
2508
2509 btf_verifier_env_free(env);
2510 refcount_set(&btf->refcnt, 1);
2511 return btf;
2512
2513 errout:
2514 btf_verifier_env_free(env);
2515 if (btf)
2516 btf_free(btf);
2517 return ERR_PTR(err);
2518 }
2519
2520 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
2521 struct seq_file *m)
2522 {
2523 const struct btf_type *t = btf_type_by_id(btf, type_id);
2524
2525 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
2526 }
2527
2528 static int btf_release(struct inode *inode, struct file *filp)
2529 {
2530 btf_put(filp->private_data);
2531 return 0;
2532 }
2533
2534 const struct file_operations btf_fops = {
2535 .release = btf_release,
2536 };
2537
2538 static int __btf_new_fd(struct btf *btf)
2539 {
2540 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
2541 }
2542
2543 int btf_new_fd(const union bpf_attr *attr)
2544 {
2545 struct btf *btf;
2546 int ret;
2547
2548 btf = btf_parse(u64_to_user_ptr(attr->btf),
2549 attr->btf_size, attr->btf_log_level,
2550 u64_to_user_ptr(attr->btf_log_buf),
2551 attr->btf_log_size);
2552 if (IS_ERR(btf))
2553 return PTR_ERR(btf);
2554
2555 ret = btf_alloc_id(btf);
2556 if (ret) {
2557 btf_free(btf);
2558 return ret;
2559 }
2560
2561 /*
2562 * The BTF ID is published to userspace.
2563 * From now on, all freeing of this BTF must go through
2564 * call_rcu() (i.e. free it by calling btf_put()).
2565 */
2566
2567 ret = __btf_new_fd(btf);
2568 if (ret < 0)
2569 btf_put(btf);
2570
2571 return ret;
2572 }
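
/*
 * Hypothetical userspace sketch of the fields consumed by btf_new_fd()
 * above (the BPF_BTF_LOAD command of the bpf() syscall):
 *	attr.btf = ptr_to_u64(btf_data);	raw .BTF bytes
 *	attr.btf_size = btf_data_size;
 *	attr.btf_log_level = 1;	optional, needs a log buffer
 *	attr.btf_log_buf = ptr_to_u64(log_buf);	at least 128 bytes
 *	attr.btf_log_size = sizeof(log_buf);
 * ptr_to_u64() stands in for a local helper casting a pointer to a u64.
 * On success an anonymous O_CLOEXEC fd for the loaded BTF is returned.
 */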
2573
2574 struct btf *btf_get_by_fd(int fd)
2575 {
2576 struct btf *btf;
2577 struct fd f;
2578
2579 f = fdget(fd);
2580
2581 if (!f.file)
2582 return ERR_PTR(-EBADF);
2583
2584 if (f.file->f_op != &btf_fops) {
2585 fdput(f);
2586 return ERR_PTR(-EINVAL);
2587 }
2588
2589 btf = f.file->private_data;
2590 refcount_inc(&btf->refcnt);
2591 fdput(f);
2592
2593 return btf;
2594 }
2595
2596 int btf_get_info_by_fd(const struct btf *btf,
2597 const union bpf_attr *attr,
2598 union bpf_attr __user *uattr)
2599 {
2600 struct bpf_btf_info __user *uinfo;
2601 struct bpf_btf_info info = {};
2602 u32 info_copy, btf_copy;
2603 void __user *ubtf;
2604 u32 uinfo_len;
2605
2606 uinfo = u64_to_user_ptr(attr->info.info);
2607 uinfo_len = attr->info.info_len;
2608
2609 info_copy = min_t(u32, uinfo_len, sizeof(info));
2610 if (copy_from_user(&info, uinfo, info_copy))
2611 return -EFAULT;
2612
2613 info.id = btf->id;
2614 ubtf = u64_to_user_ptr(info.btf);
2615 btf_copy = min_t(u32, btf->data_size, info.btf_size);
2616 if (copy_to_user(ubtf, btf->data, btf_copy))
2617 return -EFAULT;
2618 info.btf_size = btf->data_size;
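/*
 * btf_size is reported as the full data_size even when only btf_copy
 * bytes were copied, so a caller may probe with a small info.btf_size
 * first and retry with a buffer of the reported size.
 */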
2619
2620 if (copy_to_user(uinfo, &info, info_copy) ||
2621 put_user(info_copy, &uattr->info.info_len))
2622 return -EFAULT;
2623
2624 return 0;
2625 }
2626
2627 int btf_get_fd_by_id(u32 id)
2628 {
2629 struct btf *btf;
2630 int fd;
2631
2632 rcu_read_lock();
2633 btf = idr_find(&btf_idr, id);
2634 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
2635 btf = ERR_PTR(-ENOENT);
2636 rcu_read_unlock();
2637
2638 if (IS_ERR(btf))
2639 return PTR_ERR(btf);
2640
2641 fd = __btf_new_fd(btf);
2642 if (fd < 0)
2643 btf_put(btf);
2644
2645 return fd;
2646 }
2647
2648 u32 btf_id(const struct btf *btf)
2649 {
2650 return btf->id;
2651 }