]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - kernel/bpf/btf.c
bpf: Process in-kernel BTF
[mirror_ubuntu-hirsute-kernel.git] / kernel / bpf / btf.c
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
3
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/types.h>
6 #include <linux/seq_file.h>
7 #include <linux/compiler.h>
8 #include <linux/ctype.h>
9 #include <linux/errno.h>
10 #include <linux/slab.h>
11 #include <linux/anon_inodes.h>
12 #include <linux/file.h>
13 #include <linux/uaccess.h>
14 #include <linux/kernel.h>
15 #include <linux/idr.h>
16 #include <linux/sort.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/btf.h>
19
/* BTF (BPF Type Format) is the meta data format which describes
 * the data types of BPF program/map. Hence, it basically focuses
 * on the C programming language, which modern BPF primarily
 * uses.
24 *
25 * ELF Section:
26 * ~~~~~~~~~~~
27 * The BTF data is stored under the ".BTF" ELF section
28 *
29 * struct btf_type:
30 * ~~~~~~~~~~~~~~~
31 * Each 'struct btf_type' object describes a C data type.
32 * Depending on the type it is describing, a 'struct btf_type'
33 * object may be followed by more data. F.e.
34 * To describe an array, 'struct btf_type' is followed by
35 * 'struct btf_array'.
36 *
37 * 'struct btf_type' and any extra data following it are
38 * 4 bytes aligned.
39 *
40 * Type section:
41 * ~~~~~~~~~~~~~
42 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
46 *
47 * type_id:
48 * ~~~~~~~
49 * Each btf_type object is identified by a type_id. The type_id
50 * is implicitly implied by the location of the btf_type object in
51 * the BTF type section. The first one has type_id 1. The second
52 * one has type_id 2...etc. Hence, an earlier btf_type has
53 * a smaller type_id.
54 *
55 * A btf_type object may refer to another btf_type object by using
56 * type_id (i.e. the "type" in the "struct btf_type").
57 *
58 * NOTE that we cannot assume any reference-order.
59 * A btf_type object can refer to an earlier btf_type object
60 * but it can also refer to a later btf_type object.
61 *
62 * For example, to describe "const void *". A btf_type
63 * object describing "const" may refer to another btf_type
64 * object describing "void *". This type-reference is done
65 * by specifying type_id:
66 *
67 * [1] CONST (anon) type_id=2
68 * [2] PTR (anon) type_id=0
69 *
70 * The above is the btf_verifier debug log:
71 * - Each line started with "[?]" is a btf_type object
72 * - [?] is the type_id of the btf_type object.
73 * - CONST/PTR is the BTF_KIND_XXX
74 * - "(anon)" is the name of the type. It just
75 * happens that CONST and PTR has no name.
76 * - type_id=XXX is the 'u32 type' in btf_type
77 *
78 * NOTE: "void" has type_id 0
79 *
80 * String section:
81 * ~~~~~~~~~~~~~~
82 * The BTF string section contains the names used by the type section.
83 * Each string is referred by an "offset" from the beginning of the
84 * string section.
85 *
86 * Each string is '\0' terminated.
87 *
88 * The first character in the string section must be '\0'
89 * which is used to mean 'anonymous'. Some btf_type may not
90 * have a name.
91 */
92
93 /* BTF verification:
94 *
95 * To verify BTF data, two passes are needed.
96 *
97 * Pass #1
98 * ~~~~~~~
99 * The first pass is to collect all btf_type objects to
100 * an array: "btf->types".
101 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
106 *
107 * Without knowing the location of each type_id, most verifications
108 * cannot be done. e.g. an earlier btf_type may refer to a later
109 * btf_type (recall the "const void *" above), so we cannot
110 * check this type-reference in the first pass.
111 *
112 * In the first pass, it still does some verifications (e.g.
113 * checking the name is a valid offset to the string section).
114 *
115 * Pass #2
116 * ~~~~~~~
117 * The main focus is to resolve a btf_type that is referring
118 * to another type.
119 *
120 * We have to ensure the referring type:
121 * 1) does exist in the BTF (i.e. in btf->types[])
122 * 2) does not cause a loop:
123 * struct A {
124 * struct B b;
125 * };
126 *
127 * struct B {
128 * struct A a;
129 * };
130 *
131 * btf_type_needs_resolve() decides if a btf_type needs
132 * to be resolved.
133 *
134 * The needs_resolve type implements the "resolve()" ops which
135 * essentially does a DFS and detects backedge.
136 *
137 * During resolve (or DFS), different C types have different
138 * "RESOLVED" conditions.
139 *
140 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
141 * members because a member is always referring to another
142 * type. A struct's member can be treated as "RESOLVED" if
143 * it is referring to a BTF_KIND_PTR. Otherwise, the
144 * following valid C struct would be rejected:
145 *
146 * struct A {
147 * int m;
148 * struct A *a;
149 * };
150 *
151 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
152 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
153 * detect a pointer loop, e.g.:
154 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
155 * ^ |
156 * +-----------------------------------------+
157 *
158 */
159
160 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
161 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
162 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
163 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
164 #define BITS_ROUNDUP_BYTES(bits) \
165 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
166
167 #define BTF_INFO_MASK 0x8f00ffff
168 #define BTF_INT_MASK 0x0fffffff
169 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
170 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
171
172 /* 16MB for 64k structs and each has 16 members and
173 * a few MB spaces for the string section.
174 * The hard limit is S32_MAX.
175 */
176 #define BTF_MAX_SIZE (16 * 1024 * 1024)
177
178 #define for_each_member(i, struct_type, member) \
179 for (i = 0, member = btf_type_member(struct_type); \
180 i < btf_type_vlen(struct_type); \
181 i++, member++)
182
183 #define for_each_member_from(i, from, struct_type, member) \
184 for (i = from, member = btf_type_member(struct_type) + from; \
185 i < btf_type_vlen(struct_type); \
186 i++, member++)
187
188 #define for_each_vsi(i, struct_type, member) \
189 for (i = 0, member = btf_type_var_secinfo(struct_type); \
190 i < btf_type_vlen(struct_type); \
191 i++, member++)
192
193 #define for_each_vsi_from(i, from, struct_type, member) \
194 for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
195 i < btf_type_vlen(struct_type); \
196 i++, member++)
197
198 DEFINE_IDR(btf_idr);
199 DEFINE_SPINLOCK(btf_idr_lock);
200
/* In-kernel representation of one loaded BTF object. */
struct btf {
	void *data;			/* raw BTF image (header + type + string sections) */
	struct btf_type **types;	/* type_id -> type; [0] is always &btf_void */
	u32 *resolved_ids;		/* type_id -> resolved type_id (pass #2 result) */
	u32 *resolved_sizes;		/* type_id -> resolved byte size (pass #2 result) */
	const char *strings;		/* start of the string section */
	void *nohdr_data;		/* data past the header */
	struct btf_header hdr;		/* copy of the image header */
	u32 nr_types;			/* number of types, NOT counting btf_void */
	u32 types_size;			/* current capacity of types[] */
	u32 data_size;			/* size of 'data' in bytes */
	refcount_t refcnt;
	u32 id;				/* idr-assigned id, see btf_alloc_id() */
	struct rcu_head rcu;		/* for btf_free_rcu() */
};
216
/* Which verification pass is in progress (see "BTF verification" above). */
enum verifier_phase {
	CHECK_META,	/* pass #1: collect types, validate meta data */
	CHECK_TYPE,	/* pass #2: resolve type references */
};
221
/* One frame of the DFS stack used while resolving type references. */
struct resolve_vertex {
	const struct btf_type *t;	/* type being resolved */
	u32 type_id;			/* its id in btf->types[] */
	u16 next_member;		/* member to continue from when resumed */
};
227
/* Per-type DFS state, kept in env->visit_states[type_id]. */
enum visit_state {
	NOT_VISITED,
	VISITED,	/* currently on the resolve stack */
	RESOLVED,	/* resolution result recorded in btf->resolved_* */
};
233
/* What the current DFS is resolving for; selects which kinds act as
 * sinks in env_type_is_resolve_sink().
 */
enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};
241
242 #define MAX_RESOLVE_DEPTH 32
243
/* Location of one section (type or string) inside the BTF image. */
struct btf_sec_info {
	u32 off;	/* section offset */
	u32 len;	/* section length in bytes */
};
248
/* Scratch state for one run of BTF verification. */
struct btf_verifier_env {
	struct btf *btf;		/* object under verification */
	u8 *visit_states;		/* enum visit_state, one per type_id */
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];	/* DFS stack */
	struct bpf_verifier_log log;
	u32 log_type_id;		/* type_id shown as "[%u]" in the log */
	u32 top_stack;			/* number of used stack[] entries */
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};
259
/* Human-readable BTF_KIND_* names used in verifier log messages. */
static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
};
278
/* Per-kind verification and printing callbacks; one instance per
 * BTF_KIND_* is registered in kind_ops[].
 */
struct btf_kind_operations {
	/* CHECK_META: validate the data following 'struct btf_type' */
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	/* CHECK_TYPE: resolve the types this type refers to */
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	/* validate this type when used as a struct/union member */
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	/* as check_member, but when the struct has the kind_flag set */
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	/* append kind-specific details to the type's log line */
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	/* print a value of this type through a seq_file */
	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offsets,
			 struct seq_file *m);
};
299
300 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
301 static struct btf_type btf_void;
302
303 static int btf_resolve(struct btf_verifier_env *env,
304 const struct btf_type *t, u32 type_id);
305
306 static bool btf_type_is_modifier(const struct btf_type *t)
307 {
308 /* Some of them is not strictly a C modifier
309 * but they are grouped into the same bucket
310 * for BTF concern:
311 * A type (t) that refers to another
312 * type through t->type AND its size cannot
313 * be determined without following the t->type.
314 *
315 * ptr does not fall into this bucket
316 * because its size is always sizeof(void *).
317 */
318 switch (BTF_INFO_KIND(t->info)) {
319 case BTF_KIND_TYPEDEF:
320 case BTF_KIND_VOLATILE:
321 case BTF_KIND_CONST:
322 case BTF_KIND_RESTRICT:
323 return true;
324 }
325
326 return false;
327 }
328
/* True only for the singleton btf_void, which backs type_id 0. */
bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}
333
334 static bool btf_type_is_fwd(const struct btf_type *t)
335 {
336 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
337 }
338
339 static bool btf_type_is_func(const struct btf_type *t)
340 {
341 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
342 }
343
344 static bool btf_type_is_func_proto(const struct btf_type *t)
345 {
346 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
347 }
348
349 static bool btf_type_nosize(const struct btf_type *t)
350 {
351 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
352 btf_type_is_func(t) || btf_type_is_func_proto(t);
353 }
354
355 static bool btf_type_nosize_or_null(const struct btf_type *t)
356 {
357 return !t || btf_type_nosize(t);
358 }
359
360 /* union is only a special case of struct:
361 * all its offsetof(member) == 0
362 */
363 static bool btf_type_is_struct(const struct btf_type *t)
364 {
365 u8 kind = BTF_INFO_KIND(t->info);
366
367 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
368 }
369
370 static bool __btf_type_is_struct(const struct btf_type *t)
371 {
372 return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
373 }
374
375 static bool btf_type_is_array(const struct btf_type *t)
376 {
377 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
378 }
379
380 static bool btf_type_is_ptr(const struct btf_type *t)
381 {
382 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
383 }
384
385 static bool btf_type_is_int(const struct btf_type *t)
386 {
387 return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
388 }
389
390 static bool btf_type_is_var(const struct btf_type *t)
391 {
392 return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
393 }
394
395 static bool btf_type_is_datasec(const struct btf_type *t)
396 {
397 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
398 }
399
400 /* Types that act only as a source, not sink or intermediate
401 * type when resolving.
402 */
403 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
404 {
405 return btf_type_is_var(t) ||
406 btf_type_is_datasec(t);
407 }
408
409 /* What types need to be resolved?
410 *
411 * btf_type_is_modifier() is an obvious one.
412 *
413 * btf_type_is_struct() because its member refers to
414 * another type (through member->type).
415 *
416 * btf_type_is_var() because the variable refers to
417 * another type. btf_type_is_datasec() holds multiple
418 * btf_type_is_var() types that need resolving.
419 *
420 * btf_type_is_array() because its element (array->type)
421 * refers to another type. Array can be thought of a
422 * special case of struct while array just has the same
423 * member-type repeated by array->nelems of times.
424 */
425 static bool btf_type_needs_resolve(const struct btf_type *t)
426 {
427 return btf_type_is_modifier(t) ||
428 btf_type_is_ptr(t) ||
429 btf_type_is_struct(t) ||
430 btf_type_is_array(t) ||
431 btf_type_is_var(t) ||
432 btf_type_is_datasec(t);
433 }
434
435 /* t->size can be used */
436 static bool btf_type_has_size(const struct btf_type *t)
437 {
438 switch (BTF_INFO_KIND(t->info)) {
439 case BTF_KIND_INT:
440 case BTF_KIND_STRUCT:
441 case BTF_KIND_UNION:
442 case BTF_KIND_ENUM:
443 case BTF_KIND_DATASEC:
444 return true;
445 }
446
447 return false;
448 }
449
450 static const char *btf_int_encoding_str(u8 encoding)
451 {
452 if (encoding == 0)
453 return "(none)";
454 else if (encoding == BTF_INT_SIGNED)
455 return "SIGNED";
456 else if (encoding == BTF_INT_CHAR)
457 return "CHAR";
458 else if (encoding == BTF_INT_BOOL)
459 return "BOOL";
460 else
461 return "UNKN";
462 }
463
/* vlen from the info word: number of members/args/secinfos/etc. */
static u16 btf_type_vlen(const struct btf_type *t)
{
	return BTF_INFO_VLEN(t->info);
}
468
/* kind_flag from the info word; changes how member->offset is decoded. */
static bool btf_type_kflag(const struct btf_type *t)
{
	return BTF_INFO_KFLAG(t->info);
}
473
474 static u32 btf_member_bit_offset(const struct btf_type *struct_type,
475 const struct btf_member *member)
476 {
477 return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
478 : member->offset;
479 }
480
481 static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
482 const struct btf_member *member)
483 {
484 return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
485 : 0;
486 }
487
/* The BTF_INT_* data word immediately follows a BTF_KIND_INT type. */
static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}
492
/* A BTF_KIND_ARRAY type is immediately followed by one btf_array. */
static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}
497
/* A struct/union type is immediately followed by vlen btf_members. */
static const struct btf_member *btf_type_member(const struct btf_type *t)
{
	return (const struct btf_member *)(t + 1);
}
502
/* A BTF_KIND_ENUM type is immediately followed by vlen btf_enums. */
static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}
507
/* A BTF_KIND_VAR type is immediately followed by one btf_var. */
static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}
512
/* A BTF_KIND_DATASEC type is immediately followed by vlen secinfos. */
static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
{
	return (const struct btf_var_secinfo *)(t + 1);
}
517
/* Look up the per-kind callbacks for 't'.
 * NOTE(review): assumes t->info holds a kind < NR_BTF_KINDS — presumably
 * validated during CHECK_META; confirm against the meta checks.
 */
static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}
522
523 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
524 {
525 return BTF_STR_OFFSET_VALID(offset) &&
526 offset < btf->hdr.str_len;
527 }
528
/* Is 'c' acceptable in a BTF name?  Letters and '_' always are
 * (digits too, except as the first character); '.' only when dot_ok.
 */
static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
{
	if (c == '_')
		return true;
	if (c == '.')
		return dot_ok;

	return first ? isalpha(c) : isalnum(c);
}
539
/* Check that the string at 'offset' is a valid name: non-empty,
 * shorter than KSYM_NAME_LEN, and made only of characters accepted
 * by __btf_name_char_ok() (dot_ok permits '.' for section names).
 */
static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
{
	/* offset must be valid */
	const char *src = &btf->strings[offset];
	const char *src_limit;

	/* the first character has stricter rules (no digits) */
	if (!__btf_name_char_ok(*src, true, dot_ok))
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!__btf_name_char_ok(*src, false, dot_ok))
			return false;
		src++;
	}

	/* only valid if we stopped on the NUL, not on the length limit */
	return !*src;
}
560
561 /* Only C-style identifier is permitted. This can be relaxed if
562 * necessary.
563 */
564 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
565 {
566 return __btf_name_valid(btf, offset, false);
567 }
568
569 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
570 {
571 return __btf_name_valid(btf, offset, true);
572 }
573
574 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
575 {
576 if (!offset)
577 return "(anon)";
578 else if (offset < btf->hdr.str_len)
579 return &btf->strings[offset];
580 else
581 return "(invalid-name-offset)";
582 }
583
584 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
585 {
586 if (offset < btf->hdr.str_len)
587 return &btf->strings[offset];
588
589 return NULL;
590 }
591
592 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
593 {
594 if (type_id > btf->nr_types)
595 return NULL;
596
597 return btf->types[type_id];
598 }
599
/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	/* reject: partial bytes, a non-zero bit offset (bit field), or
	 * a size other than 1/2/4/8/16 bytes
	 */
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
	     nr_bytes != (2 * sizeof(u64)))) {
		return false;
	}

	return true;
}
622
/*
 * Check that given struct member is a regular int with expected
 * offset and size.  Both expected_offset and expected_size are in
 * bytes.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	/* follow modifiers to the size-determining type */
	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag set, int should be a regular int and
		 * bit offset should be at byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	/* no kflag: m->offset is a plain bit offset; both it and the
	 * int width must be whole bytes matching the expectation
	 */
	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}
663
/* Unconditionally append a formatted message to the verifier log. */
__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
673
/* Append a formatted message to the verifier log, but only when the
 * caller actually requested logging.
 */
__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
687
/* Log one type as "[type_id] KIND name details msg\n".
 * @log_details: when true, also invoke the kind's log_details()
 * callback for kind-specific information.
 */
__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* btf verifier prints all types it is processing via
	 * btf_verifier_log_type(..., fmt = NULL).
	 * Skip those prints for in-kernel BTF verification.
	 */
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}
726
727 #define btf_verifier_log_type(env, t, ...) \
728 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
729 #define btf_verifier_log_basic(env, t, ...) \
730 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
731
/* Log one struct/union member, decoding member->offset according to
 * the struct's kind_flag, optionally followed by a formatted message.
 */
__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	/* The CHECK_META phase already did a btf dump.
	 *
	 * If member is logged again, it must hit an error in
	 * parsing this member. It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}
777
/* Log one datasec var_secinfo entry, optionally followed by a
 * formatted message.
 */
__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	/* outside CHECK_META, re-print the owning datasec for context */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}
805
/* Dump the parsed BTF header to the verifier log.  Skipped entirely
 * for in-kernel (BPF_LOG_KERNEL) verification.
 */
static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}
829
/* Append 't' to btf->types[], growing the array when needed.  On
 * success the new type's id is btf->nr_types (after the increment).
 * Returns 0, -E2BIG when BTF_MAX_TYPE is reached, or -ENOMEM.
 */
static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	/* < 2 because +1 for btf_void which is always in btf->types[0].
	 * btf_void is not accounted in btf->nr_types because btf_void
	 * does not come from the BTF file.
	 */
	if (btf->types_size - btf->nr_types < 2) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		/* grow by 25%, at least 16 slots, capped at BTF_MAX_TYPE */
		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		/* first expansion installs the shared btf_void at [0] */
		if (btf->nr_types == 0)
			new_types[0] = &btf_void;
		else
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * (btf->nr_types + 1));

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}
873
/* Assign a system-wide unique id to 'btf' via btf_idr.  Returns 0 on
 * success; -ENOSPC (or the idr's negative error) on failure.
 */
static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	/* ids start at 1, so 0 would indicate an idr bug */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}
891
/* Release btf->id back to btf_idr. */
static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map. Some of the map_delete_elem()
	 * implementation may have irq disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}
909
/* Free 'btf' and every array it owns, including the raw BTF data. */
static void btf_free(struct btf *btf)
{
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}
918
/* RCU callback scheduled by btf_put() to free the btf object. */
static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}
925
/* Drop one reference; on the last one, remove the id and free the
 * object after a grace period.  NULL is a no-op.
 */
void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}
933
/* Allocate the per-type arrays needed by pass #2: resolved_sizes and
 * resolved_ids (handed over to btf) and visit_states (kept by env).
 * Each is indexed by type_id.  Returns 0 or -ENOMEM (all partial
 * allocations are freed on failure).
 */
static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	/* +1 for btf_void */
	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}
970
/* Free the verifier env and what it owns (env->btf is NOT freed). */
static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}
976
/* Does 'next_type' terminate the resolve DFS under the current
 * resolve mode?  BUG()s on an unknown mode.
 */
static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
	default:
		BUG();
	}
}
1001
/* True once env_stack_pop_resolved() has recorded this type's result. */
static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	return env->visit_states[type_id] == RESOLVED;
}
1007
/* Push (t, type_id) onto the resolve stack and mark it VISITED.
 * Returns -E2BIG when the chain is deeper than MAX_RESOLVE_DEPTH and
 * -EEXIST when the type was seen before (a loop, or already
 * resolved).  The first non-TBD push also latches the resolve mode
 * for the rest of this DFS.
 */
static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (env->visit_states[type_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}
1035
/* Remember, on the top frame, which member to continue from when the
 * DFS resumes this type.
 */
static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}
1041
/* Pop the top frame and record its resolution result (the resolved
 * type id and byte size) in btf->resolved_*[], marking it RESOLVED.
 */
static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}
1053
/* Return the top frame without popping, or NULL when the stack is
 * empty.  ("peak" is a long-standing typo for "peek"; kept so the
 * name matches its callers.)
 */
static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}
1058
/* The input param "type_id" must point to a needs_resolve type.
 * Replaces *type_id with its resolved id and returns that type.
 */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf->resolved_ids[*type_id];
	return btf_type_by_id(btf, *type_id);
}
1066
/* Find the type that determines the size of *type_id.
 *
 * Sized types (int/struct/union/enum/datasec), arrays and pointers
 * determine their own size; modifiers and vars are followed through
 * btf->resolved_ids[] one step (resolution guarantees the target is
 * then size-determining).  On success *type_id is updated to the
 * size-determining type's id, *ret_size (if non-NULL) receives the
 * byte size, and that type is returned.  Returns NULL for void/fwd/
 * func/func_proto or a broken chain.
 */
const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		/* array sizes are precomputed during resolve */
		size = btf->resolved_sizes[size_type_id];
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
				 !btf_type_is_var(size_type)))
			return NULL;

		/* follow the modifier/var to its resolved target */
		size_type_id = btf->resolved_ids[size_type_id];
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf->resolved_sizes[size_type_id];
		else if (btf_type_is_ptr(size_type))
			size = sizeof(void *);
		else
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}
1109
/* Default .check_member for kinds that can never be a struct member;
 * always logs and fails.
 */
static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}
1119
1120 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1121 const struct btf_type *struct_type,
1122 const struct btf_member *member,
1123 const struct btf_type *member_type)
1124 {
1125 btf_verifier_log_basic(env, struct_type,
1126 "Unsupported check_kflag_member");
1127 return -EINVAL;
1128 }
1129
/* Used for ptr, array and struct/union type members.
 * int, enum and modifier types have their specific callback functions.
 */
static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
					  const struct btf_type *struct_type,
					  const struct btf_member *member,
					  const struct btf_type *member_type)
{
	/* In a kind_flag struct, member->offset packs a bitfield size
	 * along with the bit offset.  These kinds cannot be bitfields,
	 * so the packed bitfield size must be 0.
	 */
	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	/* bitfield size is 0, so member->offset represents bit offset only.
	 * It is safe to call non kflag check_member variants.
	 */
	return btf_type_ops(member_type)->check_member(env, struct_type,
						       member,
						       member_type);
}
1151
1152 static int btf_df_resolve(struct btf_verifier_env *env,
1153 const struct resolve_vertex *v)
1154 {
1155 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1156 return -EINVAL;
1157 }
1158
1159 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
1160 u32 type_id, void *data, u8 bits_offsets,
1161 struct seq_file *m)
1162 {
1163 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1164 }
1165
/* Check that an int member of a non-kind_flag struct/union lies fully
 * inside the struct.  member->offset here is a plain bit offset.
 */
static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 int_data = btf_type_int(member_type);
	u32 struct_bits_off = member->offset;
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;
	u32 bytes_offset;

	/* The int's own BTF_INT_OFFSET is added on top of the member
	 * offset below; guard the u32 addition against overflow first.
	 */
	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
		btf_verifier_log_member(env, struct_type, member,
					"bits_offset exceeds U32_MAX");
		return -EINVAL;
	}

	struct_bits_off += BTF_INT_OFFSET(int_data);
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	/* number of bits that must be readable starting at bytes_offset */
	nr_copy_bits = BTF_INT_BITS(int_data) +
		BITS_PER_BYTE_MASKED(struct_bits_off);

	/* the seq_show bitfield path copies at most 128 bits */
	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
1203
/* Check an int member of a kind_flag struct/union: member->offset
 * packs both a bitfield size and a bit offset.
 */
static int btf_int_check_kflag_member(struct btf_verifier_env *env,
				      const struct btf_type *struct_type,
				      const struct btf_member *member,
				      const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
	u32 int_data = btf_type_int(member_type);
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;

	/* a regular int type is required for the kflag int member */
	if (!btf_type_int_is_regular(member_type)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member base type");
		return -EINVAL;
	}

	/* check sanity of bitfield size */
	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_int_data_bits = BTF_INT_BITS(int_data);
	if (!nr_bits) {
		/* Not a bitfield member, member offset must be at byte
		 * boundary.
		 */
		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
			btf_verifier_log_member(env, struct_type, member,
						"Invalid member offset");
			return -EINVAL;
		}

		/* a non-bitfield member spans the int's full width */
		nr_bits = nr_int_data_bits;
	} else if (nr_bits > nr_int_data_bits) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	/* the seq_show bitfield path copies at most 128 bits */
	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
1259
/* Validate the metadata of a BTF_KIND_INT type.
 *
 * Returns the number of extra bytes consumed after 'struct btf_type'
 * (the 4-byte int_data word) on success, or a negative error.
 */
static s32 btf_int_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
	u16 encoding;

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* ints have neither members nor a kind_flag */
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	int_data = btf_type_int(t);
	/* no bits outside the encoding/offset/bits fields may be set */
	if (int_data & ~BTF_INT_MASK) {
		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
				       int_data);
		return -EINVAL;
	}

	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);

	/* the widest supported int is __int128 */
	if (nr_bits > BITS_PER_U128) {
		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
				      BITS_PER_U128);
		return -EINVAL;
	}

	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
		return -EINVAL;
	}

	/*
	 * Only one of the encoding bits is allowed and it
	 * should be sufficient for the pretty print purpose (i.e. decoding).
	 * Multiple bits can be allowed later if it is found
	 * to be insufficient.
	 */
	encoding = BTF_INT_ENCODING(int_data);
	if (encoding &&
	    encoding != BTF_INT_SIGNED &&
	    encoding != BTF_INT_CHAR &&
	    encoding != BTF_INT_BOOL) {
		btf_verifier_log_type(env, t, "Unsupported encoding");
		return -ENOTSUPP;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
1323
/* log_details callback for BTF_KIND_INT: dump the decoded int_data */
static void btf_int_log(struct btf_verifier_env *env,
			const struct btf_type *t)
{
	int int_data = btf_type_int(t);

	btf_verifier_log(env,
			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
			 t->size, BTF_INT_OFFSET(int_data),
			 BTF_INT_BITS(int_data),
			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}
1335
/* Print a 128-bit number stored at *data as hex.  The value is read as
 * two u64 halves whose order depends on endianness; leading zeros of
 * the upper half are suppressed by printing only the lower half when
 * the upper half is 0.
 */
static void btf_int128_print(struct seq_file *m, void *data)
{
	/* data points to a __int128 number.
	 * Suppose
	 *     int128_num = *(__int128 *)data;
	 * The below formulas shows what upper_num and lower_num represents:
	 *     upper_num = int128_num >> 64;
	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
	 */
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = *(u64 *)data;
	lower_num = *(u64 *)(data + 8);
#else
	upper_num = *(u64 *)(data + 8);
	lower_num = *(u64 *)data;
#endif
	if (upper_num == 0)
		seq_printf(m, "0x%llx", lower_num);
	else
		seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
}
1359
1360 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1361 u16 right_shift_bits)
1362 {
1363 u64 upper_num, lower_num;
1364
1365 #ifdef __BIG_ENDIAN_BITFIELD
1366 upper_num = print_num[0];
1367 lower_num = print_num[1];
1368 #else
1369 upper_num = print_num[1];
1370 lower_num = print_num[0];
1371 #endif
1372
1373 /* shake out un-needed bits by shift/or operations */
1374 if (left_shift_bits >= 64) {
1375 upper_num = lower_num << (left_shift_bits - 64);
1376 lower_num = 0;
1377 } else {
1378 upper_num = (upper_num << left_shift_bits) |
1379 (lower_num >> (64 - left_shift_bits));
1380 lower_num = lower_num << left_shift_bits;
1381 }
1382
1383 if (right_shift_bits >= 64) {
1384 lower_num = upper_num >> (right_shift_bits - 64);
1385 upper_num = 0;
1386 } else {
1387 lower_num = (lower_num >> right_shift_bits) |
1388 (upper_num << (64 - right_shift_bits));
1389 upper_num = upper_num >> right_shift_bits;
1390 }
1391
1392 #ifdef __BIG_ENDIAN_BITFIELD
1393 print_num[0] = upper_num;
1394 print_num[1] = lower_num;
1395 #else
1396 print_num[0] = lower_num;
1397 print_num[1] = upper_num;
1398 #endif
1399 }
1400
/* Print a bitfield: nr_bits bits starting at bit bits_offset (< 8)
 * within *data.  The covering bytes are copied into a 128-bit buffer
 * which is then shifted so that only the field's bits remain before
 * printing.
 */
static void btf_bitfield_seq_show(void *data, u8 bits_offset,
				  u8 nr_bits, struct seq_file *m)
{
	u16 left_shift_bits, right_shift_bits;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num[2] = {};

	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	memcpy(print_num, data, nr_copy_bytes);

	/* on big endian the field's high bits sit at the low addresses,
	 * so the discard shift is just bits_offset; on little endian it
	 * is counted from the top of the 128-bit buffer.
	 */
#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U128 - nr_bits;

	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
	btf_int128_print(m, print_num);
}
1424
1425
/* Print an int through the bitfield path: fold the int's own
 * BTF_INT_OFFSET into the byte/bit position and delegate to
 * btf_bitfield_seq_show().
 */
static void btf_int_bits_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  void *data, u8 bits_offset,
				  struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;

	/*
	 * bits_offset is at most 7.
	 * BTF_INT_OFFSET() cannot exceed 128 bits.
	 */
	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
}
1444
/* seq_show callback for BTF_KIND_INT.  Byte-aligned ints of exactly
 * 8/16/32/64/128 bits are printed directly; anything else (bit offset,
 * int-level offset, or odd bit width) goes through the bitfield path.
 */
static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 encoding = BTF_INT_ENCODING(int_data);
	bool sign = encoding & BTF_INT_SIGNED;
	u8 nr_bits = BTF_INT_BITS(int_data);

	if (bits_offset || BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(nr_bits)) {
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
		return;
	}

	switch (nr_bits) {
	case 128:
		btf_int128_print(m, data);
		break;
	case 64:
		if (sign)
			seq_printf(m, "%lld", *(s64 *)data);
		else
			seq_printf(m, "%llu", *(u64 *)data);
		break;
	case 32:
		if (sign)
			seq_printf(m, "%d", *(s32 *)data);
		else
			seq_printf(m, "%u", *(u32 *)data);
		break;
	case 16:
		if (sign)
			seq_printf(m, "%d", *(s16 *)data);
		else
			seq_printf(m, "%u", *(u16 *)data);
		break;
	case 8:
		if (sign)
			seq_printf(m, "%d", *(s8 *)data);
		else
			seq_printf(m, "%u", *(u8 *)data);
		break;
	default:
		/* e.g. 24/40/... bit ints: fall back to bitfield print */
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
	}
}
1492
/* kind operations for BTF_KIND_INT */
static const struct btf_kind_operations int_ops = {
	.check_meta = btf_int_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_int_check_member,
	.check_kflag_member = btf_int_check_kflag_member,
	.log_details = btf_int_log,
	.seq_show = btf_int_seq_show,
};
1501
1502 static int btf_modifier_check_member(struct btf_verifier_env *env,
1503 const struct btf_type *struct_type,
1504 const struct btf_member *member,
1505 const struct btf_type *member_type)
1506 {
1507 const struct btf_type *resolved_type;
1508 u32 resolved_type_id = member->type;
1509 struct btf_member resolved_member;
1510 struct btf *btf = env->btf;
1511
1512 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1513 if (!resolved_type) {
1514 btf_verifier_log_member(env, struct_type, member,
1515 "Invalid member");
1516 return -EINVAL;
1517 }
1518
1519 resolved_member = *member;
1520 resolved_member.type = resolved_type_id;
1521
1522 return btf_type_ops(resolved_type)->check_member(env, struct_type,
1523 &resolved_member,
1524 resolved_type);
1525 }
1526
1527 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1528 const struct btf_type *struct_type,
1529 const struct btf_member *member,
1530 const struct btf_type *member_type)
1531 {
1532 const struct btf_type *resolved_type;
1533 u32 resolved_type_id = member->type;
1534 struct btf_member resolved_member;
1535 struct btf *btf = env->btf;
1536
1537 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1538 if (!resolved_type) {
1539 btf_verifier_log_member(env, struct_type, member,
1540 "Invalid member");
1541 return -EINVAL;
1542 }
1543
1544 resolved_member = *member;
1545 resolved_member.type = resolved_type_id;
1546
1547 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1548 &resolved_member,
1549 resolved_type);
1550 }
1551
1552 static int btf_ptr_check_member(struct btf_verifier_env *env,
1553 const struct btf_type *struct_type,
1554 const struct btf_member *member,
1555 const struct btf_type *member_type)
1556 {
1557 u32 struct_size, struct_bits_off, bytes_offset;
1558
1559 struct_size = struct_type->size;
1560 struct_bits_off = member->offset;
1561 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1562
1563 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1564 btf_verifier_log_member(env, struct_type, member,
1565 "Member is not byte aligned");
1566 return -EINVAL;
1567 }
1568
1569 if (struct_size - bytes_offset < sizeof(void *)) {
1570 btf_verifier_log_member(env, struct_type, member,
1571 "Member exceeds struct_size");
1572 return -EINVAL;
1573 }
1574
1575 return 0;
1576 }
1577
/* Common check_meta for reference kinds (typedef, const, volatile,
 * restrict, ptr): no vlen, no kind_flag, a valid referenced type_id,
 * and a name only for typedef.  These kinds carry no extra data after
 * 'struct btf_type', hence the return of 0 bytes consumed.
 */
static int btf_ref_type_check_meta(struct btf_verifier_env *env,
				   const struct btf_type *t,
				   u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	/* typedef type must have a valid name, and other ref types,
	 * volatile, const, restrict, should have a null name.
	 */
	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
		if (!t->name_off ||
		    !btf_name_valid_identifier(env->btf, t->name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	} else {
		if (t->name_off) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}
1617
/* Resolve a modifier (typedef/const/volatile/restrict) by following
 * t->type.  Pushes the referenced type on the resolve stack when it
 * has not been visited yet; otherwise records the resolved id.
 */
static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	const struct btf_type *next_type;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* defer: the referenced type must be resolved first */
	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes such that it can
	 * save us a few type-following when we use it later (e.g. in
	 * pretty print).
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1659
/* Resolve a BTF_KIND_VAR: follow t->type and require that it resolves
 * to a concrete type with a known size.
 */
static int btf_var_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* defer: the referenced type must be resolved first */
	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* a modifier chain may have stopped at a ptr; continue from
	 * that ptr so it gets fully resolved as well.
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	/* We must resolve to something concrete at this point, no
	 * forward types or similar that would resolve to size of
	 * zero is allowed.
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1705
/* Resolve a BTF_KIND_PTR: follow t->type.  Unlike var, a pointer may
 * legitimately point to void, a fwd or a func_proto.
 */
static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* defer: the referenced type must be resolved first */
	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr will not referring back to
	 * the currenct ptr (t).
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "void *", "struct fwd *", "func_proto *" are all fine */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1762
/* seq_show for modifiers: print the value through the resolved
 * underlying type's seq_show callback.
 */
static void btf_modifier_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  u32 type_id, void *data,
				  u8 bits_offset, struct seq_file *m)
{
	t = btf_type_id_resolve(btf, &type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
}
1772
/* seq_show for BTF_KIND_VAR: print the value through the resolved
 * underlying type's seq_show callback.
 */
static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	t = btf_type_id_resolve(btf, &type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
}
1781
/* seq_show for BTF_KIND_PTR: print the pointer value itself */
static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	/* It is a hashed value */
	seq_printf(m, "%p", *(void **)data);
}
1789
/* log_details for reference kinds: just the referenced type_id */
static void btf_ref_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "type_id=%u", t->type);
}
1795
/* kind operations shared by typedef/const/volatile/restrict */
static struct btf_kind_operations modifier_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_modifier_resolve,
	.check_member = btf_modifier_check_member,
	.check_kflag_member = btf_modifier_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_modifier_seq_show,
};
1804
/* kind operations for BTF_KIND_PTR */
static struct btf_kind_operations ptr_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_ptr_resolve,
	.check_member = btf_ptr_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_ptr_seq_show,
};
1813
/* check_meta for BTF_KIND_FWD: no vlen, no referenced type, must have
 * a valid name.  A fwd carries no extra bytes after 'struct btf_type'.
 */
static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (t->type) {
		btf_verifier_log_type(env, t, "type != 0");
		return -EINVAL;
	}

	/* fwd type must have a valid name */
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}
1839
/* log_details for BTF_KIND_FWD: kind_flag distinguishes a union fwd
 * from a struct fwd.
 */
static void btf_fwd_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	const char *fwd_kind;

	fwd_kind = btf_type_kflag(t) ? "union" : "struct";
	btf_verifier_log(env, "%s", fwd_kind);
}
1845
/* kind operations for BTF_KIND_FWD: fwds have no size, no members and
 * no printable value, so the default (rejecting) callbacks are used.
 */
static struct btf_kind_operations fwd_ops = {
	.check_meta = btf_fwd_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_fwd_type_log,
	.seq_show = btf_df_seq_show,
};
1854
/* check_member for BTF_KIND_ARRAY: the array must be byte aligned and
 * its total size must fit inside the containing struct.
 */
static int btf_array_check_member(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;
	u32 array_type_id, array_size;
	struct btf *btf = env->btf;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	array_type_id = member->type;
	/* NOTE(review): the return value is not checked here; members
	 * are resolved before they are checked, so the lookup is
	 * presumably expected to always succeed and fill array_size --
	 * confirm against the resolve ordering.
	 */
	btf_type_id_size(btf, &array_type_id, &array_size);
	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < array_size) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
1883
/* Validate the metadata of a BTF_KIND_ARRAY type.
 *
 * Returns sizeof(struct btf_array) (the extra bytes following
 * 'struct btf_type') on success, -EINVAL on error.
 */
static s32 btf_array_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{
	const struct btf_array *array = btf_type_array(t);
	u32 meta_needed = sizeof(*array);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* array type should not have a name */
	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	/* the total size lives in resolved_sizes, not in t->size */
	if (t->size) {
		btf_verifier_log_type(env, t, "size != 0");
		return -EINVAL;
	}

	/* Array elem type and index type cannot be in type void,
	 * so !array->type and !array->index_type are not allowed.
	 */
	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
		btf_verifier_log_type(env, t, "Invalid elem");
		return -EINVAL;
	}

	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
		btf_verifier_log_type(env, t, "Invalid index");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
1936
/* Resolve a BTF_KIND_ARRAY: validate and resolve both the index type
 * (must end up as a regular int) and the element type (must be sized),
 * then record the total array size (elem_size * nelems).
 */
static int btf_array_resolve(struct btf_verifier_env *env,
			     const struct resolve_vertex *v)
{
	const struct btf_array *array = btf_type_array(v->t);
	const struct btf_type *elem_type, *index_type;
	u32 elem_type_id, index_type_id;
	struct btf *btf = env->btf;
	u32 elem_size;

	/* Check array->index_type */
	index_type_id = array->index_type;
	index_type = btf_type_by_id(btf, index_type_id);
	if (btf_type_nosize_or_null(index_type) ||
	    btf_type_is_resolve_source_only(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	/* defer until the index type itself has been resolved */
	if (!env_type_is_resolve_sink(env, index_type) &&
	    !env_type_is_resolved(env, index_type_id))
		return env_stack_push(env, index_type, index_type_id);

	/* the index must resolve to a regular (no offset, power-of-2
	 * width) int
	 */
	index_type = btf_type_id_size(btf, &index_type_id, NULL);
	if (!index_type || !btf_type_is_int(index_type) ||
	    !btf_type_int_is_regular(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	/* Check array->type */
	elem_type_id = array->type;
	elem_type = btf_type_by_id(btf, elem_type_id);
	if (btf_type_nosize_or_null(elem_type) ||
	    btf_type_is_resolve_source_only(elem_type)) {
		btf_verifier_log_type(env, v->t,
				      "Invalid elem");
		return -EINVAL;
	}

	/* defer until the element type itself has been resolved */
	if (!env_type_is_resolve_sink(env, elem_type) &&
	    !env_type_is_resolved(env, elem_type_id))
		return env_stack_push(env, elem_type, elem_type_id);

	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	if (!elem_type) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
		return -EINVAL;
	}

	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid array of int");
		return -EINVAL;
	}

	/* guard the elem_size * nelems multiplication below */
	if (array->nelems && elem_size > U32_MAX / array->nelems) {
		btf_verifier_log_type(env, v->t,
				      "Array size overflows U32_MAX");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);

	return 0;
}
2001
/* log_details for BTF_KIND_ARRAY: elem/index type ids and count */
static void btf_array_log(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	const struct btf_array *array = btf_type_array(t);

	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
			 array->type, array->index_type, array->nelems);
}
2010
/* seq_show for BTF_KIND_ARRAY: print "[e0,e1,...]" by delegating each
 * element to the (resolved) element type's seq_show callback.
 */
static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
			       u32 type_id, void *data, u8 bits_offset,
			       struct seq_file *m)
{
	const struct btf_array *array = btf_type_array(t);
	const struct btf_kind_operations *elem_ops;
	const struct btf_type *elem_type;
	u32 i, elem_size, elem_type_id;

	elem_type_id = array->type;
	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	elem_ops = btf_type_ops(elem_type);
	seq_puts(m, "[");
	for (i = 0; i < array->nelems; i++) {
		if (i)
			seq_puts(m, ",");

		/* bits_offset applies only to the first element's byte */
		elem_ops->seq_show(btf, elem_type, elem_type_id, data,
				   bits_offset, m);
		data += elem_size;
	}
	seq_puts(m, "]");
}
2034
/* kind operations for BTF_KIND_ARRAY */
static struct btf_kind_operations array_ops = {
	.check_meta = btf_array_check_meta,
	.resolve = btf_array_resolve,
	.check_member = btf_array_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_array_log,
	.seq_show = btf_array_seq_show,
};
2043
/* check_member for struct/union-typed members: must be byte aligned
 * and fit (by its own size) inside the containing struct.
 */
static int btf_struct_check_member(struct btf_verifier_env *env,
				   const struct btf_type *struct_type,
				   const struct btf_member *member,
				   const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < member_type->size) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
2068
/* Validate the metadata of a BTF_KIND_STRUCT/BTF_KIND_UNION type and
 * its member array.
 *
 * Returns vlen * sizeof(struct btf_member) (the extra bytes following
 * 'struct btf_type') on success, -EINVAL on error.
 */
static s32 btf_struct_check_meta(struct btf_verifier_env *env,
				 const struct btf_type *t,
				 u32 meta_left)
{
	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
	const struct btf_member *member;
	u32 meta_needed, last_offset;
	struct btf *btf = env->btf;
	u32 struct_size = t->size;
	u32 offset;
	u16 i;

	meta_needed = btf_type_vlen(t) * sizeof(*member);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* struct type either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	last_offset = 0;
	for_each_member(i, t, member) {
		if (!btf_name_offset_valid(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member,
						"Invalid member name_offset:%u",
						member->name_off);
			return -EINVAL;
		}

		/* struct member either no name or a valid one */
		if (member->name_off &&
		    !btf_name_valid_identifier(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member, "Invalid name");
			return -EINVAL;
		}
		/* A member cannot be in type void */
		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
			btf_verifier_log_member(env, t, member,
						"Invalid type_id");
			return -EINVAL;
		}

		/* all members of a union must sit at bit offset 0 */
		offset = btf_member_bit_offset(t, member);
		if (is_union && offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		/*
		 * ">" instead of ">=" because the last member could be
		 * "char a[0];"
		 */
		if (last_offset > offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
			btf_verifier_log_member(env, t, member,
						"Member bits_offset exceeds its struct size");
			return -EINVAL;
		}

		btf_verifier_log_member(env, t, member, NULL);
		last_offset = offset;
	}

	return meta_needed;
}
2149
/* Resolve a struct/union by resolving each member's type in order.
 * The walk is resumable: v->next_member records where a previous pass
 * stopped after pushing an unresolved member type on the stack.
 */
static int btf_struct_resolve(struct btf_verifier_env *env,
			      const struct resolve_vertex *v)
{
	const struct btf_member *member;
	int err;
	u16 i;

	/* Before continue resolving the next_member,
	 * ensure the last member is indeed resolved to a
	 * type with size info.
	 */
	if (v->next_member) {
		const struct btf_type *last_member_type;
		const struct btf_member *last_member;
		u16 last_member_type_id;

		last_member = btf_type_member(v->t) + v->next_member - 1;
		last_member_type_id = last_member->type;
		if (WARN_ON_ONCE(!env_type_is_resolved(env,
						       last_member_type_id)))
			return -EINVAL;

		last_member_type = btf_type_by_id(env->btf,
						  last_member_type_id);
		/* kind_flag structs need the bitfield-aware variant */
		if (btf_type_kflag(v->t))
			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
								last_member,
								last_member_type);
		else
			err = btf_type_ops(last_member_type)->check_member(env, v->t,
								last_member,
								last_member_type);
		if (err)
			return err;
	}

	for_each_member_from(i, v->next_member, v->t, member) {
		u32 member_type_id = member->type;
		const struct btf_type *member_type = btf_type_by_id(env->btf,
								member_type_id);

		if (btf_type_nosize_or_null(member_type) ||
		    btf_type_is_resolve_source_only(member_type)) {
			btf_verifier_log_member(env, v->t, member,
						"Invalid member");
			return -EINVAL;
		}

		/* remember where to resume, then resolve the member
		 * type first
		 */
		if (!env_type_is_resolve_sink(env, member_type) &&
		    !env_type_is_resolved(env, member_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, member_type, member_type_id);
		}

		if (btf_type_kflag(v->t))
			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
									    member,
									    member_type);
		else
			err = btf_type_ops(member_type)->check_member(env, v->t,
								      member,
								      member_type);
		if (err)
			return err;
	}

	env_stack_pop_resolved(env, 0, 0);

	return 0;
}
2220
2221 static void btf_struct_log(struct btf_verifier_env *env,
2222 const struct btf_type *t)
2223 {
2224 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2225 }
2226
/* find 'struct bpf_spin_lock' in map value.
 * return >= 0 offset if found
 * and < 0 in case of error
 */
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
{
	const struct btf_member *member;
	/* off is u32 but carries -ENOENT as a not-found sentinel; it is
	 * handed back to the caller via the (signed) int return value
	 */
	u32 i, off = -ENOENT;

	if (!__btf_type_is_struct(t))
		return -EINVAL;

	for_each_member(i, t, member) {
		const struct btf_type *member_type = btf_type_by_id(btf,
								    member->type);
		/* match by kind, size and name: struct bpf_spin_lock */
		if (!__btf_type_is_struct(member_type))
			continue;
		if (member_type->size != sizeof(struct bpf_spin_lock))
			continue;
		if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
			   "bpf_spin_lock"))
			continue;
		if (off != -ENOENT)
			/* only one 'struct bpf_spin_lock' is allowed */
			return -E2BIG;
		off = btf_member_bit_offset(t, member);
		if (off % 8)
			/* valid C code cannot generate such BTF */
			return -EINVAL;
		/* convert bit offset to byte offset */
		off /= 8;
		if (off % __alignof__(struct bpf_spin_lock))
			/* valid struct bpf_spin_lock will be 4 byte aligned */
			return -EINVAL;
	}
	return off;
}
2263
/* Print a struct/union value to @m as "{v1,v2,...}"; union members are
 * separated by '|' instead of ',' since they overlay the same storage.
 * Bitfield members are printed from their raw bits; everything else is
 * delegated to the member type's own seq_show().
 */
static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
				u32 type_id, void *data, u8 bits_offset,
				struct seq_file *m)
{
	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
	const struct btf_member *member;
	u32 i;

	seq_puts(m, "{");
	for_each_member(i, t, member) {
		const struct btf_type *member_type = btf_type_by_id(btf,
								    member->type);
		const struct btf_kind_operations *ops;
		u32 member_offset, bitfield_size;
		u32 bytes_offset;
		u8 bits8_offset;

		if (i)
			seq_puts(m, seq);

		member_offset = btf_member_bit_offset(t, member);
		bitfield_size = btf_member_bitfield_size(t, member);
		/* split the member's bit offset into whole bytes plus
		 * the residual in-byte bit offset
		 */
		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
		if (bitfield_size) {
			btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
					      bitfield_size, m);
		} else {
			ops = btf_type_ops(member_type);
			ops->seq_show(btf, member_type, member->type,
				      data + bytes_offset, bits8_offset, m);
		}
	}
	seq_puts(m, "}");
}
2299
2300 static struct btf_kind_operations struct_ops = {
2301 .check_meta = btf_struct_check_meta,
2302 .resolve = btf_struct_resolve,
2303 .check_member = btf_struct_check_member,
2304 .check_kflag_member = btf_generic_check_kflag_member,
2305 .log_details = btf_struct_log,
2306 .seq_show = btf_struct_seq_show,
2307 };
2308
2309 static int btf_enum_check_member(struct btf_verifier_env *env,
2310 const struct btf_type *struct_type,
2311 const struct btf_member *member,
2312 const struct btf_type *member_type)
2313 {
2314 u32 struct_bits_off = member->offset;
2315 u32 struct_size, bytes_offset;
2316
2317 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2318 btf_verifier_log_member(env, struct_type, member,
2319 "Member is not byte aligned");
2320 return -EINVAL;
2321 }
2322
2323 struct_size = struct_type->size;
2324 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2325 if (struct_size - bytes_offset < sizeof(int)) {
2326 btf_verifier_log_member(env, struct_type, member,
2327 "Member exceeds struct_size");
2328 return -EINVAL;
2329 }
2330
2331 return 0;
2332 }
2333
/* Validate an enum-typed member of a struct/union whose kind_flag is
 * set, i.e. member->offset encodes both a bitfield size and a bit
 * offset.  An enum member may be a bitfield of at most int width.
 */
static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
				       const struct btf_type *struct_type,
				       const struct btf_member *member,
				       const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;

	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
	if (!nr_bits) {
		/* not a bitfield: must be byte aligned, accessed int-wide */
		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
			btf_verifier_log_member(env, struct_type, member,
						"Member is not byte aligned");
			return -EINVAL;
		}

		nr_bits = int_bitsize;
	} else if (nr_bits > int_bitsize) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
	if (struct_size < bytes_end) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
2368
/* Validate metadata of BTF_KIND_ENUM: size must be a power of two no
 * larger than 8 bytes, kind_flag must be clear, and every enumerator
 * must have a valid identifier name.
 *
 * Returns the extra metadata consumed (vlen * sizeof(struct btf_enum))
 * or -EINVAL.
 */
static s32 btf_enum_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	const struct btf_enum *enums = btf_type_enum(t);
	struct btf *btf = env->btf;
	u16 i, nr_enums;
	u32 meta_needed;

	nr_enums = btf_type_vlen(t);
	meta_needed = nr_enums * sizeof(*enums);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (t->size > 8 || !is_power_of_2(t->size)) {
		btf_verifier_log_type(env, t, "Unexpected size");
		return -EINVAL;
	}

	/* enum type either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for (i = 0; i < nr_enums; i++) {
		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
			btf_verifier_log(env, "\tInvalid name_offset:%u",
					 enums[i].name_off);
			return -EINVAL;
		}

		/* enum member must have a valid name */
		if (!enums[i].name_off ||
		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}

		/* in-kernel BTF loads skip the verbose per-value log */
		if (env->log.level == BPF_LOG_KERNEL)
			continue;
		btf_verifier_log(env, "\t%s val=%d\n",
				 __btf_name_by_offset(btf, enums[i].name_off),
				 enums[i].val);
	}

	return meta_needed;
}
2430
2431 static void btf_enum_log(struct btf_verifier_env *env,
2432 const struct btf_type *t)
2433 {
2434 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2435 }
2436
2437 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2438 u32 type_id, void *data, u8 bits_offset,
2439 struct seq_file *m)
2440 {
2441 const struct btf_enum *enums = btf_type_enum(t);
2442 u32 i, nr_enums = btf_type_vlen(t);
2443 int v = *(int *)data;
2444
2445 for (i = 0; i < nr_enums; i++) {
2446 if (v == enums[i].val) {
2447 seq_printf(m, "%s",
2448 __btf_name_by_offset(btf,
2449 enums[i].name_off));
2450 return;
2451 }
2452 }
2453
2454 seq_printf(m, "%d", v);
2455 }
2456
2457 static struct btf_kind_operations enum_ops = {
2458 .check_meta = btf_enum_check_meta,
2459 .resolve = btf_df_resolve,
2460 .check_member = btf_enum_check_member,
2461 .check_kflag_member = btf_enum_check_kflag_member,
2462 .log_details = btf_enum_log,
2463 .seq_show = btf_enum_seq_show,
2464 };
2465
/* Validate metadata of BTF_KIND_FUNC_PROTO: it carries vlen btf_param
 * records, must be anonymous (name_off == 0) and must not set
 * kind_flag.  Per-argument validation happens later in
 * btf_func_proto_check() once all types have been loaded.
 *
 * Returns the extra metadata consumed or -EINVAL.
 */
static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
				     const struct btf_type *t,
				     u32 meta_left)
{
	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
2493
/* Log a func_proto as "return=<tid> args=(<tid> <name>, ...)".  Prints
 * "void" when there are no args and "vararg" for a trailing
 * type == 0 marker.
 */
static void btf_func_proto_log(struct btf_verifier_env *env,
			       const struct btf_type *t)
{
	const struct btf_param *args = (const struct btf_param *)(t + 1);
	u16 nr_args = btf_type_vlen(t), i;

	btf_verifier_log(env, "return=%u args=(", t->type);
	if (!nr_args) {
		btf_verifier_log(env, "void");
		goto done;
	}

	if (nr_args == 1 && !args[0].type) {
		/* Only one vararg */
		btf_verifier_log(env, "vararg");
		goto done;
	}

	/* first arg is printed without a leading ", " */
	btf_verifier_log(env, "%u %s", args[0].type,
			 __btf_name_by_offset(env->btf,
					      args[0].name_off));
	/* middle args (the last one is handled separately below) */
	for (i = 1; i < nr_args - 1; i++)
		btf_verifier_log(env, ", %u %s", args[i].type,
				 __btf_name_by_offset(env->btf,
						      args[i].name_off));

	if (nr_args > 1) {
		const struct btf_param *last_arg = &args[nr_args - 1];

		/* the last arg is either a normal arg or a vararg marker */
		if (last_arg->type)
			btf_verifier_log(env, ", %u %s", last_arg->type,
					 __btf_name_by_offset(env->btf,
							      last_arg->name_off));
		else
			btf_verifier_log(env, ", vararg");
	}

done:
	btf_verifier_log(env, ")");
}
2534
2535 static struct btf_kind_operations func_proto_ops = {
2536 .check_meta = btf_func_proto_check_meta,
2537 .resolve = btf_df_resolve,
2538 /*
2539 * BTF_KIND_FUNC_PROTO cannot be directly referred by
2540 * a struct's member.
2541 *
2542 * It should be a funciton pointer instead.
2543 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
2544 *
2545 * Hence, there is no btf_func_check_member().
2546 */
2547 .check_member = btf_df_check_member,
2548 .check_kflag_member = btf_df_check_kflag_member,
2549 .log_details = btf_func_proto_log,
2550 .seq_show = btf_df_seq_show,
2551 };
2552
/* Validate metadata of BTF_KIND_FUNC: must carry a valid identifier
 * name, zero vlen and no kind_flag.  The referenced func_proto is
 * validated later in btf_func_check().
 *
 * A FUNC record has no trailing metadata, hence the return of 0.
 */
static s32 btf_func_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}
2577
2578 static struct btf_kind_operations func_ops = {
2579 .check_meta = btf_func_check_meta,
2580 .resolve = btf_df_resolve,
2581 .check_member = btf_df_check_member,
2582 .check_kflag_member = btf_df_check_kflag_member,
2583 .log_details = btf_ref_type_log,
2584 .seq_show = btf_df_seq_show,
2585 };
2586
/* Validate metadata of BTF_KIND_VAR: vlen and kind_flag must be zero,
 * the name must be valid, the referenced type must not be void, and the
 * linkage must be static or global-allocated.
 *
 * Returns sizeof(struct btf_var) (the extra metadata consumed) or
 * -EINVAL.
 */
static s32 btf_var_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	const struct btf_var *var;
	u32 meta_needed = sizeof(*var);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	/* NOTE(review): __btf_name_valid()'s third arg (true here) is not
	 * visible in this chunk — presumably it relaxes the identifier
	 * rules for compiler-generated var names; confirm at its
	 * definition.
	 */
	if (!t->name_off ||
	    !__btf_name_valid(env->btf, t->name_off, true)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	/* A var cannot be in type void */
	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	var = btf_type_var(t);
	if (var->linkage != BTF_VAR_STATIC &&
	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
		btf_verifier_log_type(env, t, "Linkage not supported");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
2634
2635 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2636 {
2637 const struct btf_var *var = btf_type_var(t);
2638
2639 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2640 }
2641
/* Operations for BTF_KIND_VAR (see kind_ops table below). */
static const struct btf_kind_operations var_ops = {
	.check_meta = btf_var_check_meta,
	.resolve = btf_var_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_var_log,
	.seq_show = btf_var_seq_show,
};
2650
/* Validate metadata of BTF_KIND_DATASEC (vlen btf_var_secinfo records):
 * the section must have a valid name and non-zero size, and its entries
 * must be sorted by offset, non-overlapping and contained within the
 * declared section size.
 *
 * Returns the extra metadata consumed or -EINVAL.
 */
static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
				  const struct btf_type *t,
				  u32 meta_left)
{
	const struct btf_var_secinfo *vsi;
	/* u64 accumulators so untrusted offset+size sums cannot wrap u32 */
	u64 last_vsi_end_off = 0, sum = 0;
	u32 i, meta_needed;

	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (!btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen == 0");
		return -EINVAL;
	}

	if (!t->size) {
		btf_verifier_log_type(env, t, "size == 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!t->name_off ||
	    !btf_name_valid_section(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for_each_vsi(i, t, vsi) {
		/* A var cannot be in type void */
		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid type_id");
			return -EINVAL;
		}

		/* entries must be sorted and start inside the section */
		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset");
			return -EINVAL;
		}

		if (!vsi->size || vsi->size > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid size");
			return -EINVAL;
		}

		last_vsi_end_off = vsi->offset + vsi->size;
		if (last_vsi_end_off > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset+size");
			return -EINVAL;
		}

		btf_verifier_log_vsi(env, t, vsi, NULL);
		sum += vsi->size;
	}

	if (t->size < sum) {
		btf_verifier_log_type(env, t, "Invalid btf_info size");
		return -EINVAL;
	}

	return meta_needed;
}
2728
/* Resolve a BTF_KIND_DATASEC: every secinfo entry must point at a VAR
 * whose underlying type is sized and no larger than the entry's declared
 * size.  Like btf_struct_resolve(), this may push an unresolved VAR onto
 * the stack and be re-entered later at v->next_member.
 */
static int btf_datasec_resolve(struct btf_verifier_env *env,
			       const struct resolve_vertex *v)
{
	const struct btf_var_secinfo *vsi;
	struct btf *btf = env->btf;
	u16 i;

	for_each_vsi_from(i, v->next_member, v->t, vsi) {
		u32 var_type_id = vsi->type, type_id, type_size = 0;
		const struct btf_type *var_type = btf_type_by_id(env->btf,
								 var_type_id);
		if (!var_type || !btf_type_is_var(var_type)) {
			btf_verifier_log_vsi(env, v->t, vsi,
					     "Not a VAR kind member");
			return -EINVAL;
		}

		/* descend into an unresolved VAR; resume at i + 1 */
		if (!env_type_is_resolve_sink(env, var_type) &&
		    !env_type_is_resolved(env, var_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, var_type, var_type_id);
		}

		/* the VAR's underlying type must have a size */
		type_id = var_type->type;
		if (!btf_type_id_size(btf, &type_id, &type_size)) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
			return -EINVAL;
		}

		if (vsi->size < type_size) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, 0, 0);
	return 0;
}
2767
2768 static void btf_datasec_log(struct btf_verifier_env *env,
2769 const struct btf_type *t)
2770 {
2771 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2772 }
2773
2774 static void btf_datasec_seq_show(const struct btf *btf,
2775 const struct btf_type *t, u32 type_id,
2776 void *data, u8 bits_offset,
2777 struct seq_file *m)
2778 {
2779 const struct btf_var_secinfo *vsi;
2780 const struct btf_type *var;
2781 u32 i;
2782
2783 seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2784 for_each_vsi(i, t, vsi) {
2785 var = btf_type_by_id(btf, vsi->type);
2786 if (i)
2787 seq_puts(m, ",");
2788 btf_type_ops(var)->seq_show(btf, var, vsi->type,
2789 data + vsi->offset, bits_offset, m);
2790 }
2791 seq_puts(m, "}");
2792 }
2793
/* Operations for BTF_KIND_DATASEC (see kind_ops table below). */
static const struct btf_kind_operations datasec_ops = {
	.check_meta = btf_datasec_check_meta,
	.resolve = btf_datasec_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_datasec_log,
	.seq_show = btf_datasec_seq_show,
};
2802
/* Cross-type validation of a BTF_KIND_FUNC_PROTO after all types have
 * been loaded: the return type (unless void) and every argument type
 * must resolve to a sized type; argument names must be valid
 * identifiers; only the last argument may be a vararg marker
 * (type == 0 with no name).
 */
static int btf_func_proto_check(struct btf_verifier_env *env,
				const struct btf_type *t)
{
	const struct btf_type *ret_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;
	int err;

	btf = env->btf;
	args = (const struct btf_param *)(t + 1);
	nr_args = btf_type_vlen(t);

	/* Check func return type which could be "void" (t->type == 0) */
	if (t->type) {
		u32 ret_type_id = t->type;

		ret_type = btf_type_by_id(btf, ret_type_id);
		if (!ret_type) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}

		if (btf_type_needs_resolve(ret_type) &&
		    !env_type_is_resolved(env, ret_type_id)) {
			err = btf_resolve(env, ret_type, ret_type_id);
			if (err)
				return err;
		}

		/* Ensure the return type is a type that has a size */
		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}
	}

	if (!nr_args)
		return 0;

	/* Last func arg type_id could be 0 if it is a vararg */
	if (!args[nr_args - 1].type) {
		/* a vararg marker must not carry a name */
		if (args[nr_args - 1].name_off) {
			btf_verifier_log_type(env, t, "Invalid arg#%u",
					      nr_args);
			return -EINVAL;
		}
		/* skip the vararg marker in the per-arg loop below */
		nr_args--;
	}

	err = 0;
	for (i = 0; i < nr_args; i++) {
		const struct btf_type *arg_type;
		u32 arg_type_id;

		arg_type_id = args[i].type;
		arg_type = btf_type_by_id(btf, arg_type_id);
		if (!arg_type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (args[i].name_off &&
		    (!btf_name_offset_valid(btf, args[i].name_off) ||
		     !btf_name_valid_identifier(btf, args[i].name_off))) {
			btf_verifier_log_type(env, t,
					      "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (btf_type_needs_resolve(arg_type) &&
		    !env_type_is_resolved(env, arg_type_id)) {
			err = btf_resolve(env, arg_type, arg_type_id);
			if (err)
				break;
		}

		/* each argument must resolve to a sized type */
		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}
	}

	return err;
}
2891
2892 static int btf_func_check(struct btf_verifier_env *env,
2893 const struct btf_type *t)
2894 {
2895 const struct btf_type *proto_type;
2896 const struct btf_param *args;
2897 const struct btf *btf;
2898 u16 nr_args, i;
2899
2900 btf = env->btf;
2901 proto_type = btf_type_by_id(btf, t->type);
2902
2903 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
2904 btf_verifier_log_type(env, t, "Invalid type_id");
2905 return -EINVAL;
2906 }
2907
2908 args = (const struct btf_param *)(proto_type + 1);
2909 nr_args = btf_type_vlen(proto_type);
2910 for (i = 0; i < nr_args; i++) {
2911 if (!args[i].name_off && args[i].type) {
2912 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2913 return -EINVAL;
2914 }
2915 }
2916
2917 return 0;
2918 }
2919
/* Dispatch table of kind-specific operations, indexed by
 * BTF_INFO_KIND(t->info).  BTF_KIND_UNKN (0) is intentionally left
 * NULL: it is rejected earlier, in btf_check_meta().
 */
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
	[BTF_KIND_FUNC] = &func_ops,
	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
	[BTF_KIND_VAR] = &var_ops,
	[BTF_KIND_DATASEC] = &datasec_ops,
};
2937
/* Validate the fixed part of one btf_type record, then dispatch to the
 * kind-specific check_meta() for the variable-length part.
 *
 * Returns the total number of bytes this type occupies in the type
 * section (so the caller can advance to the next record) or -EINVAL.
 */
static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	/* reserved info bits must be zero */
	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	/* kind-specific validation of the trailing metadata */
	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}
2979
/* First verification pass: walk the type section sequentially, validate
 * each type's metadata and register it via btf_add_type().  Type ids are
 * implicitly assigned in order of appearance, starting at 1.
 */
static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}
3006
/* Post-resolution sanity check: verify the resolved_ids/resolved_sizes
 * bookkeeping for @type_id is in exactly the state the resolve pass
 * should have left for this kind of type.
 */
static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	/* struct/union and datasec stand for themselves: no cached
	 * redirection id or size may be recorded
	 */
	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
		return !btf->resolved_ids[type_id] &&
		       !btf->resolved_sizes[type_id];

	/* modifiers, pointers and vars must chase down to a concrete type */
	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
	    btf_type_is_var(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t &&
		       !btf_type_is_modifier(t) &&
		       !btf_type_is_var(t) &&
		       !btf_type_is_datasec(t);
	}

	/* arrays need a sized, non-modifier element type and a consistent
	 * cached total size
	 */
	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
			(array->nelems * elem_size ==
			 btf->resolved_sizes[type_id]);
	}

	return false;
}
3043
/* Resolve @t and everything it references, iteratively via an explicit
 * stack instead of recursion.  -E2BIG from the stack means the reference
 * chain exceeded MAX_RESOLVE_DEPTH; -EEXIST means a type loop was
 * detected.  env->log_type_id is restored on exit so caller logging is
 * unaffected.
 */
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	u32 save_log_type_id = env->log_type_id;
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG) {
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
	}

	/* Final sanity check */
	if (!err && !btf_resolve_valid(env, t, type_id)) {
		btf_verifier_log_type(env, t, "Invalid resolve state");
		err = -EINVAL;
	}

	env->log_type_id = save_log_type_id;
	return err;
}
3076
/* Second verification pass: resolve every type that needs it and run
 * the cross-type checks for func_proto and func records.
 */
static int btf_check_all_types(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 type_id;
	int err;

	err = env_resolve_init(env);
	if (err)
		return err;

	env->phase++;
	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
		const struct btf_type *t = btf_type_by_id(btf, type_id);

		env->log_type_id = type_id;
		/* may already be resolved as a side effect of resolving an
		 * earlier type that referenced it
		 */
		if (btf_type_needs_resolve(t) &&
		    !env_type_is_resolved(env, type_id)) {
			err = btf_resolve(env, t, type_id);
			if (err)
				return err;
		}

		if (btf_type_is_func_proto(t)) {
			err = btf_func_proto_check(env, t);
			if (err)
				return err;
		}

		if (btf_type_is_func(t)) {
			err = btf_func_check(env, t);
			if (err)
				return err;
		}
	}

	return 0;
}
3114
3115 static int btf_parse_type_sec(struct btf_verifier_env *env)
3116 {
3117 const struct btf_header *hdr = &env->btf->hdr;
3118 int err;
3119
3120 /* Type section must align to 4 bytes */
3121 if (hdr->type_off & (sizeof(u32) - 1)) {
3122 btf_verifier_log(env, "Unaligned type_off");
3123 return -EINVAL;
3124 }
3125
3126 if (!hdr->type_len) {
3127 btf_verifier_log(env, "No type found");
3128 return -EINVAL;
3129 }
3130
3131 err = btf_check_all_metas(env);
3132 if (err)
3133 return err;
3134
3135 return btf_check_all_types(env);
3136 }
3137
/* Validate the string section: it must be the last section in the BTF
 * blob, non-empty and within BTF_MAX_NAME_OFFSET, and both begin and end
 * with a NUL byte (offset 0 is the empty string; the trailing NUL
 * guarantees every string in the section is terminated).
 */
static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    start[0] || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}
3163
/* Offsets of the per-section (off, len) pairs inside struct btf_header,
 * so btf_check_sec_info() can treat the sections uniformly.
 */
static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};
3168
3169 static int btf_sec_info_cmp(const void *a, const void *b)
3170 {
3171 const struct btf_sec_info *x = a;
3172 const struct btf_sec_info *y = b;
3173
3174 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3175 }
3176
/* Verify that the sections described in the header, sorted by offset,
 * exactly tile the space after the header: no gaps, no overlaps and no
 * trailing unknown data.
 */
static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{
	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
	u32 total, expected_total, i;
	const struct btf_header *hdr;
	const struct btf *btf;

	btf = env->btf;
	hdr = &btf->hdr;

	/* Populate the secs from hdr */
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
		secs[i] = *(struct btf_sec_info *)((void *)hdr +
						   btf_sec_info_offset[i]);

	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);

	/* Check for gaps and overlap among sections */
	total = 0;
	expected_total = btf_data_size - hdr->hdr_len;
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		/* bound the length before accumulating to avoid u32 wrap */
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
			return -EINVAL;
		}
		total += secs[i].len;
	}

	/* There is data other than hdr and known sections */
	if (expected_total != total) {
		btf_verifier_log(env, "Unsupported section found");
		return -EINVAL;
	}

	return 0;
}
3229
/* Copy and validate the BTF header.  Headers longer than the kernel's
 * struct btf_header (produced by newer tooling) are accepted as long as
 * all the extra bytes are zero; only the known prefix is copied into
 * btf->hdr.
 */
static int btf_parse_hdr(struct btf_verifier_env *env)
{
	u32 hdr_len, hdr_copy, btf_data_size;
	const struct btf_header *hdr;
	struct btf *btf;
	int err;

	btf = env->btf;
	btf_data_size = btf->data_size;

	/* need at least enough bytes to read hdr_len itself */
	if (btf_data_size <
	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
		btf_verifier_log(env, "hdr_len not found");
		return -EINVAL;
	}

	hdr = btf->data;
	hdr_len = hdr->hdr_len;
	if (btf_data_size < hdr_len) {
		btf_verifier_log(env, "btf_header not found");
		return -EINVAL;
	}

	/* Ensure the unsupported header fields are zero */
	if (hdr_len > sizeof(btf->hdr)) {
		u8 *expected_zero = btf->data + sizeof(btf->hdr);
		u8 *end = btf->data + hdr_len;

		for (; expected_zero < end; expected_zero++) {
			if (*expected_zero) {
				btf_verifier_log(env, "Unsupported btf_header");
				return -E2BIG;
			}
		}
	}

	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
	memcpy(&btf->hdr, btf->data, hdr_copy);

	hdr = &btf->hdr;

	btf_verifier_log_hdr(env, btf_data_size);

	if (hdr->magic != BTF_MAGIC) {
		btf_verifier_log(env, "Invalid magic");
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		btf_verifier_log(env, "Unsupported version");
		return -ENOTSUPP;
	}

	if (hdr->flags) {
		btf_verifier_log(env, "Unsupported flags");
		return -ENOTSUPP;
	}

	/* a header with nothing after it carries no types or strings */
	if (btf_data_size == hdr->hdr_len) {
		btf_verifier_log(env, "No data");
		return -EINVAL;
	}

	err = btf_check_sec_info(env, btf_data_size);
	if (err)
		return err;

	return 0;
}
3299
/* Load and verify user-supplied BTF (backing BPF_BTF_LOAD): copy the
 * blob from userspace, parse and validate the header, string section
 * and type section, and on success return a refcounted struct btf.
 * Returns an ERR_PTR on failure; any partially-built btf is freed.
 */
static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
			     u32 log_level, char __user *log_ubuf, u32 log_size)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	u8 *data;
	int err;

	if (btf_data_size > BTF_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	if (log_level || log_ubuf || log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = log_level;
		log->ubuf = log_ubuf;
		log->len_total = log_size;

		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf) {
			err = -EINVAL;
			goto errout;
		}
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!data) {
		err = -ENOMEM;
		goto errout;
	}

	btf->data = data;
	btf->data_size = btf_data_size;

	if (copy_from_user(data, btf_data, btf_data_size)) {
		err = -EFAULT;
		goto errout;
	}

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_parse_type_sec(env);
	if (err)
		goto errout;

	/* verification passed but the trace did not fit the user buffer */
	if (log->level && bpf_verifier_log_full(log)) {
		err = -ENOSPC;
		goto errout;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
}
3383
3384 extern char __weak _binary__btf_vmlinux_bin_start[];
3385 extern char __weak _binary__btf_vmlinux_bin_end[];
3386
/* Parse the BTF image embedded in the kernel at build time
 * (between _binary__btf_vmlinux_bin_start/_end).
 *
 * Unlike btf_parse(), the data is not copied: btf->data points directly
 * into the kernel image, and logging goes to the kernel log
 * (BPF_LOG_KERNEL) since there is no userspace caller.
 *
 * Returns a struct btf with refcnt == 1, or ERR_PTR() on failure.
 */
struct btf *btf_parse_vmlinux(void)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	int err;

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	log->level = BPF_LOG_KERNEL;

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	/* Borrow the linked-in blob; nothing here is kvmalloc'd. */
	btf->data = _binary__btf_vmlinux_bin_start;
	btf->data_size = _binary__btf_vmlinux_bin_end -
		_binary__btf_vmlinux_bin_start;

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_check_all_metas(env);
	if (err)
		goto errout;

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf) {
		/* Free fields individually rather than via btf_free():
		 * ->data points into the kernel image, not a kvmalloc'd
		 * buffer, so it must not be freed.
		 * NOTE(review): presumes btf_free() would kvfree ->data —
		 * confirm against its definition elsewhere in this file.
		 */
		kvfree(btf->types);
		kfree(btf);
	}
	return ERR_PTR(err);
}
3438
3439 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
3440 struct seq_file *m)
3441 {
3442 const struct btf_type *t = btf_type_by_id(btf, type_id);
3443
3444 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
3445 }
3446
#ifdef CONFIG_PROC_FS
/* ->show_fdinfo(): expose the BTF object id via /proc/<pid>/fdinfo/<fd>. */
static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
{
	seq_printf(m, "btf_id:\t%u\n",
		   ((const struct btf *)filp->private_data)->id);
}
#endif
3455
3456 static int btf_release(struct inode *inode, struct file *filp)
3457 {
3458 btf_put(filp->private_data);
3459 return 0;
3460 }
3461
/* File operations backing anonymous BTF fds (see __btf_new_fd()).
 * BTF fds are read-only handles: no read/write/mmap, just fdinfo and
 * reference release on close.
 */
const struct file_operations btf_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_btf_show_fdinfo,
#endif
	.release = btf_release,
};
3468
3469 static int __btf_new_fd(struct btf *btf)
3470 {
3471 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
3472 }
3473
3474 int btf_new_fd(const union bpf_attr *attr)
3475 {
3476 struct btf *btf;
3477 int ret;
3478
3479 btf = btf_parse(u64_to_user_ptr(attr->btf),
3480 attr->btf_size, attr->btf_log_level,
3481 u64_to_user_ptr(attr->btf_log_buf),
3482 attr->btf_log_size);
3483 if (IS_ERR(btf))
3484 return PTR_ERR(btf);
3485
3486 ret = btf_alloc_id(btf);
3487 if (ret) {
3488 btf_free(btf);
3489 return ret;
3490 }
3491
3492 /*
3493 * The BTF ID is published to the userspace.
3494 * All BTF free must go through call_rcu() from
3495 * now on (i.e. free by calling btf_put()).
3496 */
3497
3498 ret = __btf_new_fd(btf);
3499 if (ret < 0)
3500 btf_put(btf);
3501
3502 return ret;
3503 }
3504
3505 struct btf *btf_get_by_fd(int fd)
3506 {
3507 struct btf *btf;
3508 struct fd f;
3509
3510 f = fdget(fd);
3511
3512 if (!f.file)
3513 return ERR_PTR(-EBADF);
3514
3515 if (f.file->f_op != &btf_fops) {
3516 fdput(f);
3517 return ERR_PTR(-EINVAL);
3518 }
3519
3520 btf = f.file->private_data;
3521 refcount_inc(&btf->refcnt);
3522 fdput(f);
3523
3524 return btf;
3525 }
3526
3527 int btf_get_info_by_fd(const struct btf *btf,
3528 const union bpf_attr *attr,
3529 union bpf_attr __user *uattr)
3530 {
3531 struct bpf_btf_info __user *uinfo;
3532 struct bpf_btf_info info = {};
3533 u32 info_copy, btf_copy;
3534 void __user *ubtf;
3535 u32 uinfo_len;
3536
3537 uinfo = u64_to_user_ptr(attr->info.info);
3538 uinfo_len = attr->info.info_len;
3539
3540 info_copy = min_t(u32, uinfo_len, sizeof(info));
3541 if (copy_from_user(&info, uinfo, info_copy))
3542 return -EFAULT;
3543
3544 info.id = btf->id;
3545 ubtf = u64_to_user_ptr(info.btf);
3546 btf_copy = min_t(u32, btf->data_size, info.btf_size);
3547 if (copy_to_user(ubtf, btf->data, btf_copy))
3548 return -EFAULT;
3549 info.btf_size = btf->data_size;
3550
3551 if (copy_to_user(uinfo, &info, info_copy) ||
3552 put_user(info_copy, &uattr->info.info_len))
3553 return -EFAULT;
3554
3555 return 0;
3556 }
3557
3558 int btf_get_fd_by_id(u32 id)
3559 {
3560 struct btf *btf;
3561 int fd;
3562
3563 rcu_read_lock();
3564 btf = idr_find(&btf_idr, id);
3565 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
3566 btf = ERR_PTR(-ENOENT);
3567 rcu_read_unlock();
3568
3569 if (IS_ERR(btf))
3570 return PTR_ERR(btf);
3571
3572 fd = __btf_new_fd(btf);
3573 if (fd < 0)
3574 btf_put(btf);
3575
3576 return fd;
3577 }
3578
/* Return the id of @btf (the id allocated by btf_alloc_id() and
 * published to userspace).
 */
u32 btf_id(const struct btf *btf)
{
	return btf->id;
}