1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/types.h>
6 #include <linux/seq_file.h>
7 #include <linux/compiler.h>
8 #include <linux/ctype.h>
9 #include <linux/errno.h>
10 #include <linux/slab.h>
11 #include <linux/anon_inodes.h>
12 #include <linux/file.h>
13 #include <linux/uaccess.h>
14 #include <linux/kernel.h>
15 #include <linux/idr.h>
16 #include <linux/sort.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/btf.h>
20 /* BTF (BPF Type Format) is the meta data format which describes
21 * the data types of BPF program/map. Hence, it basically focuses
22 * on the C programming language which the modern BPF is primarily
27 * The BTF data is stored under the ".BTF" ELF section
31 * Each 'struct btf_type' object describes a C data type.
32 * Depending on the type it is describing, a 'struct btf_type'
33 * object may be followed by more data. F.e.
34 * To describe an array, 'struct btf_type' is followed by
37 * 'struct btf_type' and any extra data following it are
42 * The BTF type section contains a list of 'struct btf_type' objects.
43 * Each one describes a C type. Recall from the above section
44 * that a 'struct btf_type' object could be immediately followed by extra
45 * data in order to describe some particular C types.
49 * Each btf_type object is identified by a type_id. The type_id
50 * is implicitly implied by the location of the btf_type object in
51 * the BTF type section. The first one has type_id 1. The second
52 * one has type_id 2...etc. Hence, an earlier btf_type has
55 * A btf_type object may refer to another btf_type object by using
56 * type_id (i.e. the "type" in the "struct btf_type").
58 * NOTE that we cannot assume any reference-order.
59 * A btf_type object can refer to an earlier btf_type object
60 * but it can also refer to a later btf_type object.
62 * For example, to describe "const void *". A btf_type
63 * object describing "const" may refer to another btf_type
64 * object describing "void *". This type-reference is done
65 * by specifying type_id:
67 * [1] CONST (anon) type_id=2
68 * [2] PTR (anon) type_id=0
70 * The above is the btf_verifier debug log:
71 * - Each line started with "[?]" is a btf_type object
72 * - [?] is the type_id of the btf_type object.
73 * - CONST/PTR is the BTF_KIND_XXX
74 * - "(anon)" is the name of the type. It just
75 * happens that CONST and PTR has no name.
76 * - type_id=XXX is the 'u32 type' in btf_type
78 * NOTE: "void" has type_id 0
82 * The BTF string section contains the names used by the type section.
83 * Each string is referred by an "offset" from the beginning of the
86 * Each string is '\0' terminated.
88 * The first character in the string section must be '\0'
89 * which is used to mean 'anonymous'. Some btf_type may not
95 * To verify BTF data, two passes are needed.
99 * The first pass is to collect all btf_type objects to
100 * an array: "btf->types".
102 * Depending on the C type that a btf_type is describing,
103 * a btf_type may be followed by extra data. We don't know
104 * how many btf_type is there, and more importantly we don't
105 * know where each btf_type is located in the type section.
107 * Without knowing the location of each type_id, most verifications
108 * cannot be done. e.g. an earlier btf_type may refer to a later
109 * btf_type (recall the "const void *" above), so we cannot
110 * check this type-reference in the first pass.
112 * In the first pass, it still does some verifications (e.g.
113 * checking the name is a valid offset to the string section).
117 * The main focus is to resolve a btf_type that is referring
120 * We have to ensure the referring type:
121 * 1) does exist in the BTF (i.e. in btf->types[])
122 * 2) does not cause a loop:
131 * btf_type_needs_resolve() decides if a btf_type needs
134 * The needs_resolve type implements the "resolve()" ops which
135 * essentially does a DFS and detects backedge.
137 * During resolve (or DFS), different C types have different
138 * "RESOLVED" conditions.
140 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
141 * members because a member is always referring to another
142 * type. A struct's member can be treated as "RESOLVED" if
143 * it is referring to a BTF_KIND_PTR. Otherwise, the
144 * following valid C struct would be rejected:
151 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
152 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
153 * detect a pointer loop, e.g.:
154 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
156 * +-----------------------------------------+
160 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
161 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
162 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
163 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
164 #define BITS_ROUNDUP_BYTES(bits) \
165 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
167 #define BTF_INFO_MASK 0x8f00ffff
168 #define BTF_INT_MASK 0x0fffffff
169 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
170 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
172 /* 16MB for 64k structs and each has 16 members and
173 * a few MB spaces for the string section.
174 * The hard limit is S32_MAX.
176 #define BTF_MAX_SIZE (16 * 1024 * 1024)
178 #define for_each_member(i, struct_type, member) \
179 for (i = 0, member = btf_type_member(struct_type); \
180 i < btf_type_vlen(struct_type); \
183 #define for_each_member_from(i, from, struct_type, member) \
184 for (i = from, member = btf_type_member(struct_type) + from; \
185 i < btf_type_vlen(struct_type); \
188 #define for_each_vsi(i, struct_type, member) \
189 for (i = 0, member = btf_type_var_secinfo(struct_type); \
190 i < btf_type_vlen(struct_type); \
193 #define for_each_vsi_from(i, from, struct_type, member) \
194 for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
195 i < btf_type_vlen(struct_type); \
199 DEFINE_SPINLOCK(btf_idr_lock
);
203 struct btf_type
**types
;
208 struct btf_header hdr
;
217 enum verifier_phase
{
222 struct resolve_vertex
{
223 const struct btf_type
*t
;
235 RESOLVE_TBD
, /* To Be Determined */
236 RESOLVE_PTR
, /* Resolving for Pointer */
237 RESOLVE_STRUCT_OR_ARRAY
, /* Resolving for struct/union
242 #define MAX_RESOLVE_DEPTH 32
244 struct btf_sec_info
{
249 struct btf_verifier_env
{
252 struct resolve_vertex stack
[MAX_RESOLVE_DEPTH
];
253 struct bpf_verifier_log log
;
256 enum verifier_phase phase
;
257 enum resolve_mode resolve_mode
;
260 static const char * const btf_kind_str
[NR_BTF_KINDS
] = {
261 [BTF_KIND_UNKN
] = "UNKNOWN",
262 [BTF_KIND_INT
] = "INT",
263 [BTF_KIND_PTR
] = "PTR",
264 [BTF_KIND_ARRAY
] = "ARRAY",
265 [BTF_KIND_STRUCT
] = "STRUCT",
266 [BTF_KIND_UNION
] = "UNION",
267 [BTF_KIND_ENUM
] = "ENUM",
268 [BTF_KIND_FWD
] = "FWD",
269 [BTF_KIND_TYPEDEF
] = "TYPEDEF",
270 [BTF_KIND_VOLATILE
] = "VOLATILE",
271 [BTF_KIND_CONST
] = "CONST",
272 [BTF_KIND_RESTRICT
] = "RESTRICT",
273 [BTF_KIND_FUNC
] = "FUNC",
274 [BTF_KIND_FUNC_PROTO
] = "FUNC_PROTO",
275 [BTF_KIND_VAR
] = "VAR",
276 [BTF_KIND_DATASEC
] = "DATASEC",
279 struct btf_kind_operations
{
280 s32 (*check_meta
)(struct btf_verifier_env
*env
,
281 const struct btf_type
*t
,
283 int (*resolve
)(struct btf_verifier_env
*env
,
284 const struct resolve_vertex
*v
);
285 int (*check_member
)(struct btf_verifier_env
*env
,
286 const struct btf_type
*struct_type
,
287 const struct btf_member
*member
,
288 const struct btf_type
*member_type
);
289 int (*check_kflag_member
)(struct btf_verifier_env
*env
,
290 const struct btf_type
*struct_type
,
291 const struct btf_member
*member
,
292 const struct btf_type
*member_type
);
293 void (*log_details
)(struct btf_verifier_env
*env
,
294 const struct btf_type
*t
);
295 void (*seq_show
)(const struct btf
*btf
, const struct btf_type
*t
,
296 u32 type_id
, void *data
, u8 bits_offsets
,
300 static const struct btf_kind_operations
* const kind_ops
[NR_BTF_KINDS
];
301 static struct btf_type btf_void
;
303 static int btf_resolve(struct btf_verifier_env
*env
,
304 const struct btf_type
*t
, u32 type_id
);
306 static bool btf_type_is_modifier(const struct btf_type
*t
)
308 /* Some of them is not strictly a C modifier
309 * but they are grouped into the same bucket
311 * A type (t) that refers to another
312 * type through t->type AND its size cannot
313 * be determined without following the t->type.
315 * ptr does not fall into this bucket
316 * because its size is always sizeof(void *).
318 switch (BTF_INFO_KIND(t
->info
)) {
319 case BTF_KIND_TYPEDEF
:
320 case BTF_KIND_VOLATILE
:
322 case BTF_KIND_RESTRICT
:
329 bool btf_type_is_void(const struct btf_type
*t
)
331 return t
== &btf_void
;
334 static bool btf_type_is_fwd(const struct btf_type
*t
)
336 return BTF_INFO_KIND(t
->info
) == BTF_KIND_FWD
;
339 static bool btf_type_is_func(const struct btf_type
*t
)
341 return BTF_INFO_KIND(t
->info
) == BTF_KIND_FUNC
;
344 static bool btf_type_is_func_proto(const struct btf_type
*t
)
346 return BTF_INFO_KIND(t
->info
) == BTF_KIND_FUNC_PROTO
;
349 static bool btf_type_nosize(const struct btf_type
*t
)
351 return btf_type_is_void(t
) || btf_type_is_fwd(t
) ||
352 btf_type_is_func(t
) || btf_type_is_func_proto(t
);
355 static bool btf_type_nosize_or_null(const struct btf_type
*t
)
357 return !t
|| btf_type_nosize(t
);
360 /* union is only a special case of struct:
361 * all its offsetof(member) == 0
363 static bool btf_type_is_struct(const struct btf_type
*t
)
365 u8 kind
= BTF_INFO_KIND(t
->info
);
367 return kind
== BTF_KIND_STRUCT
|| kind
== BTF_KIND_UNION
;
370 static bool __btf_type_is_struct(const struct btf_type
*t
)
372 return BTF_INFO_KIND(t
->info
) == BTF_KIND_STRUCT
;
375 static bool btf_type_is_array(const struct btf_type
*t
)
377 return BTF_INFO_KIND(t
->info
) == BTF_KIND_ARRAY
;
380 static bool btf_type_is_ptr(const struct btf_type
*t
)
382 return BTF_INFO_KIND(t
->info
) == BTF_KIND_PTR
;
385 static bool btf_type_is_int(const struct btf_type
*t
)
387 return BTF_INFO_KIND(t
->info
) == BTF_KIND_INT
;
390 static bool btf_type_is_var(const struct btf_type
*t
)
392 return BTF_INFO_KIND(t
->info
) == BTF_KIND_VAR
;
395 static bool btf_type_is_datasec(const struct btf_type
*t
)
397 return BTF_INFO_KIND(t
->info
) == BTF_KIND_DATASEC
;
400 /* Types that act only as a source, not sink or intermediate
401 * type when resolving.
403 static bool btf_type_is_resolve_source_only(const struct btf_type
*t
)
405 return btf_type_is_var(t
) ||
406 btf_type_is_datasec(t
);
409 /* What types need to be resolved?
411 * btf_type_is_modifier() is an obvious one.
413 * btf_type_is_struct() because its member refers to
414 * another type (through member->type).
416 * btf_type_is_var() because the variable refers to
417 * another type. btf_type_is_datasec() holds multiple
418 * btf_type_is_var() types that need resolving.
420 * btf_type_is_array() because its element (array->type)
421 * refers to another type. Array can be thought of a
422 * special case of struct while array just has the same
423 * member-type repeated by array->nelems of times.
425 static bool btf_type_needs_resolve(const struct btf_type
*t
)
427 return btf_type_is_modifier(t
) ||
428 btf_type_is_ptr(t
) ||
429 btf_type_is_struct(t
) ||
430 btf_type_is_array(t
) ||
431 btf_type_is_var(t
) ||
432 btf_type_is_datasec(t
);
435 /* t->size can be used */
436 static bool btf_type_has_size(const struct btf_type
*t
)
438 switch (BTF_INFO_KIND(t
->info
)) {
440 case BTF_KIND_STRUCT
:
443 case BTF_KIND_DATASEC
:
450 static const char *btf_int_encoding_str(u8 encoding
)
454 else if (encoding
== BTF_INT_SIGNED
)
456 else if (encoding
== BTF_INT_CHAR
)
458 else if (encoding
== BTF_INT_BOOL
)
464 static u16
btf_type_vlen(const struct btf_type
*t
)
466 return BTF_INFO_VLEN(t
->info
);
469 static bool btf_type_kflag(const struct btf_type
*t
)
471 return BTF_INFO_KFLAG(t
->info
);
474 static u32
btf_member_bit_offset(const struct btf_type
*struct_type
,
475 const struct btf_member
*member
)
477 return btf_type_kflag(struct_type
) ? BTF_MEMBER_BIT_OFFSET(member
->offset
)
481 static u32
btf_member_bitfield_size(const struct btf_type
*struct_type
,
482 const struct btf_member
*member
)
484 return btf_type_kflag(struct_type
) ? BTF_MEMBER_BITFIELD_SIZE(member
->offset
)
488 static u32
btf_type_int(const struct btf_type
*t
)
490 return *(u32
*)(t
+ 1);
493 static const struct btf_array
*btf_type_array(const struct btf_type
*t
)
495 return (const struct btf_array
*)(t
+ 1);
498 static const struct btf_member
*btf_type_member(const struct btf_type
*t
)
500 return (const struct btf_member
*)(t
+ 1);
503 static const struct btf_enum
*btf_type_enum(const struct btf_type
*t
)
505 return (const struct btf_enum
*)(t
+ 1);
508 static const struct btf_var
*btf_type_var(const struct btf_type
*t
)
510 return (const struct btf_var
*)(t
+ 1);
513 static const struct btf_var_secinfo
*btf_type_var_secinfo(const struct btf_type
*t
)
515 return (const struct btf_var_secinfo
*)(t
+ 1);
518 static const struct btf_kind_operations
*btf_type_ops(const struct btf_type
*t
)
520 return kind_ops
[BTF_INFO_KIND(t
->info
)];
523 static bool btf_name_offset_valid(const struct btf
*btf
, u32 offset
)
525 return BTF_STR_OFFSET_VALID(offset
) &&
526 offset
< btf
->hdr
.str_len
;
529 static bool __btf_name_char_ok(char c
, bool first
, bool dot_ok
)
531 if ((first
? !isalpha(c
) :
534 ((c
== '.' && !dot_ok
) ||
540 static bool __btf_name_valid(const struct btf
*btf
, u32 offset
, bool dot_ok
)
542 /* offset must be valid */
543 const char *src
= &btf
->strings
[offset
];
544 const char *src_limit
;
546 if (!__btf_name_char_ok(*src
, true, dot_ok
))
549 /* set a limit on identifier length */
550 src_limit
= src
+ KSYM_NAME_LEN
;
552 while (*src
&& src
< src_limit
) {
553 if (!__btf_name_char_ok(*src
, false, dot_ok
))
561 /* Only C-style identifier is permitted. This can be relaxed if
564 static bool btf_name_valid_identifier(const struct btf
*btf
, u32 offset
)
566 return __btf_name_valid(btf
, offset
, false);
569 static bool btf_name_valid_section(const struct btf
*btf
, u32 offset
)
571 return __btf_name_valid(btf
, offset
, true);
574 static const char *__btf_name_by_offset(const struct btf
*btf
, u32 offset
)
578 else if (offset
< btf
->hdr
.str_len
)
579 return &btf
->strings
[offset
];
581 return "(invalid-name-offset)";
584 const char *btf_name_by_offset(const struct btf
*btf
, u32 offset
)
586 if (offset
< btf
->hdr
.str_len
)
587 return &btf
->strings
[offset
];
592 const struct btf_type
*btf_type_by_id(const struct btf
*btf
, u32 type_id
)
594 if (type_id
> btf
->nr_types
)
597 return btf
->types
[type_id
];
601 * Regular int is not a bit field and it must be either
602 * u8/u16/u32/u64 or __int128.
604 static bool btf_type_int_is_regular(const struct btf_type
*t
)
606 u8 nr_bits
, nr_bytes
;
609 int_data
= btf_type_int(t
);
610 nr_bits
= BTF_INT_BITS(int_data
);
611 nr_bytes
= BITS_ROUNDUP_BYTES(nr_bits
);
612 if (BITS_PER_BYTE_MASKED(nr_bits
) ||
613 BTF_INT_OFFSET(int_data
) ||
614 (nr_bytes
!= sizeof(u8
) && nr_bytes
!= sizeof(u16
) &&
615 nr_bytes
!= sizeof(u32
) && nr_bytes
!= sizeof(u64
) &&
616 nr_bytes
!= (2 * sizeof(u64
)))) {
624 * Check that given struct member is a regular int with expected
627 bool btf_member_is_reg_int(const struct btf
*btf
, const struct btf_type
*s
,
628 const struct btf_member
*m
,
629 u32 expected_offset
, u32 expected_size
)
631 const struct btf_type
*t
;
636 t
= btf_type_id_size(btf
, &id
, NULL
);
637 if (!t
|| !btf_type_is_int(t
))
640 int_data
= btf_type_int(t
);
641 nr_bits
= BTF_INT_BITS(int_data
);
642 if (btf_type_kflag(s
)) {
643 u32 bitfield_size
= BTF_MEMBER_BITFIELD_SIZE(m
->offset
);
644 u32 bit_offset
= BTF_MEMBER_BIT_OFFSET(m
->offset
);
646 /* if kflag set, int should be a regular int and
647 * bit offset should be at byte boundary.
649 return !bitfield_size
&&
650 BITS_ROUNDUP_BYTES(bit_offset
) == expected_offset
&&
651 BITS_ROUNDUP_BYTES(nr_bits
) == expected_size
;
654 if (BTF_INT_OFFSET(int_data
) ||
655 BITS_PER_BYTE_MASKED(m
->offset
) ||
656 BITS_ROUNDUP_BYTES(m
->offset
) != expected_offset
||
657 BITS_PER_BYTE_MASKED(nr_bits
) ||
658 BITS_ROUNDUP_BYTES(nr_bits
) != expected_size
)
664 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log
*log
,
665 const char *fmt
, ...)
670 bpf_verifier_vlog(log
, fmt
, args
);
674 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env
*env
,
675 const char *fmt
, ...)
677 struct bpf_verifier_log
*log
= &env
->log
;
680 if (!bpf_verifier_log_needed(log
))
684 bpf_verifier_vlog(log
, fmt
, args
);
688 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env
*env
,
689 const struct btf_type
*t
,
691 const char *fmt
, ...)
693 struct bpf_verifier_log
*log
= &env
->log
;
694 u8 kind
= BTF_INFO_KIND(t
->info
);
695 struct btf
*btf
= env
->btf
;
698 if (!bpf_verifier_log_needed(log
))
701 /* btf verifier prints all types it is processing via
702 * btf_verifier_log_type(..., fmt = NULL).
703 * Skip those prints for in-kernel BTF verification.
705 if (log
->level
== BPF_LOG_KERNEL
&& !fmt
)
708 __btf_verifier_log(log
, "[%u] %s %s%s",
711 __btf_name_by_offset(btf
, t
->name_off
),
712 log_details
? " " : "");
715 btf_type_ops(t
)->log_details(env
, t
);
718 __btf_verifier_log(log
, " ");
720 bpf_verifier_vlog(log
, fmt
, args
);
724 __btf_verifier_log(log
, "\n");
727 #define btf_verifier_log_type(env, t, ...) \
728 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
729 #define btf_verifier_log_basic(env, t, ...) \
730 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
733 static void btf_verifier_log_member(struct btf_verifier_env
*env
,
734 const struct btf_type
*struct_type
,
735 const struct btf_member
*member
,
736 const char *fmt
, ...)
738 struct bpf_verifier_log
*log
= &env
->log
;
739 struct btf
*btf
= env
->btf
;
742 if (!bpf_verifier_log_needed(log
))
745 if (log
->level
== BPF_LOG_KERNEL
&& !fmt
)
747 /* The CHECK_META phase already did a btf dump.
749 * If member is logged again, it must hit an error in
750 * parsing this member. It is useful to print out which
751 * struct this member belongs to.
753 if (env
->phase
!= CHECK_META
)
754 btf_verifier_log_type(env
, struct_type
, NULL
);
756 if (btf_type_kflag(struct_type
))
757 __btf_verifier_log(log
,
758 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
759 __btf_name_by_offset(btf
, member
->name_off
),
761 BTF_MEMBER_BITFIELD_SIZE(member
->offset
),
762 BTF_MEMBER_BIT_OFFSET(member
->offset
));
764 __btf_verifier_log(log
, "\t%s type_id=%u bits_offset=%u",
765 __btf_name_by_offset(btf
, member
->name_off
),
766 member
->type
, member
->offset
);
769 __btf_verifier_log(log
, " ");
771 bpf_verifier_vlog(log
, fmt
, args
);
775 __btf_verifier_log(log
, "\n");
779 static void btf_verifier_log_vsi(struct btf_verifier_env
*env
,
780 const struct btf_type
*datasec_type
,
781 const struct btf_var_secinfo
*vsi
,
782 const char *fmt
, ...)
784 struct bpf_verifier_log
*log
= &env
->log
;
787 if (!bpf_verifier_log_needed(log
))
789 if (log
->level
== BPF_LOG_KERNEL
&& !fmt
)
791 if (env
->phase
!= CHECK_META
)
792 btf_verifier_log_type(env
, datasec_type
, NULL
);
794 __btf_verifier_log(log
, "\t type_id=%u offset=%u size=%u",
795 vsi
->type
, vsi
->offset
, vsi
->size
);
797 __btf_verifier_log(log
, " ");
799 bpf_verifier_vlog(log
, fmt
, args
);
803 __btf_verifier_log(log
, "\n");
806 static void btf_verifier_log_hdr(struct btf_verifier_env
*env
,
809 struct bpf_verifier_log
*log
= &env
->log
;
810 const struct btf
*btf
= env
->btf
;
811 const struct btf_header
*hdr
;
813 if (!bpf_verifier_log_needed(log
))
816 if (log
->level
== BPF_LOG_KERNEL
)
819 __btf_verifier_log(log
, "magic: 0x%x\n", hdr
->magic
);
820 __btf_verifier_log(log
, "version: %u\n", hdr
->version
);
821 __btf_verifier_log(log
, "flags: 0x%x\n", hdr
->flags
);
822 __btf_verifier_log(log
, "hdr_len: %u\n", hdr
->hdr_len
);
823 __btf_verifier_log(log
, "type_off: %u\n", hdr
->type_off
);
824 __btf_verifier_log(log
, "type_len: %u\n", hdr
->type_len
);
825 __btf_verifier_log(log
, "str_off: %u\n", hdr
->str_off
);
826 __btf_verifier_log(log
, "str_len: %u\n", hdr
->str_len
);
827 __btf_verifier_log(log
, "btf_total_size: %u\n", btf_data_size
);
830 static int btf_add_type(struct btf_verifier_env
*env
, struct btf_type
*t
)
832 struct btf
*btf
= env
->btf
;
834 /* < 2 because +1 for btf_void which is always in btf->types[0].
835 * btf_void is not accounted in btf->nr_types because btf_void
836 * does not come from the BTF file.
838 if (btf
->types_size
- btf
->nr_types
< 2) {
839 /* Expand 'types' array */
841 struct btf_type
**new_types
;
842 u32 expand_by
, new_size
;
844 if (btf
->types_size
== BTF_MAX_TYPE
) {
845 btf_verifier_log(env
, "Exceeded max num of types");
849 expand_by
= max_t(u32
, btf
->types_size
>> 2, 16);
850 new_size
= min_t(u32
, BTF_MAX_TYPE
,
851 btf
->types_size
+ expand_by
);
853 new_types
= kvcalloc(new_size
, sizeof(*new_types
),
854 GFP_KERNEL
| __GFP_NOWARN
);
858 if (btf
->nr_types
== 0)
859 new_types
[0] = &btf_void
;
861 memcpy(new_types
, btf
->types
,
862 sizeof(*btf
->types
) * (btf
->nr_types
+ 1));
865 btf
->types
= new_types
;
866 btf
->types_size
= new_size
;
869 btf
->types
[++(btf
->nr_types
)] = t
;
874 static int btf_alloc_id(struct btf
*btf
)
878 idr_preload(GFP_KERNEL
);
879 spin_lock_bh(&btf_idr_lock
);
880 id
= idr_alloc_cyclic(&btf_idr
, btf
, 1, INT_MAX
, GFP_ATOMIC
);
883 spin_unlock_bh(&btf_idr_lock
);
886 if (WARN_ON_ONCE(!id
))
889 return id
> 0 ? 0 : id
;
892 static void btf_free_id(struct btf
*btf
)
897 * In map-in-map, calling map_delete_elem() on outer
898 * map will call bpf_map_put on the inner map.
899 * It will then eventually call btf_free_id()
900 * on the inner map. Some of the map_delete_elem()
901 * implementation may have irq disabled, so
902 * we need to use the _irqsave() version instead
903 * of the _bh() version.
905 spin_lock_irqsave(&btf_idr_lock
, flags
);
906 idr_remove(&btf_idr
, btf
->id
);
907 spin_unlock_irqrestore(&btf_idr_lock
, flags
);
910 static void btf_free(struct btf
*btf
)
913 kvfree(btf
->resolved_sizes
);
914 kvfree(btf
->resolved_ids
);
919 static void btf_free_rcu(struct rcu_head
*rcu
)
921 struct btf
*btf
= container_of(rcu
, struct btf
, rcu
);
926 void btf_put(struct btf
*btf
)
928 if (btf
&& refcount_dec_and_test(&btf
->refcnt
)) {
930 call_rcu(&btf
->rcu
, btf_free_rcu
);
934 static int env_resolve_init(struct btf_verifier_env
*env
)
936 struct btf
*btf
= env
->btf
;
937 u32 nr_types
= btf
->nr_types
;
938 u32
*resolved_sizes
= NULL
;
939 u32
*resolved_ids
= NULL
;
940 u8
*visit_states
= NULL
;
942 /* +1 for btf_void */
943 resolved_sizes
= kvcalloc(nr_types
+ 1, sizeof(*resolved_sizes
),
944 GFP_KERNEL
| __GFP_NOWARN
);
948 resolved_ids
= kvcalloc(nr_types
+ 1, sizeof(*resolved_ids
),
949 GFP_KERNEL
| __GFP_NOWARN
);
953 visit_states
= kvcalloc(nr_types
+ 1, sizeof(*visit_states
),
954 GFP_KERNEL
| __GFP_NOWARN
);
958 btf
->resolved_sizes
= resolved_sizes
;
959 btf
->resolved_ids
= resolved_ids
;
960 env
->visit_states
= visit_states
;
965 kvfree(resolved_sizes
);
966 kvfree(resolved_ids
);
967 kvfree(visit_states
);
971 static void btf_verifier_env_free(struct btf_verifier_env
*env
)
973 kvfree(env
->visit_states
);
977 static bool env_type_is_resolve_sink(const struct btf_verifier_env
*env
,
978 const struct btf_type
*next_type
)
980 switch (env
->resolve_mode
) {
982 /* int, enum or void is a sink */
983 return !btf_type_needs_resolve(next_type
);
985 /* int, enum, void, struct, array, func or func_proto is a sink
988 return !btf_type_is_modifier(next_type
) &&
989 !btf_type_is_ptr(next_type
);
990 case RESOLVE_STRUCT_OR_ARRAY
:
991 /* int, enum, void, ptr, func or func_proto is a sink
992 * for struct and array
994 return !btf_type_is_modifier(next_type
) &&
995 !btf_type_is_array(next_type
) &&
996 !btf_type_is_struct(next_type
);
1002 static bool env_type_is_resolved(const struct btf_verifier_env
*env
,
1005 return env
->visit_states
[type_id
] == RESOLVED
;
1008 static int env_stack_push(struct btf_verifier_env
*env
,
1009 const struct btf_type
*t
, u32 type_id
)
1011 struct resolve_vertex
*v
;
1013 if (env
->top_stack
== MAX_RESOLVE_DEPTH
)
1016 if (env
->visit_states
[type_id
] != NOT_VISITED
)
1019 env
->visit_states
[type_id
] = VISITED
;
1021 v
= &env
->stack
[env
->top_stack
++];
1023 v
->type_id
= type_id
;
1026 if (env
->resolve_mode
== RESOLVE_TBD
) {
1027 if (btf_type_is_ptr(t
))
1028 env
->resolve_mode
= RESOLVE_PTR
;
1029 else if (btf_type_is_struct(t
) || btf_type_is_array(t
))
1030 env
->resolve_mode
= RESOLVE_STRUCT_OR_ARRAY
;
1036 static void env_stack_set_next_member(struct btf_verifier_env
*env
,
1039 env
->stack
[env
->top_stack
- 1].next_member
= next_member
;
1042 static void env_stack_pop_resolved(struct btf_verifier_env
*env
,
1043 u32 resolved_type_id
,
1046 u32 type_id
= env
->stack
[--(env
->top_stack
)].type_id
;
1047 struct btf
*btf
= env
->btf
;
1049 btf
->resolved_sizes
[type_id
] = resolved_size
;
1050 btf
->resolved_ids
[type_id
] = resolved_type_id
;
1051 env
->visit_states
[type_id
] = RESOLVED
;
1054 static const struct resolve_vertex
*env_stack_peak(struct btf_verifier_env
*env
)
1056 return env
->top_stack
? &env
->stack
[env
->top_stack
- 1] : NULL
;
1059 /* The input param "type_id" must point to a needs_resolve type */
1060 static const struct btf_type
*btf_type_id_resolve(const struct btf
*btf
,
1063 *type_id
= btf
->resolved_ids
[*type_id
];
1064 return btf_type_by_id(btf
, *type_id
);
1067 const struct btf_type
*btf_type_id_size(const struct btf
*btf
,
1068 u32
*type_id
, u32
*ret_size
)
1070 const struct btf_type
*size_type
;
1071 u32 size_type_id
= *type_id
;
1074 size_type
= btf_type_by_id(btf
, size_type_id
);
1075 if (btf_type_nosize_or_null(size_type
))
1078 if (btf_type_has_size(size_type
)) {
1079 size
= size_type
->size
;
1080 } else if (btf_type_is_array(size_type
)) {
1081 size
= btf
->resolved_sizes
[size_type_id
];
1082 } else if (btf_type_is_ptr(size_type
)) {
1083 size
= sizeof(void *);
1085 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type
) &&
1086 !btf_type_is_var(size_type
)))
1089 size_type_id
= btf
->resolved_ids
[size_type_id
];
1090 size_type
= btf_type_by_id(btf
, size_type_id
);
1091 if (btf_type_nosize_or_null(size_type
))
1093 else if (btf_type_has_size(size_type
))
1094 size
= size_type
->size
;
1095 else if (btf_type_is_array(size_type
))
1096 size
= btf
->resolved_sizes
[size_type_id
];
1097 else if (btf_type_is_ptr(size_type
))
1098 size
= sizeof(void *);
1103 *type_id
= size_type_id
;
1110 static int btf_df_check_member(struct btf_verifier_env
*env
,
1111 const struct btf_type
*struct_type
,
1112 const struct btf_member
*member
,
1113 const struct btf_type
*member_type
)
1115 btf_verifier_log_basic(env
, struct_type
,
1116 "Unsupported check_member");
1120 static int btf_df_check_kflag_member(struct btf_verifier_env
*env
,
1121 const struct btf_type
*struct_type
,
1122 const struct btf_member
*member
,
1123 const struct btf_type
*member_type
)
1125 btf_verifier_log_basic(env
, struct_type
,
1126 "Unsupported check_kflag_member");
1130 /* Used for ptr, array and struct/union type members.
1131 * int, enum and modifier types have their specific callback functions.
1133 static int btf_generic_check_kflag_member(struct btf_verifier_env
*env
,
1134 const struct btf_type
*struct_type
,
1135 const struct btf_member
*member
,
1136 const struct btf_type
*member_type
)
1138 if (BTF_MEMBER_BITFIELD_SIZE(member
->offset
)) {
1139 btf_verifier_log_member(env
, struct_type
, member
,
1140 "Invalid member bitfield_size");
1144 /* bitfield size is 0, so member->offset represents bit offset only.
1145 * It is safe to call non kflag check_member variants.
1147 return btf_type_ops(member_type
)->check_member(env
, struct_type
,
1152 static int btf_df_resolve(struct btf_verifier_env
*env
,
1153 const struct resolve_vertex
*v
)
1155 btf_verifier_log_basic(env
, v
->t
, "Unsupported resolve");
1159 static void btf_df_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1160 u32 type_id
, void *data
, u8 bits_offsets
,
1163 seq_printf(m
, "<unsupported kind:%u>", BTF_INFO_KIND(t
->info
));
1166 static int btf_int_check_member(struct btf_verifier_env
*env
,
1167 const struct btf_type
*struct_type
,
1168 const struct btf_member
*member
,
1169 const struct btf_type
*member_type
)
1171 u32 int_data
= btf_type_int(member_type
);
1172 u32 struct_bits_off
= member
->offset
;
1173 u32 struct_size
= struct_type
->size
;
1177 if (U32_MAX
- struct_bits_off
< BTF_INT_OFFSET(int_data
)) {
1178 btf_verifier_log_member(env
, struct_type
, member
,
1179 "bits_offset exceeds U32_MAX");
1183 struct_bits_off
+= BTF_INT_OFFSET(int_data
);
1184 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
1185 nr_copy_bits
= BTF_INT_BITS(int_data
) +
1186 BITS_PER_BYTE_MASKED(struct_bits_off
);
1188 if (nr_copy_bits
> BITS_PER_U128
) {
1189 btf_verifier_log_member(env
, struct_type
, member
,
1190 "nr_copy_bits exceeds 128");
1194 if (struct_size
< bytes_offset
||
1195 struct_size
- bytes_offset
< BITS_ROUNDUP_BYTES(nr_copy_bits
)) {
1196 btf_verifier_log_member(env
, struct_type
, member
,
1197 "Member exceeds struct_size");
1204 static int btf_int_check_kflag_member(struct btf_verifier_env
*env
,
1205 const struct btf_type
*struct_type
,
1206 const struct btf_member
*member
,
1207 const struct btf_type
*member_type
)
1209 u32 struct_bits_off
, nr_bits
, nr_int_data_bits
, bytes_offset
;
1210 u32 int_data
= btf_type_int(member_type
);
1211 u32 struct_size
= struct_type
->size
;
1214 /* a regular int type is required for the kflag int member */
1215 if (!btf_type_int_is_regular(member_type
)) {
1216 btf_verifier_log_member(env
, struct_type
, member
,
1217 "Invalid member base type");
1221 /* check sanity of bitfield size */
1222 nr_bits
= BTF_MEMBER_BITFIELD_SIZE(member
->offset
);
1223 struct_bits_off
= BTF_MEMBER_BIT_OFFSET(member
->offset
);
1224 nr_int_data_bits
= BTF_INT_BITS(int_data
);
1226 /* Not a bitfield member, member offset must be at byte
1229 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
1230 btf_verifier_log_member(env
, struct_type
, member
,
1231 "Invalid member offset");
1235 nr_bits
= nr_int_data_bits
;
1236 } else if (nr_bits
> nr_int_data_bits
) {
1237 btf_verifier_log_member(env
, struct_type
, member
,
1238 "Invalid member bitfield_size");
1242 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
1243 nr_copy_bits
= nr_bits
+ BITS_PER_BYTE_MASKED(struct_bits_off
);
1244 if (nr_copy_bits
> BITS_PER_U128
) {
1245 btf_verifier_log_member(env
, struct_type
, member
,
1246 "nr_copy_bits exceeds 128");
1250 if (struct_size
< bytes_offset
||
1251 struct_size
- bytes_offset
< BITS_ROUNDUP_BYTES(nr_copy_bits
)) {
1252 btf_verifier_log_member(env
, struct_type
, member
,
1253 "Member exceeds struct_size");
1260 static s32
btf_int_check_meta(struct btf_verifier_env
*env
,
1261 const struct btf_type
*t
,
1264 u32 int_data
, nr_bits
, meta_needed
= sizeof(int_data
);
1267 if (meta_left
< meta_needed
) {
1268 btf_verifier_log_basic(env
, t
,
1269 "meta_left:%u meta_needed:%u",
1270 meta_left
, meta_needed
);
1274 if (btf_type_vlen(t
)) {
1275 btf_verifier_log_type(env
, t
, "vlen != 0");
1279 if (btf_type_kflag(t
)) {
1280 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
1284 int_data
= btf_type_int(t
);
1285 if (int_data
& ~BTF_INT_MASK
) {
1286 btf_verifier_log_basic(env
, t
, "Invalid int_data:%x",
1291 nr_bits
= BTF_INT_BITS(int_data
) + BTF_INT_OFFSET(int_data
);
1293 if (nr_bits
> BITS_PER_U128
) {
1294 btf_verifier_log_type(env
, t
, "nr_bits exceeds %zu",
1299 if (BITS_ROUNDUP_BYTES(nr_bits
) > t
->size
) {
1300 btf_verifier_log_type(env
, t
, "nr_bits exceeds type_size");
1305 * Only one of the encoding bits is allowed and it
1306 * should be sufficient for the pretty print purpose (i.e. decoding).
1307 * Multiple bits can be allowed later if it is found
1308 * to be insufficient.
1310 encoding
= BTF_INT_ENCODING(int_data
);
1312 encoding
!= BTF_INT_SIGNED
&&
1313 encoding
!= BTF_INT_CHAR
&&
1314 encoding
!= BTF_INT_BOOL
) {
1315 btf_verifier_log_type(env
, t
, "Unsupported encoding");
1319 btf_verifier_log_type(env
, t
, NULL
);
1324 static void btf_int_log(struct btf_verifier_env
*env
,
1325 const struct btf_type
*t
)
1327 int int_data
= btf_type_int(t
);
1329 btf_verifier_log(env
,
1330 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1331 t
->size
, BTF_INT_OFFSET(int_data
),
1332 BTF_INT_BITS(int_data
),
1333 btf_int_encoding_str(BTF_INT_ENCODING(int_data
)));
1336 static void btf_int128_print(struct seq_file
*m
, void *data
)
1338 /* data points to a __int128 number.
1340 * int128_num = *(__int128 *)data;
1341 * The below formulas shows what upper_num and lower_num represents:
1342 * upper_num = int128_num >> 64;
1343 * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
1345 u64 upper_num
, lower_num
;
1347 #ifdef __BIG_ENDIAN_BITFIELD
1348 upper_num
= *(u64
*)data
;
1349 lower_num
= *(u64
*)(data
+ 8);
1351 upper_num
= *(u64
*)(data
+ 8);
1352 lower_num
= *(u64
*)data
;
1355 seq_printf(m
, "0x%llx", lower_num
);
1357 seq_printf(m
, "0x%llx%016llx", upper_num
, lower_num
);
1360 static void btf_int128_shift(u64
*print_num
, u16 left_shift_bits
,
1361 u16 right_shift_bits
)
1363 u64 upper_num
, lower_num
;
1365 #ifdef __BIG_ENDIAN_BITFIELD
1366 upper_num
= print_num
[0];
1367 lower_num
= print_num
[1];
1369 upper_num
= print_num
[1];
1370 lower_num
= print_num
[0];
1373 /* shake out un-needed bits by shift/or operations */
1374 if (left_shift_bits
>= 64) {
1375 upper_num
= lower_num
<< (left_shift_bits
- 64);
1378 upper_num
= (upper_num
<< left_shift_bits
) |
1379 (lower_num
>> (64 - left_shift_bits
));
1380 lower_num
= lower_num
<< left_shift_bits
;
1383 if (right_shift_bits
>= 64) {
1384 lower_num
= upper_num
>> (right_shift_bits
- 64);
1387 lower_num
= (lower_num
>> right_shift_bits
) |
1388 (upper_num
<< (64 - right_shift_bits
));
1389 upper_num
= upper_num
>> right_shift_bits
;
1392 #ifdef __BIG_ENDIAN_BITFIELD
1393 print_num
[0] = upper_num
;
1394 print_num
[1] = lower_num
;
1396 print_num
[0] = lower_num
;
1397 print_num
[1] = upper_num
;
1401 static void btf_bitfield_seq_show(void *data
, u8 bits_offset
,
1402 u8 nr_bits
, struct seq_file
*m
)
1404 u16 left_shift_bits
, right_shift_bits
;
1407 u64 print_num
[2] = {};
1409 nr_copy_bits
= nr_bits
+ bits_offset
;
1410 nr_copy_bytes
= BITS_ROUNDUP_BYTES(nr_copy_bits
);
1412 memcpy(print_num
, data
, nr_copy_bytes
);
1414 #ifdef __BIG_ENDIAN_BITFIELD
1415 left_shift_bits
= bits_offset
;
1417 left_shift_bits
= BITS_PER_U128
- nr_copy_bits
;
1419 right_shift_bits
= BITS_PER_U128
- nr_bits
;
1421 btf_int128_shift(print_num
, left_shift_bits
, right_shift_bits
);
1422 btf_int128_print(m
, print_num
);
1426 static void btf_int_bits_seq_show(const struct btf
*btf
,
1427 const struct btf_type
*t
,
1428 void *data
, u8 bits_offset
,
1431 u32 int_data
= btf_type_int(t
);
1432 u8 nr_bits
= BTF_INT_BITS(int_data
);
1433 u8 total_bits_offset
;
1436 * bits_offset is at most 7.
1437 * BTF_INT_OFFSET() cannot exceed 128 bits.
1439 total_bits_offset
= bits_offset
+ BTF_INT_OFFSET(int_data
);
1440 data
+= BITS_ROUNDDOWN_BYTES(total_bits_offset
);
1441 bits_offset
= BITS_PER_BYTE_MASKED(total_bits_offset
);
1442 btf_bitfield_seq_show(data
, bits_offset
, nr_bits
, m
);
1445 static void btf_int_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1446 u32 type_id
, void *data
, u8 bits_offset
,
1449 u32 int_data
= btf_type_int(t
);
1450 u8 encoding
= BTF_INT_ENCODING(int_data
);
1451 bool sign
= encoding
& BTF_INT_SIGNED
;
1452 u8 nr_bits
= BTF_INT_BITS(int_data
);
1454 if (bits_offset
|| BTF_INT_OFFSET(int_data
) ||
1455 BITS_PER_BYTE_MASKED(nr_bits
)) {
1456 btf_int_bits_seq_show(btf
, t
, data
, bits_offset
, m
);
1462 btf_int128_print(m
, data
);
1466 seq_printf(m
, "%lld", *(s64
*)data
);
1468 seq_printf(m
, "%llu", *(u64
*)data
);
1472 seq_printf(m
, "%d", *(s32
*)data
);
1474 seq_printf(m
, "%u", *(u32
*)data
);
1478 seq_printf(m
, "%d", *(s16
*)data
);
1480 seq_printf(m
, "%u", *(u16
*)data
);
1484 seq_printf(m
, "%d", *(s8
*)data
);
1486 seq_printf(m
, "%u", *(u8
*)data
);
1489 btf_int_bits_seq_show(btf
, t
, data
, bits_offset
, m
);
1493 static const struct btf_kind_operations int_ops
= {
1494 .check_meta
= btf_int_check_meta
,
1495 .resolve
= btf_df_resolve
,
1496 .check_member
= btf_int_check_member
,
1497 .check_kflag_member
= btf_int_check_kflag_member
,
1498 .log_details
= btf_int_log
,
1499 .seq_show
= btf_int_seq_show
,
1502 static int btf_modifier_check_member(struct btf_verifier_env
*env
,
1503 const struct btf_type
*struct_type
,
1504 const struct btf_member
*member
,
1505 const struct btf_type
*member_type
)
1507 const struct btf_type
*resolved_type
;
1508 u32 resolved_type_id
= member
->type
;
1509 struct btf_member resolved_member
;
1510 struct btf
*btf
= env
->btf
;
1512 resolved_type
= btf_type_id_size(btf
, &resolved_type_id
, NULL
);
1513 if (!resolved_type
) {
1514 btf_verifier_log_member(env
, struct_type
, member
,
1519 resolved_member
= *member
;
1520 resolved_member
.type
= resolved_type_id
;
1522 return btf_type_ops(resolved_type
)->check_member(env
, struct_type
,
1527 static int btf_modifier_check_kflag_member(struct btf_verifier_env
*env
,
1528 const struct btf_type
*struct_type
,
1529 const struct btf_member
*member
,
1530 const struct btf_type
*member_type
)
1532 const struct btf_type
*resolved_type
;
1533 u32 resolved_type_id
= member
->type
;
1534 struct btf_member resolved_member
;
1535 struct btf
*btf
= env
->btf
;
1537 resolved_type
= btf_type_id_size(btf
, &resolved_type_id
, NULL
);
1538 if (!resolved_type
) {
1539 btf_verifier_log_member(env
, struct_type
, member
,
1544 resolved_member
= *member
;
1545 resolved_member
.type
= resolved_type_id
;
1547 return btf_type_ops(resolved_type
)->check_kflag_member(env
, struct_type
,
1552 static int btf_ptr_check_member(struct btf_verifier_env
*env
,
1553 const struct btf_type
*struct_type
,
1554 const struct btf_member
*member
,
1555 const struct btf_type
*member_type
)
1557 u32 struct_size
, struct_bits_off
, bytes_offset
;
1559 struct_size
= struct_type
->size
;
1560 struct_bits_off
= member
->offset
;
1561 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
1563 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
1564 btf_verifier_log_member(env
, struct_type
, member
,
1565 "Member is not byte aligned");
1569 if (struct_size
- bytes_offset
< sizeof(void *)) {
1570 btf_verifier_log_member(env
, struct_type
, member
,
1571 "Member exceeds struct_size");
1578 static int btf_ref_type_check_meta(struct btf_verifier_env
*env
,
1579 const struct btf_type
*t
,
1582 if (btf_type_vlen(t
)) {
1583 btf_verifier_log_type(env
, t
, "vlen != 0");
1587 if (btf_type_kflag(t
)) {
1588 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
1592 if (!BTF_TYPE_ID_VALID(t
->type
)) {
1593 btf_verifier_log_type(env
, t
, "Invalid type_id");
1597 /* typedef type must have a valid name, and other ref types,
1598 * volatile, const, restrict, should have a null name.
1600 if (BTF_INFO_KIND(t
->info
) == BTF_KIND_TYPEDEF
) {
1602 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
1603 btf_verifier_log_type(env
, t
, "Invalid name");
1608 btf_verifier_log_type(env
, t
, "Invalid name");
1613 btf_verifier_log_type(env
, t
, NULL
);
1618 static int btf_modifier_resolve(struct btf_verifier_env
*env
,
1619 const struct resolve_vertex
*v
)
1621 const struct btf_type
*t
= v
->t
;
1622 const struct btf_type
*next_type
;
1623 u32 next_type_id
= t
->type
;
1624 struct btf
*btf
= env
->btf
;
1626 next_type
= btf_type_by_id(btf
, next_type_id
);
1627 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
1628 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1632 if (!env_type_is_resolve_sink(env
, next_type
) &&
1633 !env_type_is_resolved(env
, next_type_id
))
1634 return env_stack_push(env
, next_type
, next_type_id
);
1636 /* Figure out the resolved next_type_id with size.
1637 * They will be stored in the current modifier's
1638 * resolved_ids and resolved_sizes such that it can
1639 * save us a few type-following when we use it later (e.g. in
1642 if (!btf_type_id_size(btf
, &next_type_id
, NULL
)) {
1643 if (env_type_is_resolved(env
, next_type_id
))
1644 next_type
= btf_type_id_resolve(btf
, &next_type_id
);
1646 /* "typedef void new_void", "const void"...etc */
1647 if (!btf_type_is_void(next_type
) &&
1648 !btf_type_is_fwd(next_type
) &&
1649 !btf_type_is_func_proto(next_type
)) {
1650 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1655 env_stack_pop_resolved(env
, next_type_id
, 0);
1660 static int btf_var_resolve(struct btf_verifier_env
*env
,
1661 const struct resolve_vertex
*v
)
1663 const struct btf_type
*next_type
;
1664 const struct btf_type
*t
= v
->t
;
1665 u32 next_type_id
= t
->type
;
1666 struct btf
*btf
= env
->btf
;
1668 next_type
= btf_type_by_id(btf
, next_type_id
);
1669 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
1670 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1674 if (!env_type_is_resolve_sink(env
, next_type
) &&
1675 !env_type_is_resolved(env
, next_type_id
))
1676 return env_stack_push(env
, next_type
, next_type_id
);
1678 if (btf_type_is_modifier(next_type
)) {
1679 const struct btf_type
*resolved_type
;
1680 u32 resolved_type_id
;
1682 resolved_type_id
= next_type_id
;
1683 resolved_type
= btf_type_id_resolve(btf
, &resolved_type_id
);
1685 if (btf_type_is_ptr(resolved_type
) &&
1686 !env_type_is_resolve_sink(env
, resolved_type
) &&
1687 !env_type_is_resolved(env
, resolved_type_id
))
1688 return env_stack_push(env
, resolved_type
,
1692 /* We must resolve to something concrete at this point, no
1693 * forward types or similar that would resolve to size of
1696 if (!btf_type_id_size(btf
, &next_type_id
, NULL
)) {
1697 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1701 env_stack_pop_resolved(env
, next_type_id
, 0);
1706 static int btf_ptr_resolve(struct btf_verifier_env
*env
,
1707 const struct resolve_vertex
*v
)
1709 const struct btf_type
*next_type
;
1710 const struct btf_type
*t
= v
->t
;
1711 u32 next_type_id
= t
->type
;
1712 struct btf
*btf
= env
->btf
;
1714 next_type
= btf_type_by_id(btf
, next_type_id
);
1715 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
1716 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1720 if (!env_type_is_resolve_sink(env
, next_type
) &&
1721 !env_type_is_resolved(env
, next_type_id
))
1722 return env_stack_push(env
, next_type
, next_type_id
);
1724 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1725 * the modifier may have stopped resolving when it was resolved
1726 * to a ptr (last-resolved-ptr).
1728 * We now need to continue from the last-resolved-ptr to
1729 * ensure the last-resolved-ptr will not referring back to
1730 * the currenct ptr (t).
1732 if (btf_type_is_modifier(next_type
)) {
1733 const struct btf_type
*resolved_type
;
1734 u32 resolved_type_id
;
1736 resolved_type_id
= next_type_id
;
1737 resolved_type
= btf_type_id_resolve(btf
, &resolved_type_id
);
1739 if (btf_type_is_ptr(resolved_type
) &&
1740 !env_type_is_resolve_sink(env
, resolved_type
) &&
1741 !env_type_is_resolved(env
, resolved_type_id
))
1742 return env_stack_push(env
, resolved_type
,
1746 if (!btf_type_id_size(btf
, &next_type_id
, NULL
)) {
1747 if (env_type_is_resolved(env
, next_type_id
))
1748 next_type
= btf_type_id_resolve(btf
, &next_type_id
);
1750 if (!btf_type_is_void(next_type
) &&
1751 !btf_type_is_fwd(next_type
) &&
1752 !btf_type_is_func_proto(next_type
)) {
1753 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
1758 env_stack_pop_resolved(env
, next_type_id
, 0);
1763 static void btf_modifier_seq_show(const struct btf
*btf
,
1764 const struct btf_type
*t
,
1765 u32 type_id
, void *data
,
1766 u8 bits_offset
, struct seq_file
*m
)
1768 t
= btf_type_id_resolve(btf
, &type_id
);
1770 btf_type_ops(t
)->seq_show(btf
, t
, type_id
, data
, bits_offset
, m
);
1773 static void btf_var_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1774 u32 type_id
, void *data
, u8 bits_offset
,
1777 t
= btf_type_id_resolve(btf
, &type_id
);
1779 btf_type_ops(t
)->seq_show(btf
, t
, type_id
, data
, bits_offset
, m
);
1782 static void btf_ptr_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
1783 u32 type_id
, void *data
, u8 bits_offset
,
1786 /* It is a hashed value */
1787 seq_printf(m
, "%p", *(void **)data
);
1790 static void btf_ref_type_log(struct btf_verifier_env
*env
,
1791 const struct btf_type
*t
)
1793 btf_verifier_log(env
, "type_id=%u", t
->type
);
1796 static struct btf_kind_operations modifier_ops
= {
1797 .check_meta
= btf_ref_type_check_meta
,
1798 .resolve
= btf_modifier_resolve
,
1799 .check_member
= btf_modifier_check_member
,
1800 .check_kflag_member
= btf_modifier_check_kflag_member
,
1801 .log_details
= btf_ref_type_log
,
1802 .seq_show
= btf_modifier_seq_show
,
1805 static struct btf_kind_operations ptr_ops
= {
1806 .check_meta
= btf_ref_type_check_meta
,
1807 .resolve
= btf_ptr_resolve
,
1808 .check_member
= btf_ptr_check_member
,
1809 .check_kflag_member
= btf_generic_check_kflag_member
,
1810 .log_details
= btf_ref_type_log
,
1811 .seq_show
= btf_ptr_seq_show
,
1814 static s32
btf_fwd_check_meta(struct btf_verifier_env
*env
,
1815 const struct btf_type
*t
,
1818 if (btf_type_vlen(t
)) {
1819 btf_verifier_log_type(env
, t
, "vlen != 0");
1824 btf_verifier_log_type(env
, t
, "type != 0");
1828 /* fwd type must have a valid name */
1830 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
1831 btf_verifier_log_type(env
, t
, "Invalid name");
1835 btf_verifier_log_type(env
, t
, NULL
);
/* Log whether the forward declaration is a union or a struct. */
static void btf_fwd_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
}
1846 static struct btf_kind_operations fwd_ops
= {
1847 .check_meta
= btf_fwd_check_meta
,
1848 .resolve
= btf_df_resolve
,
1849 .check_member
= btf_df_check_member
,
1850 .check_kflag_member
= btf_df_check_kflag_member
,
1851 .log_details
= btf_fwd_type_log
,
1852 .seq_show
= btf_df_seq_show
,
1855 static int btf_array_check_member(struct btf_verifier_env
*env
,
1856 const struct btf_type
*struct_type
,
1857 const struct btf_member
*member
,
1858 const struct btf_type
*member_type
)
1860 u32 struct_bits_off
= member
->offset
;
1861 u32 struct_size
, bytes_offset
;
1862 u32 array_type_id
, array_size
;
1863 struct btf
*btf
= env
->btf
;
1865 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
1866 btf_verifier_log_member(env
, struct_type
, member
,
1867 "Member is not byte aligned");
1871 array_type_id
= member
->type
;
1872 btf_type_id_size(btf
, &array_type_id
, &array_size
);
1873 struct_size
= struct_type
->size
;
1874 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
1875 if (struct_size
- bytes_offset
< array_size
) {
1876 btf_verifier_log_member(env
, struct_type
, member
,
1877 "Member exceeds struct_size");
1884 static s32
btf_array_check_meta(struct btf_verifier_env
*env
,
1885 const struct btf_type
*t
,
1888 const struct btf_array
*array
= btf_type_array(t
);
1889 u32 meta_needed
= sizeof(*array
);
1891 if (meta_left
< meta_needed
) {
1892 btf_verifier_log_basic(env
, t
,
1893 "meta_left:%u meta_needed:%u",
1894 meta_left
, meta_needed
);
1898 /* array type should not have a name */
1900 btf_verifier_log_type(env
, t
, "Invalid name");
1904 if (btf_type_vlen(t
)) {
1905 btf_verifier_log_type(env
, t
, "vlen != 0");
1909 if (btf_type_kflag(t
)) {
1910 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
1915 btf_verifier_log_type(env
, t
, "size != 0");
1919 /* Array elem type and index type cannot be in type void,
1920 * so !array->type and !array->index_type are not allowed.
1922 if (!array
->type
|| !BTF_TYPE_ID_VALID(array
->type
)) {
1923 btf_verifier_log_type(env
, t
, "Invalid elem");
1927 if (!array
->index_type
|| !BTF_TYPE_ID_VALID(array
->index_type
)) {
1928 btf_verifier_log_type(env
, t
, "Invalid index");
1932 btf_verifier_log_type(env
, t
, NULL
);
1937 static int btf_array_resolve(struct btf_verifier_env
*env
,
1938 const struct resolve_vertex
*v
)
1940 const struct btf_array
*array
= btf_type_array(v
->t
);
1941 const struct btf_type
*elem_type
, *index_type
;
1942 u32 elem_type_id
, index_type_id
;
1943 struct btf
*btf
= env
->btf
;
1946 /* Check array->index_type */
1947 index_type_id
= array
->index_type
;
1948 index_type
= btf_type_by_id(btf
, index_type_id
);
1949 if (btf_type_nosize_or_null(index_type
) ||
1950 btf_type_is_resolve_source_only(index_type
)) {
1951 btf_verifier_log_type(env
, v
->t
, "Invalid index");
1955 if (!env_type_is_resolve_sink(env
, index_type
) &&
1956 !env_type_is_resolved(env
, index_type_id
))
1957 return env_stack_push(env
, index_type
, index_type_id
);
1959 index_type
= btf_type_id_size(btf
, &index_type_id
, NULL
);
1960 if (!index_type
|| !btf_type_is_int(index_type
) ||
1961 !btf_type_int_is_regular(index_type
)) {
1962 btf_verifier_log_type(env
, v
->t
, "Invalid index");
1966 /* Check array->type */
1967 elem_type_id
= array
->type
;
1968 elem_type
= btf_type_by_id(btf
, elem_type_id
);
1969 if (btf_type_nosize_or_null(elem_type
) ||
1970 btf_type_is_resolve_source_only(elem_type
)) {
1971 btf_verifier_log_type(env
, v
->t
,
1976 if (!env_type_is_resolve_sink(env
, elem_type
) &&
1977 !env_type_is_resolved(env
, elem_type_id
))
1978 return env_stack_push(env
, elem_type
, elem_type_id
);
1980 elem_type
= btf_type_id_size(btf
, &elem_type_id
, &elem_size
);
1982 btf_verifier_log_type(env
, v
->t
, "Invalid elem");
1986 if (btf_type_is_int(elem_type
) && !btf_type_int_is_regular(elem_type
)) {
1987 btf_verifier_log_type(env
, v
->t
, "Invalid array of int");
1991 if (array
->nelems
&& elem_size
> U32_MAX
/ array
->nelems
) {
1992 btf_verifier_log_type(env
, v
->t
,
1993 "Array size overflows U32_MAX");
1997 env_stack_pop_resolved(env
, elem_type_id
, elem_size
* array
->nelems
);
2002 static void btf_array_log(struct btf_verifier_env
*env
,
2003 const struct btf_type
*t
)
2005 const struct btf_array
*array
= btf_type_array(t
);
2007 btf_verifier_log(env
, "type_id=%u index_type_id=%u nr_elems=%u",
2008 array
->type
, array
->index_type
, array
->nelems
);
2011 static void btf_array_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
2012 u32 type_id
, void *data
, u8 bits_offset
,
2015 const struct btf_array
*array
= btf_type_array(t
);
2016 const struct btf_kind_operations
*elem_ops
;
2017 const struct btf_type
*elem_type
;
2018 u32 i
, elem_size
, elem_type_id
;
2020 elem_type_id
= array
->type
;
2021 elem_type
= btf_type_id_size(btf
, &elem_type_id
, &elem_size
);
2022 elem_ops
= btf_type_ops(elem_type
);
2024 for (i
= 0; i
< array
->nelems
; i
++) {
2028 elem_ops
->seq_show(btf
, elem_type
, elem_type_id
, data
,
2035 static struct btf_kind_operations array_ops
= {
2036 .check_meta
= btf_array_check_meta
,
2037 .resolve
= btf_array_resolve
,
2038 .check_member
= btf_array_check_member
,
2039 .check_kflag_member
= btf_generic_check_kflag_member
,
2040 .log_details
= btf_array_log
,
2041 .seq_show
= btf_array_seq_show
,
2044 static int btf_struct_check_member(struct btf_verifier_env
*env
,
2045 const struct btf_type
*struct_type
,
2046 const struct btf_member
*member
,
2047 const struct btf_type
*member_type
)
2049 u32 struct_bits_off
= member
->offset
;
2050 u32 struct_size
, bytes_offset
;
2052 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
2053 btf_verifier_log_member(env
, struct_type
, member
,
2054 "Member is not byte aligned");
2058 struct_size
= struct_type
->size
;
2059 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
2060 if (struct_size
- bytes_offset
< member_type
->size
) {
2061 btf_verifier_log_member(env
, struct_type
, member
,
2062 "Member exceeds struct_size");
2069 static s32
btf_struct_check_meta(struct btf_verifier_env
*env
,
2070 const struct btf_type
*t
,
2073 bool is_union
= BTF_INFO_KIND(t
->info
) == BTF_KIND_UNION
;
2074 const struct btf_member
*member
;
2075 u32 meta_needed
, last_offset
;
2076 struct btf
*btf
= env
->btf
;
2077 u32 struct_size
= t
->size
;
2081 meta_needed
= btf_type_vlen(t
) * sizeof(*member
);
2082 if (meta_left
< meta_needed
) {
2083 btf_verifier_log_basic(env
, t
,
2084 "meta_left:%u meta_needed:%u",
2085 meta_left
, meta_needed
);
2089 /* struct type either no name or a valid one */
2091 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
2092 btf_verifier_log_type(env
, t
, "Invalid name");
2096 btf_verifier_log_type(env
, t
, NULL
);
2099 for_each_member(i
, t
, member
) {
2100 if (!btf_name_offset_valid(btf
, member
->name_off
)) {
2101 btf_verifier_log_member(env
, t
, member
,
2102 "Invalid member name_offset:%u",
2107 /* struct member either no name or a valid one */
2108 if (member
->name_off
&&
2109 !btf_name_valid_identifier(btf
, member
->name_off
)) {
2110 btf_verifier_log_member(env
, t
, member
, "Invalid name");
2113 /* A member cannot be in type void */
2114 if (!member
->type
|| !BTF_TYPE_ID_VALID(member
->type
)) {
2115 btf_verifier_log_member(env
, t
, member
,
2120 offset
= btf_member_bit_offset(t
, member
);
2121 if (is_union
&& offset
) {
2122 btf_verifier_log_member(env
, t
, member
,
2123 "Invalid member bits_offset");
2128 * ">" instead of ">=" because the last member could be
2131 if (last_offset
> offset
) {
2132 btf_verifier_log_member(env
, t
, member
,
2133 "Invalid member bits_offset");
2137 if (BITS_ROUNDUP_BYTES(offset
) > struct_size
) {
2138 btf_verifier_log_member(env
, t
, member
,
2139 "Member bits_offset exceeds its struct size");
2143 btf_verifier_log_member(env
, t
, member
, NULL
);
2144 last_offset
= offset
;
2150 static int btf_struct_resolve(struct btf_verifier_env
*env
,
2151 const struct resolve_vertex
*v
)
2153 const struct btf_member
*member
;
2157 /* Before continue resolving the next_member,
2158 * ensure the last member is indeed resolved to a
2159 * type with size info.
2161 if (v
->next_member
) {
2162 const struct btf_type
*last_member_type
;
2163 const struct btf_member
*last_member
;
2164 u16 last_member_type_id
;
2166 last_member
= btf_type_member(v
->t
) + v
->next_member
- 1;
2167 last_member_type_id
= last_member
->type
;
2168 if (WARN_ON_ONCE(!env_type_is_resolved(env
,
2169 last_member_type_id
)))
2172 last_member_type
= btf_type_by_id(env
->btf
,
2173 last_member_type_id
);
2174 if (btf_type_kflag(v
->t
))
2175 err
= btf_type_ops(last_member_type
)->check_kflag_member(env
, v
->t
,
2179 err
= btf_type_ops(last_member_type
)->check_member(env
, v
->t
,
2186 for_each_member_from(i
, v
->next_member
, v
->t
, member
) {
2187 u32 member_type_id
= member
->type
;
2188 const struct btf_type
*member_type
= btf_type_by_id(env
->btf
,
2191 if (btf_type_nosize_or_null(member_type
) ||
2192 btf_type_is_resolve_source_only(member_type
)) {
2193 btf_verifier_log_member(env
, v
->t
, member
,
2198 if (!env_type_is_resolve_sink(env
, member_type
) &&
2199 !env_type_is_resolved(env
, member_type_id
)) {
2200 env_stack_set_next_member(env
, i
+ 1);
2201 return env_stack_push(env
, member_type
, member_type_id
);
2204 if (btf_type_kflag(v
->t
))
2205 err
= btf_type_ops(member_type
)->check_kflag_member(env
, v
->t
,
2209 err
= btf_type_ops(member_type
)->check_member(env
, v
->t
,
2216 env_stack_pop_resolved(env
, 0, 0);
2221 static void btf_struct_log(struct btf_verifier_env
*env
,
2222 const struct btf_type
*t
)
2224 btf_verifier_log(env
, "size=%u vlen=%u", t
->size
, btf_type_vlen(t
));
2227 /* find 'struct bpf_spin_lock' in map value.
2228 * return >= 0 offset if found
2229 * and < 0 in case of error
2231 int btf_find_spin_lock(const struct btf
*btf
, const struct btf_type
*t
)
2233 const struct btf_member
*member
;
2234 u32 i
, off
= -ENOENT
;
2236 if (!__btf_type_is_struct(t
))
2239 for_each_member(i
, t
, member
) {
2240 const struct btf_type
*member_type
= btf_type_by_id(btf
,
2242 if (!__btf_type_is_struct(member_type
))
2244 if (member_type
->size
!= sizeof(struct bpf_spin_lock
))
2246 if (strcmp(__btf_name_by_offset(btf
, member_type
->name_off
),
2250 /* only one 'struct bpf_spin_lock' is allowed */
2252 off
= btf_member_bit_offset(t
, member
);
2254 /* valid C code cannot generate such BTF */
2257 if (off
% __alignof__(struct bpf_spin_lock
))
2258 /* valid struct bpf_spin_lock will be 4 byte aligned */
2264 static void btf_struct_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
2265 u32 type_id
, void *data
, u8 bits_offset
,
2268 const char *seq
= BTF_INFO_KIND(t
->info
) == BTF_KIND_UNION
? "|" : ",";
2269 const struct btf_member
*member
;
2273 for_each_member(i
, t
, member
) {
2274 const struct btf_type
*member_type
= btf_type_by_id(btf
,
2276 const struct btf_kind_operations
*ops
;
2277 u32 member_offset
, bitfield_size
;
2284 member_offset
= btf_member_bit_offset(t
, member
);
2285 bitfield_size
= btf_member_bitfield_size(t
, member
);
2286 bytes_offset
= BITS_ROUNDDOWN_BYTES(member_offset
);
2287 bits8_offset
= BITS_PER_BYTE_MASKED(member_offset
);
2288 if (bitfield_size
) {
2289 btf_bitfield_seq_show(data
+ bytes_offset
, bits8_offset
,
2292 ops
= btf_type_ops(member_type
);
2293 ops
->seq_show(btf
, member_type
, member
->type
,
2294 data
+ bytes_offset
, bits8_offset
, m
);
2300 static struct btf_kind_operations struct_ops
= {
2301 .check_meta
= btf_struct_check_meta
,
2302 .resolve
= btf_struct_resolve
,
2303 .check_member
= btf_struct_check_member
,
2304 .check_kflag_member
= btf_generic_check_kflag_member
,
2305 .log_details
= btf_struct_log
,
2306 .seq_show
= btf_struct_seq_show
,
2309 static int btf_enum_check_member(struct btf_verifier_env
*env
,
2310 const struct btf_type
*struct_type
,
2311 const struct btf_member
*member
,
2312 const struct btf_type
*member_type
)
2314 u32 struct_bits_off
= member
->offset
;
2315 u32 struct_size
, bytes_offset
;
2317 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
2318 btf_verifier_log_member(env
, struct_type
, member
,
2319 "Member is not byte aligned");
2323 struct_size
= struct_type
->size
;
2324 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
2325 if (struct_size
- bytes_offset
< sizeof(int)) {
2326 btf_verifier_log_member(env
, struct_type
, member
,
2327 "Member exceeds struct_size");
2334 static int btf_enum_check_kflag_member(struct btf_verifier_env
*env
,
2335 const struct btf_type
*struct_type
,
2336 const struct btf_member
*member
,
2337 const struct btf_type
*member_type
)
2339 u32 struct_bits_off
, nr_bits
, bytes_end
, struct_size
;
2340 u32 int_bitsize
= sizeof(int) * BITS_PER_BYTE
;
2342 struct_bits_off
= BTF_MEMBER_BIT_OFFSET(member
->offset
);
2343 nr_bits
= BTF_MEMBER_BITFIELD_SIZE(member
->offset
);
2345 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
2346 btf_verifier_log_member(env
, struct_type
, member
,
2347 "Member is not byte aligned");
2351 nr_bits
= int_bitsize
;
2352 } else if (nr_bits
> int_bitsize
) {
2353 btf_verifier_log_member(env
, struct_type
, member
,
2354 "Invalid member bitfield_size");
2358 struct_size
= struct_type
->size
;
2359 bytes_end
= BITS_ROUNDUP_BYTES(struct_bits_off
+ nr_bits
);
2360 if (struct_size
< bytes_end
) {
2361 btf_verifier_log_member(env
, struct_type
, member
,
2362 "Member exceeds struct_size");
2369 static s32
btf_enum_check_meta(struct btf_verifier_env
*env
,
2370 const struct btf_type
*t
,
2373 const struct btf_enum
*enums
= btf_type_enum(t
);
2374 struct btf
*btf
= env
->btf
;
2378 nr_enums
= btf_type_vlen(t
);
2379 meta_needed
= nr_enums
* sizeof(*enums
);
2381 if (meta_left
< meta_needed
) {
2382 btf_verifier_log_basic(env
, t
,
2383 "meta_left:%u meta_needed:%u",
2384 meta_left
, meta_needed
);
2388 if (btf_type_kflag(t
)) {
2389 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
2393 if (t
->size
> 8 || !is_power_of_2(t
->size
)) {
2394 btf_verifier_log_type(env
, t
, "Unexpected size");
2398 /* enum type either no name or a valid one */
2400 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
2401 btf_verifier_log_type(env
, t
, "Invalid name");
2405 btf_verifier_log_type(env
, t
, NULL
);
2407 for (i
= 0; i
< nr_enums
; i
++) {
2408 if (!btf_name_offset_valid(btf
, enums
[i
].name_off
)) {
2409 btf_verifier_log(env
, "\tInvalid name_offset:%u",
2414 /* enum member must have a valid name */
2415 if (!enums
[i
].name_off
||
2416 !btf_name_valid_identifier(btf
, enums
[i
].name_off
)) {
2417 btf_verifier_log_type(env
, t
, "Invalid name");
2421 if (env
->log
.level
== BPF_LOG_KERNEL
)
2423 btf_verifier_log(env
, "\t%s val=%d\n",
2424 __btf_name_by_offset(btf
, enums
[i
].name_off
),
2431 static void btf_enum_log(struct btf_verifier_env
*env
,
2432 const struct btf_type
*t
)
2434 btf_verifier_log(env
, "size=%u vlen=%u", t
->size
, btf_type_vlen(t
));
2437 static void btf_enum_seq_show(const struct btf
*btf
, const struct btf_type
*t
,
2438 u32 type_id
, void *data
, u8 bits_offset
,
2441 const struct btf_enum
*enums
= btf_type_enum(t
);
2442 u32 i
, nr_enums
= btf_type_vlen(t
);
2443 int v
= *(int *)data
;
2445 for (i
= 0; i
< nr_enums
; i
++) {
2446 if (v
== enums
[i
].val
) {
2448 __btf_name_by_offset(btf
,
2449 enums
[i
].name_off
));
2454 seq_printf(m
, "%d", v
);
2457 static struct btf_kind_operations enum_ops
= {
2458 .check_meta
= btf_enum_check_meta
,
2459 .resolve
= btf_df_resolve
,
2460 .check_member
= btf_enum_check_member
,
2461 .check_kflag_member
= btf_enum_check_kflag_member
,
2462 .log_details
= btf_enum_log
,
2463 .seq_show
= btf_enum_seq_show
,
2466 static s32
btf_func_proto_check_meta(struct btf_verifier_env
*env
,
2467 const struct btf_type
*t
,
2470 u32 meta_needed
= btf_type_vlen(t
) * sizeof(struct btf_param
);
2472 if (meta_left
< meta_needed
) {
2473 btf_verifier_log_basic(env
, t
,
2474 "meta_left:%u meta_needed:%u",
2475 meta_left
, meta_needed
);
2480 btf_verifier_log_type(env
, t
, "Invalid name");
2484 if (btf_type_kflag(t
)) {
2485 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
2489 btf_verifier_log_type(env
, t
, NULL
);
2494 static void btf_func_proto_log(struct btf_verifier_env
*env
,
2495 const struct btf_type
*t
)
2497 const struct btf_param
*args
= (const struct btf_param
*)(t
+ 1);
2498 u16 nr_args
= btf_type_vlen(t
), i
;
2500 btf_verifier_log(env
, "return=%u args=(", t
->type
);
2502 btf_verifier_log(env
, "void");
2506 if (nr_args
== 1 && !args
[0].type
) {
2507 /* Only one vararg */
2508 btf_verifier_log(env
, "vararg");
2512 btf_verifier_log(env
, "%u %s", args
[0].type
,
2513 __btf_name_by_offset(env
->btf
,
2515 for (i
= 1; i
< nr_args
- 1; i
++)
2516 btf_verifier_log(env
, ", %u %s", args
[i
].type
,
2517 __btf_name_by_offset(env
->btf
,
2521 const struct btf_param
*last_arg
= &args
[nr_args
- 1];
2524 btf_verifier_log(env
, ", %u %s", last_arg
->type
,
2525 __btf_name_by_offset(env
->btf
,
2526 last_arg
->name_off
));
2528 btf_verifier_log(env
, ", vararg");
2532 btf_verifier_log(env
, ")");
2535 static struct btf_kind_operations func_proto_ops
= {
2536 .check_meta
= btf_func_proto_check_meta
,
2537 .resolve
= btf_df_resolve
,
2539 * BTF_KIND_FUNC_PROTO cannot be directly referred by
2540 * a struct's member.
2542 * It should be a funciton pointer instead.
2543 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
2545 * Hence, there is no btf_func_check_member().
2547 .check_member
= btf_df_check_member
,
2548 .check_kflag_member
= btf_df_check_kflag_member
,
2549 .log_details
= btf_func_proto_log
,
2550 .seq_show
= btf_df_seq_show
,
2553 static s32
btf_func_check_meta(struct btf_verifier_env
*env
,
2554 const struct btf_type
*t
,
2558 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
2559 btf_verifier_log_type(env
, t
, "Invalid name");
2563 if (btf_type_vlen(t
)) {
2564 btf_verifier_log_type(env
, t
, "vlen != 0");
2568 if (btf_type_kflag(t
)) {
2569 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
2573 btf_verifier_log_type(env
, t
, NULL
);
2578 static struct btf_kind_operations func_ops
= {
2579 .check_meta
= btf_func_check_meta
,
2580 .resolve
= btf_df_resolve
,
2581 .check_member
= btf_df_check_member
,
2582 .check_kflag_member
= btf_df_check_kflag_member
,
2583 .log_details
= btf_ref_type_log
,
2584 .seq_show
= btf_df_seq_show
,
2587 static s32
btf_var_check_meta(struct btf_verifier_env
*env
,
2588 const struct btf_type
*t
,
2591 const struct btf_var
*var
;
2592 u32 meta_needed
= sizeof(*var
);
2594 if (meta_left
< meta_needed
) {
2595 btf_verifier_log_basic(env
, t
,
2596 "meta_left:%u meta_needed:%u",
2597 meta_left
, meta_needed
);
2601 if (btf_type_vlen(t
)) {
2602 btf_verifier_log_type(env
, t
, "vlen != 0");
2606 if (btf_type_kflag(t
)) {
2607 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
2612 !__btf_name_valid(env
->btf
, t
->name_off
, true)) {
2613 btf_verifier_log_type(env
, t
, "Invalid name");
2617 /* A var cannot be in type void */
2618 if (!t
->type
|| !BTF_TYPE_ID_VALID(t
->type
)) {
2619 btf_verifier_log_type(env
, t
, "Invalid type_id");
2623 var
= btf_type_var(t
);
2624 if (var
->linkage
!= BTF_VAR_STATIC
&&
2625 var
->linkage
!= BTF_VAR_GLOBAL_ALLOCATED
) {
2626 btf_verifier_log_type(env
, t
, "Linkage not supported");
2630 btf_verifier_log_type(env
, t
, NULL
);
2635 static void btf_var_log(struct btf_verifier_env
*env
, const struct btf_type
*t
)
2637 const struct btf_var
*var
= btf_type_var(t
);
2639 btf_verifier_log(env
, "type_id=%u linkage=%u", t
->type
, var
->linkage
);
2642 static const struct btf_kind_operations var_ops
= {
2643 .check_meta
= btf_var_check_meta
,
2644 .resolve
= btf_var_resolve
,
2645 .check_member
= btf_df_check_member
,
2646 .check_kflag_member
= btf_df_check_kflag_member
,
2647 .log_details
= btf_var_log
,
2648 .seq_show
= btf_var_seq_show
,
2651 static s32
btf_datasec_check_meta(struct btf_verifier_env
*env
,
2652 const struct btf_type
*t
,
2655 const struct btf_var_secinfo
*vsi
;
2656 u64 last_vsi_end_off
= 0, sum
= 0;
2659 meta_needed
= btf_type_vlen(t
) * sizeof(*vsi
);
2660 if (meta_left
< meta_needed
) {
2661 btf_verifier_log_basic(env
, t
,
2662 "meta_left:%u meta_needed:%u",
2663 meta_left
, meta_needed
);
2667 if (!btf_type_vlen(t
)) {
2668 btf_verifier_log_type(env
, t
, "vlen == 0");
2673 btf_verifier_log_type(env
, t
, "size == 0");
2677 if (btf_type_kflag(t
)) {
2678 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
2683 !btf_name_valid_section(env
->btf
, t
->name_off
)) {
2684 btf_verifier_log_type(env
, t
, "Invalid name");
2688 btf_verifier_log_type(env
, t
, NULL
);
2690 for_each_vsi(i
, t
, vsi
) {
2691 /* A var cannot be in type void */
2692 if (!vsi
->type
|| !BTF_TYPE_ID_VALID(vsi
->type
)) {
2693 btf_verifier_log_vsi(env
, t
, vsi
,
2698 if (vsi
->offset
< last_vsi_end_off
|| vsi
->offset
>= t
->size
) {
2699 btf_verifier_log_vsi(env
, t
, vsi
,
2704 if (!vsi
->size
|| vsi
->size
> t
->size
) {
2705 btf_verifier_log_vsi(env
, t
, vsi
,
2710 last_vsi_end_off
= vsi
->offset
+ vsi
->size
;
2711 if (last_vsi_end_off
> t
->size
) {
2712 btf_verifier_log_vsi(env
, t
, vsi
,
2713 "Invalid offset+size");
2717 btf_verifier_log_vsi(env
, t
, vsi
, NULL
);
2721 if (t
->size
< sum
) {
2722 btf_verifier_log_type(env
, t
, "Invalid btf_info size");
2729 static int btf_datasec_resolve(struct btf_verifier_env
*env
,
2730 const struct resolve_vertex
*v
)
2732 const struct btf_var_secinfo
*vsi
;
2733 struct btf
*btf
= env
->btf
;
2736 for_each_vsi_from(i
, v
->next_member
, v
->t
, vsi
) {
2737 u32 var_type_id
= vsi
->type
, type_id
, type_size
= 0;
2738 const struct btf_type
*var_type
= btf_type_by_id(env
->btf
,
2740 if (!var_type
|| !btf_type_is_var(var_type
)) {
2741 btf_verifier_log_vsi(env
, v
->t
, vsi
,
2742 "Not a VAR kind member");
2746 if (!env_type_is_resolve_sink(env
, var_type
) &&
2747 !env_type_is_resolved(env
, var_type_id
)) {
2748 env_stack_set_next_member(env
, i
+ 1);
2749 return env_stack_push(env
, var_type
, var_type_id
);
2752 type_id
= var_type
->type
;
2753 if (!btf_type_id_size(btf
, &type_id
, &type_size
)) {
2754 btf_verifier_log_vsi(env
, v
->t
, vsi
, "Invalid type");
2758 if (vsi
->size
< type_size
) {
2759 btf_verifier_log_vsi(env
, v
->t
, vsi
, "Invalid size");
2764 env_stack_pop_resolved(env
, 0, 0);
2768 static void btf_datasec_log(struct btf_verifier_env
*env
,
2769 const struct btf_type
*t
)
2771 btf_verifier_log(env
, "size=%u vlen=%u", t
->size
, btf_type_vlen(t
));
2774 static void btf_datasec_seq_show(const struct btf
*btf
,
2775 const struct btf_type
*t
, u32 type_id
,
2776 void *data
, u8 bits_offset
,
2779 const struct btf_var_secinfo
*vsi
;
2780 const struct btf_type
*var
;
2783 seq_printf(m
, "section (\"%s\") = {", __btf_name_by_offset(btf
, t
->name_off
));
2784 for_each_vsi(i
, t
, vsi
) {
2785 var
= btf_type_by_id(btf
, vsi
->type
);
2788 btf_type_ops(var
)->seq_show(btf
, var
, vsi
->type
,
2789 data
+ vsi
->offset
, bits_offset
, m
);
2794 static const struct btf_kind_operations datasec_ops
= {
2795 .check_meta
= btf_datasec_check_meta
,
2796 .resolve
= btf_datasec_resolve
,
2797 .check_member
= btf_df_check_member
,
2798 .check_kflag_member
= btf_df_check_kflag_member
,
2799 .log_details
= btf_datasec_log
,
2800 .seq_show
= btf_datasec_seq_show
,
2803 static int btf_func_proto_check(struct btf_verifier_env
*env
,
2804 const struct btf_type
*t
)
2806 const struct btf_type
*ret_type
;
2807 const struct btf_param
*args
;
2808 const struct btf
*btf
;
2813 args
= (const struct btf_param
*)(t
+ 1);
2814 nr_args
= btf_type_vlen(t
);
2816 /* Check func return type which could be "void" (t->type == 0) */
2818 u32 ret_type_id
= t
->type
;
2820 ret_type
= btf_type_by_id(btf
, ret_type_id
);
2822 btf_verifier_log_type(env
, t
, "Invalid return type");
2826 if (btf_type_needs_resolve(ret_type
) &&
2827 !env_type_is_resolved(env
, ret_type_id
)) {
2828 err
= btf_resolve(env
, ret_type
, ret_type_id
);
2833 /* Ensure the return type is a type that has a size */
2834 if (!btf_type_id_size(btf
, &ret_type_id
, NULL
)) {
2835 btf_verifier_log_type(env
, t
, "Invalid return type");
2843 /* Last func arg type_id could be 0 if it is a vararg */
2844 if (!args
[nr_args
- 1].type
) {
2845 if (args
[nr_args
- 1].name_off
) {
2846 btf_verifier_log_type(env
, t
, "Invalid arg#%u",
2854 for (i
= 0; i
< nr_args
; i
++) {
2855 const struct btf_type
*arg_type
;
2858 arg_type_id
= args
[i
].type
;
2859 arg_type
= btf_type_by_id(btf
, arg_type_id
);
2861 btf_verifier_log_type(env
, t
, "Invalid arg#%u", i
+ 1);
2866 if (args
[i
].name_off
&&
2867 (!btf_name_offset_valid(btf
, args
[i
].name_off
) ||
2868 !btf_name_valid_identifier(btf
, args
[i
].name_off
))) {
2869 btf_verifier_log_type(env
, t
,
2870 "Invalid arg#%u", i
+ 1);
2875 if (btf_type_needs_resolve(arg_type
) &&
2876 !env_type_is_resolved(env
, arg_type_id
)) {
2877 err
= btf_resolve(env
, arg_type
, arg_type_id
);
2882 if (!btf_type_id_size(btf
, &arg_type_id
, NULL
)) {
2883 btf_verifier_log_type(env
, t
, "Invalid arg#%u", i
+ 1);
2892 static int btf_func_check(struct btf_verifier_env
*env
,
2893 const struct btf_type
*t
)
2895 const struct btf_type
*proto_type
;
2896 const struct btf_param
*args
;
2897 const struct btf
*btf
;
2901 proto_type
= btf_type_by_id(btf
, t
->type
);
2903 if (!proto_type
|| !btf_type_is_func_proto(proto_type
)) {
2904 btf_verifier_log_type(env
, t
, "Invalid type_id");
2908 args
= (const struct btf_param
*)(proto_type
+ 1);
2909 nr_args
= btf_type_vlen(proto_type
);
2910 for (i
= 0; i
< nr_args
; i
++) {
2911 if (!args
[i
].name_off
&& args
[i
].type
) {
2912 btf_verifier_log_type(env
, t
, "Invalid arg#%u", i
+ 1);
2920 static const struct btf_kind_operations
* const kind_ops
[NR_BTF_KINDS
] = {
2921 [BTF_KIND_INT
] = &int_ops
,
2922 [BTF_KIND_PTR
] = &ptr_ops
,
2923 [BTF_KIND_ARRAY
] = &array_ops
,
2924 [BTF_KIND_STRUCT
] = &struct_ops
,
2925 [BTF_KIND_UNION
] = &struct_ops
,
2926 [BTF_KIND_ENUM
] = &enum_ops
,
2927 [BTF_KIND_FWD
] = &fwd_ops
,
2928 [BTF_KIND_TYPEDEF
] = &modifier_ops
,
2929 [BTF_KIND_VOLATILE
] = &modifier_ops
,
2930 [BTF_KIND_CONST
] = &modifier_ops
,
2931 [BTF_KIND_RESTRICT
] = &modifier_ops
,
2932 [BTF_KIND_FUNC
] = &func_ops
,
2933 [BTF_KIND_FUNC_PROTO
] = &func_proto_ops
,
2934 [BTF_KIND_VAR
] = &var_ops
,
2935 [BTF_KIND_DATASEC
] = &datasec_ops
,
2938 static s32
btf_check_meta(struct btf_verifier_env
*env
,
2939 const struct btf_type
*t
,
2942 u32 saved_meta_left
= meta_left
;
2945 if (meta_left
< sizeof(*t
)) {
2946 btf_verifier_log(env
, "[%u] meta_left:%u meta_needed:%zu",
2947 env
->log_type_id
, meta_left
, sizeof(*t
));
2950 meta_left
-= sizeof(*t
);
2952 if (t
->info
& ~BTF_INFO_MASK
) {
2953 btf_verifier_log(env
, "[%u] Invalid btf_info:%x",
2954 env
->log_type_id
, t
->info
);
2958 if (BTF_INFO_KIND(t
->info
) > BTF_KIND_MAX
||
2959 BTF_INFO_KIND(t
->info
) == BTF_KIND_UNKN
) {
2960 btf_verifier_log(env
, "[%u] Invalid kind:%u",
2961 env
->log_type_id
, BTF_INFO_KIND(t
->info
));
2965 if (!btf_name_offset_valid(env
->btf
, t
->name_off
)) {
2966 btf_verifier_log(env
, "[%u] Invalid name_offset:%u",
2967 env
->log_type_id
, t
->name_off
);
2971 var_meta_size
= btf_type_ops(t
)->check_meta(env
, t
, meta_left
);
2972 if (var_meta_size
< 0)
2973 return var_meta_size
;
2975 meta_left
-= var_meta_size
;
2977 return saved_meta_left
- meta_left
;
2980 static int btf_check_all_metas(struct btf_verifier_env
*env
)
2982 struct btf
*btf
= env
->btf
;
2983 struct btf_header
*hdr
;
2987 cur
= btf
->nohdr_data
+ hdr
->type_off
;
2988 end
= cur
+ hdr
->type_len
;
2990 env
->log_type_id
= 1;
2992 struct btf_type
*t
= cur
;
2995 meta_size
= btf_check_meta(env
, t
, end
- cur
);
2999 btf_add_type(env
, t
);
3007 static bool btf_resolve_valid(struct btf_verifier_env
*env
,
3008 const struct btf_type
*t
,
3011 struct btf
*btf
= env
->btf
;
3013 if (!env_type_is_resolved(env
, type_id
))
3016 if (btf_type_is_struct(t
) || btf_type_is_datasec(t
))
3017 return !btf
->resolved_ids
[type_id
] &&
3018 !btf
->resolved_sizes
[type_id
];
3020 if (btf_type_is_modifier(t
) || btf_type_is_ptr(t
) ||
3021 btf_type_is_var(t
)) {
3022 t
= btf_type_id_resolve(btf
, &type_id
);
3024 !btf_type_is_modifier(t
) &&
3025 !btf_type_is_var(t
) &&
3026 !btf_type_is_datasec(t
);
3029 if (btf_type_is_array(t
)) {
3030 const struct btf_array
*array
= btf_type_array(t
);
3031 const struct btf_type
*elem_type
;
3032 u32 elem_type_id
= array
->type
;
3035 elem_type
= btf_type_id_size(btf
, &elem_type_id
, &elem_size
);
3036 return elem_type
&& !btf_type_is_modifier(elem_type
) &&
3037 (array
->nelems
* elem_size
==
3038 btf
->resolved_sizes
[type_id
]);
3044 static int btf_resolve(struct btf_verifier_env
*env
,
3045 const struct btf_type
*t
, u32 type_id
)
3047 u32 save_log_type_id
= env
->log_type_id
;
3048 const struct resolve_vertex
*v
;
3051 env
->resolve_mode
= RESOLVE_TBD
;
3052 env_stack_push(env
, t
, type_id
);
3053 while (!err
&& (v
= env_stack_peak(env
))) {
3054 env
->log_type_id
= v
->type_id
;
3055 err
= btf_type_ops(v
->t
)->resolve(env
, v
);
3058 env
->log_type_id
= type_id
;
3059 if (err
== -E2BIG
) {
3060 btf_verifier_log_type(env
, t
,
3061 "Exceeded max resolving depth:%u",
3063 } else if (err
== -EEXIST
) {
3064 btf_verifier_log_type(env
, t
, "Loop detected");
3067 /* Final sanity check */
3068 if (!err
&& !btf_resolve_valid(env
, t
, type_id
)) {
3069 btf_verifier_log_type(env
, t
, "Invalid resolve state");
3073 env
->log_type_id
= save_log_type_id
;
3077 static int btf_check_all_types(struct btf_verifier_env
*env
)
3079 struct btf
*btf
= env
->btf
;
3083 err
= env_resolve_init(env
);
3088 for (type_id
= 1; type_id
<= btf
->nr_types
; type_id
++) {
3089 const struct btf_type
*t
= btf_type_by_id(btf
, type_id
);
3091 env
->log_type_id
= type_id
;
3092 if (btf_type_needs_resolve(t
) &&
3093 !env_type_is_resolved(env
, type_id
)) {
3094 err
= btf_resolve(env
, t
, type_id
);
3099 if (btf_type_is_func_proto(t
)) {
3100 err
= btf_func_proto_check(env
, t
);
3105 if (btf_type_is_func(t
)) {
3106 err
= btf_func_check(env
, t
);
3115 static int btf_parse_type_sec(struct btf_verifier_env
*env
)
3117 const struct btf_header
*hdr
= &env
->btf
->hdr
;
3120 /* Type section must align to 4 bytes */
3121 if (hdr
->type_off
& (sizeof(u32
) - 1)) {
3122 btf_verifier_log(env
, "Unaligned type_off");
3126 if (!hdr
->type_len
) {
3127 btf_verifier_log(env
, "No type found");
3131 err
= btf_check_all_metas(env
);
3135 return btf_check_all_types(env
);
3138 static int btf_parse_str_sec(struct btf_verifier_env
*env
)
3140 const struct btf_header
*hdr
;
3141 struct btf
*btf
= env
->btf
;
3142 const char *start
, *end
;
3145 start
= btf
->nohdr_data
+ hdr
->str_off
;
3146 end
= start
+ hdr
->str_len
;
3148 if (end
!= btf
->data
+ btf
->data_size
) {
3149 btf_verifier_log(env
, "String section is not at the end");
3153 if (!hdr
->str_len
|| hdr
->str_len
- 1 > BTF_MAX_NAME_OFFSET
||
3154 start
[0] || end
[-1]) {
3155 btf_verifier_log(env
, "Invalid string section");
3159 btf
->strings
= start
;
3164 static const size_t btf_sec_info_offset
[] = {
3165 offsetof(struct btf_header
, type_off
),
3166 offsetof(struct btf_header
, str_off
),
3169 static int btf_sec_info_cmp(const void *a
, const void *b
)
3171 const struct btf_sec_info
*x
= a
;
3172 const struct btf_sec_info
*y
= b
;
3174 return (int)(x
->off
- y
->off
) ? : (int)(x
->len
- y
->len
);
3177 static int btf_check_sec_info(struct btf_verifier_env
*env
,
3180 struct btf_sec_info secs
[ARRAY_SIZE(btf_sec_info_offset
)];
3181 u32 total
, expected_total
, i
;
3182 const struct btf_header
*hdr
;
3183 const struct btf
*btf
;
3188 /* Populate the secs from hdr */
3189 for (i
= 0; i
< ARRAY_SIZE(btf_sec_info_offset
); i
++)
3190 secs
[i
] = *(struct btf_sec_info
*)((void *)hdr
+
3191 btf_sec_info_offset
[i
]);
3193 sort(secs
, ARRAY_SIZE(btf_sec_info_offset
),
3194 sizeof(struct btf_sec_info
), btf_sec_info_cmp
, NULL
);
3196 /* Check for gaps and overlap among sections */
3198 expected_total
= btf_data_size
- hdr
->hdr_len
;
3199 for (i
= 0; i
< ARRAY_SIZE(btf_sec_info_offset
); i
++) {
3200 if (expected_total
< secs
[i
].off
) {
3201 btf_verifier_log(env
, "Invalid section offset");
3204 if (total
< secs
[i
].off
) {
3206 btf_verifier_log(env
, "Unsupported section found");
3209 if (total
> secs
[i
].off
) {
3210 btf_verifier_log(env
, "Section overlap found");
3213 if (expected_total
- total
< secs
[i
].len
) {
3214 btf_verifier_log(env
,
3215 "Total section length too long");
3218 total
+= secs
[i
].len
;
3221 /* There is data other than hdr and known sections */
3222 if (expected_total
!= total
) {
3223 btf_verifier_log(env
, "Unsupported section found");
3230 static int btf_parse_hdr(struct btf_verifier_env
*env
)
3232 u32 hdr_len
, hdr_copy
, btf_data_size
;
3233 const struct btf_header
*hdr
;
3238 btf_data_size
= btf
->data_size
;
3241 offsetof(struct btf_header
, hdr_len
) + sizeof(hdr
->hdr_len
)) {
3242 btf_verifier_log(env
, "hdr_len not found");
3247 hdr_len
= hdr
->hdr_len
;
3248 if (btf_data_size
< hdr_len
) {
3249 btf_verifier_log(env
, "btf_header not found");
3253 /* Ensure the unsupported header fields are zero */
3254 if (hdr_len
> sizeof(btf
->hdr
)) {
3255 u8
*expected_zero
= btf
->data
+ sizeof(btf
->hdr
);
3256 u8
*end
= btf
->data
+ hdr_len
;
3258 for (; expected_zero
< end
; expected_zero
++) {
3259 if (*expected_zero
) {
3260 btf_verifier_log(env
, "Unsupported btf_header");
3266 hdr_copy
= min_t(u32
, hdr_len
, sizeof(btf
->hdr
));
3267 memcpy(&btf
->hdr
, btf
->data
, hdr_copy
);
3271 btf_verifier_log_hdr(env
, btf_data_size
);
3273 if (hdr
->magic
!= BTF_MAGIC
) {
3274 btf_verifier_log(env
, "Invalid magic");
3278 if (hdr
->version
!= BTF_VERSION
) {
3279 btf_verifier_log(env
, "Unsupported version");
3284 btf_verifier_log(env
, "Unsupported flags");
3288 if (btf_data_size
== hdr
->hdr_len
) {
3289 btf_verifier_log(env
, "No data");
3293 err
= btf_check_sec_info(env
, btf_data_size
);
3300 static struct btf
*btf_parse(void __user
*btf_data
, u32 btf_data_size
,
3301 u32 log_level
, char __user
*log_ubuf
, u32 log_size
)
3303 struct btf_verifier_env
*env
= NULL
;
3304 struct bpf_verifier_log
*log
;
3305 struct btf
*btf
= NULL
;
3309 if (btf_data_size
> BTF_MAX_SIZE
)
3310 return ERR_PTR(-E2BIG
);
3312 env
= kzalloc(sizeof(*env
), GFP_KERNEL
| __GFP_NOWARN
);
3314 return ERR_PTR(-ENOMEM
);
3317 if (log_level
|| log_ubuf
|| log_size
) {
3318 /* user requested verbose verifier output
3319 * and supplied buffer to store the verification trace
3321 log
->level
= log_level
;
3322 log
->ubuf
= log_ubuf
;
3323 log
->len_total
= log_size
;
3325 /* log attributes have to be sane */
3326 if (log
->len_total
< 128 || log
->len_total
> UINT_MAX
>> 8 ||
3327 !log
->level
|| !log
->ubuf
) {
3333 btf
= kzalloc(sizeof(*btf
), GFP_KERNEL
| __GFP_NOWARN
);
3340 data
= kvmalloc(btf_data_size
, GFP_KERNEL
| __GFP_NOWARN
);
3347 btf
->data_size
= btf_data_size
;
3349 if (copy_from_user(data
, btf_data
, btf_data_size
)) {
3354 err
= btf_parse_hdr(env
);
3358 btf
->nohdr_data
= btf
->data
+ btf
->hdr
.hdr_len
;
3360 err
= btf_parse_str_sec(env
);
3364 err
= btf_parse_type_sec(env
);
3368 if (log
->level
&& bpf_verifier_log_full(log
)) {
3373 btf_verifier_env_free(env
);
3374 refcount_set(&btf
->refcnt
, 1);
3378 btf_verifier_env_free(env
);
3381 return ERR_PTR(err
);
3384 extern char __weak _binary__btf_vmlinux_bin_start
[];
3385 extern char __weak _binary__btf_vmlinux_bin_end
[];
3387 struct btf
*btf_parse_vmlinux(void)
3389 struct btf_verifier_env
*env
= NULL
;
3390 struct bpf_verifier_log
*log
;
3391 struct btf
*btf
= NULL
;
3394 env
= kzalloc(sizeof(*env
), GFP_KERNEL
| __GFP_NOWARN
);
3396 return ERR_PTR(-ENOMEM
);
3399 log
->level
= BPF_LOG_KERNEL
;
3401 btf
= kzalloc(sizeof(*btf
), GFP_KERNEL
| __GFP_NOWARN
);
3408 btf
->data
= _binary__btf_vmlinux_bin_start
;
3409 btf
->data_size
= _binary__btf_vmlinux_bin_end
-
3410 _binary__btf_vmlinux_bin_start
;
3412 err
= btf_parse_hdr(env
);
3416 btf
->nohdr_data
= btf
->data
+ btf
->hdr
.hdr_len
;
3418 err
= btf_parse_str_sec(env
);
3422 err
= btf_check_all_metas(env
);
3426 btf_verifier_env_free(env
);
3427 refcount_set(&btf
->refcnt
, 1);
3431 btf_verifier_env_free(env
);
3436 return ERR_PTR(err
);
3439 void btf_type_seq_show(const struct btf
*btf
, u32 type_id
, void *obj
,
3442 const struct btf_type
*t
= btf_type_by_id(btf
, type_id
);
3444 btf_type_ops(t
)->seq_show(btf
, t
, type_id
, obj
, 0, m
);
#ifdef CONFIG_PROC_FS
/* /proc/<pid>/fdinfo output for a BTF fd. */
static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct btf *btf = filp->private_data;

	seq_printf(m, "btf_id:\t%u\n", btf->id);
}
#endif
3456 static int btf_release(struct inode
*inode
, struct file
*filp
)
3458 btf_put(filp
->private_data
);
3462 const struct file_operations btf_fops
= {
3463 #ifdef CONFIG_PROC_FS
3464 .show_fdinfo
= bpf_btf_show_fdinfo
,
3466 .release
= btf_release
,
3469 static int __btf_new_fd(struct btf
*btf
)
3471 return anon_inode_getfd("btf", &btf_fops
, btf
, O_RDONLY
| O_CLOEXEC
);
3474 int btf_new_fd(const union bpf_attr
*attr
)
3479 btf
= btf_parse(u64_to_user_ptr(attr
->btf
),
3480 attr
->btf_size
, attr
->btf_log_level
,
3481 u64_to_user_ptr(attr
->btf_log_buf
),
3482 attr
->btf_log_size
);
3484 return PTR_ERR(btf
);
3486 ret
= btf_alloc_id(btf
);
3493 * The BTF ID is published to the userspace.
3494 * All BTF free must go through call_rcu() from
3495 * now on (i.e. free by calling btf_put()).
3498 ret
= __btf_new_fd(btf
);
3505 struct btf
*btf_get_by_fd(int fd
)
3513 return ERR_PTR(-EBADF
);
3515 if (f
.file
->f_op
!= &btf_fops
) {
3517 return ERR_PTR(-EINVAL
);
3520 btf
= f
.file
->private_data
;
3521 refcount_inc(&btf
->refcnt
);
3527 int btf_get_info_by_fd(const struct btf
*btf
,
3528 const union bpf_attr
*attr
,
3529 union bpf_attr __user
*uattr
)
3531 struct bpf_btf_info __user
*uinfo
;
3532 struct bpf_btf_info info
= {};
3533 u32 info_copy
, btf_copy
;
3537 uinfo
= u64_to_user_ptr(attr
->info
.info
);
3538 uinfo_len
= attr
->info
.info_len
;
3540 info_copy
= min_t(u32
, uinfo_len
, sizeof(info
));
3541 if (copy_from_user(&info
, uinfo
, info_copy
))
3545 ubtf
= u64_to_user_ptr(info
.btf
);
3546 btf_copy
= min_t(u32
, btf
->data_size
, info
.btf_size
);
3547 if (copy_to_user(ubtf
, btf
->data
, btf_copy
))
3549 info
.btf_size
= btf
->data_size
;
3551 if (copy_to_user(uinfo
, &info
, info_copy
) ||
3552 put_user(info_copy
, &uattr
->info
.info_len
))
3558 int btf_get_fd_by_id(u32 id
)
3564 btf
= idr_find(&btf_idr
, id
);
3565 if (!btf
|| !refcount_inc_not_zero(&btf
->refcnt
))
3566 btf
= ERR_PTR(-ENOENT
);
3570 return PTR_ERR(btf
);
3572 fd
= __btf_new_fd(btf
);
3579 u32
btf_id(const struct btf
*btf
)