]>
Commit | Line | Data |
---|---|---|
58e2af8b JK |
1 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
2 | * | |
3 | * This program is free software; you can redistribute it and/or | |
4 | * modify it under the terms of version 2 of the GNU General Public | |
5 | * License as published by the Free Software Foundation. | |
6 | */ | |
7 | #ifndef _LINUX_BPF_VERIFIER_H | |
8 | #define _LINUX_BPF_VERIFIER_H 1 | |
9 | ||
10 | #include <linux/bpf.h> /* for enum bpf_reg_type */ | |
11 | #include <linux/filter.h> /* for MAX_BPF_STACK */ | |
f1174f77 | 12 | #include <linux/tnum.h> |
58e2af8b | 13 | |
/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
48461135 | 23 | |
/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
};
42 | ||
/* Verifier's knowledge about the contents of one BPF register (or one
 * spilled register in a stack slot, see bpf_stack_state::spilled_ptr).
 */
struct bpf_reg_state {
	/* Ordering of fields matters.  See states_equal() */
	enum bpf_reg_type type;
	union {
		/* valid when type == PTR_TO_PACKET */
		u16 range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;

		/* Max size from any of the above. */
		unsigned long raw;
	};
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 */
	u32 id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack.  To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	/* liveness mark for this register; see enum bpf_reg_liveness above */
	enum bpf_reg_liveness live;
};
95 | ||
/* Type tag recorded for each byte of a tracked stack slot
 * (see bpf_stack_state::slot_type).
 */
enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
104 | ||
/* One BPF_REG_SIZE-byte slot of a tracked stack frame. */
struct bpf_stack_state {
	/* register state for a spill; only meaningful when
	 * slot_type[0] == STACK_SPILL (see bpf_get_spilled_reg())
	 */
	struct bpf_reg_state spilled_ptr;
	/* one enum bpf_stack_slot_type tag per byte of the slot */
	u8 slot_type[BPF_REG_SIZE];
};
109 | ||
/* Bookkeeping for one reference acquired by the program; used to report
 * reference leaks to the user.
 */
struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred.  This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};
120 | ||
/* state of the program:
 * type of all registers and stack info
 * (one instance per active call frame; see bpf_verifier_state::frame[])
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_stack_depth
	 * zero == main subprog
	 */
	u32 subprogno;

	/* The following fields should be last.  See copy_func_state() */
	/* number of acquired references tracked in 'refs' */
	int acquired_refs;
	struct bpf_reference_state *refs;
	/* size of 'stack' in bytes; allocated_stack / BPF_REG_SIZE gives
	 * the number of slots (see bpf_get_spilled_reg())
	 */
	int allocated_stack;
	struct bpf_stack_state *stack;
};
144 | ||
#define MAX_CALL_FRAMES 8
/* Overall verifier state: one bpf_func_state per active call frame. */
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	/* index of the current (innermost) frame in frame[] */
	u32 curframe;
};
151 | ||
/* Evaluates to a pointer to the register spilled into stack slot 'slot' of
 * 'frame', or NULL when the slot index is out of range or the slot does not
 * hold a spill (slot_type[0] != STACK_SPILL).
 */
#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
162 | ||
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
};
168 | ||
/* Per-instruction auxiliary data kept by the verifier
 * (see bpf_verifier_env::insn_aux_data).
 */
struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
	};
	int ctx_field_size;	/* the ctx field size for load insn, maybe 0 */
	int sanitize_stack_off; /* stack slot to be cleared */
	bool seen;		/* this insn was processed by the verifier */
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
181 | ||
#define BPF_VERIFIER_TMP_LOG_SIZE	1024

/* Verifier output log, copied out to a user-supplied buffer. */
struct bpf_verifier_log {
	/* verbosity; zero disables logging (see bpf_verifier_log_needed()) */
	u32 level;
	/* kernel-side scratch buffer for formatting one message */
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	/* user-supplied destination buffer */
	char __user *ubuf;
	/* bytes of ubuf consumed so far */
	u32 len_used;
	/* total capacity of ubuf */
	u32 len_total;
};
191 | ||
b9193c1b | 192 | static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) |
e7bf8249 JK |
193 | { |
194 | return log->len_used >= log->len_total - 1; | |
195 | } | |
196 | ||
77d2e05a MKL |
197 | static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) |
198 | { | |
199 | return log->level && log->ubuf && !bpf_verifier_log_full(log); | |
200 | } | |
201 | ||
#define BPF_MAX_SUBPROGS 256

/* Per-subprogram info (see bpf_verifier_env::subprog_info). */
struct bpf_subprog_info {
	u32 start;		/* insn idx of function entry point */
	u16 stack_depth;	/* max. stack depth used by this function */
};
208 | ||
/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	struct bpf_verifier_log log;	/* verifier output log */
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	u32 subprog_cnt;		/* number of used entries in subprog_info */
};
230 | ||
/* Append a formatted message to the verifier log (va_list variant). */
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
/* printf-style helper writing to env's verifier log. */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
235 | ||
fd978bf7 | 236 | static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) |
638f5b90 | 237 | { |
f4d7e40a AS |
238 | struct bpf_verifier_state *cur = env->cur_state; |
239 | ||
fd978bf7 JS |
240 | return cur->frame[cur->curframe]; |
241 | } | |
242 | ||
243 | static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) | |
244 | { | |
245 | return cur_func(env)->regs; | |
638f5b90 AS |
246 | } |
247 | ||
/* Hooks called during verification of device-bound programs — presumably
 * implemented by the BPF offload layer; confirm against kernel/bpf/offload.c.
 */
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
ab3f0063 | 252 | |
58e2af8b | 253 | #endif /* _LINUX_BPF_VERIFIER_H */ |