/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
        }

        return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
        bool unpriv = !capable(CAP_SYS_ADMIN);
        struct bpf_array *array;
        u64 array_size;

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 ||
            attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
            (percpu && numa_node != NUMA_NO_NODE))
                return ERR_PTR(-EINVAL);

        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return ERR_PTR(-E2BIG);

        elem_size = round_up(attr->value_size, 8);

        max_entries = attr->max_entries;
        index_mask = roundup_pow_of_two(max_entries) - 1;

        if (unpriv)
                /* round up array size to nearest power of 2,
                 * since cpu will speculate within index_mask limits
                 */
                max_entries = index_mask + 1;

        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) max_entries * sizeof(void *);
        else
                array_size += (u64) max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size, numa_node);
        if (!array)
                return ERR_PTR(-ENOMEM);
        array->index_mask = index_mask;
        array->map.unpriv_array = unpriv;

        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
        array->map.map_flags = attr->map_flags;
        array->map.numa_node = numa_node;
        array->elem_size = elem_size;

        if (!percpu)
                goto out;

        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

        if (array_size >= U32_MAX - PAGE_SIZE ||
            bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
out:
        array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

        return &array->map;
}
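
/* For illustration only: a rough userspace sketch of how the attributes
 * validated above typically arrive here. The wrapper bpf_create_map() from
 * tools/lib/bpf is an assumption of this sketch, not something defined in
 * this file:
 *
 *	map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
 *				sizeof(__u64), 256, 0);
 *
 * i.e. key_size must be 4, value_size is bounded by KMALLOC_MAX_SIZE,
 * max_entries must be non-zero and map_flags may only carry bits from
 * ARRAY_CREATE_FLAG_MASK; a negative fd maps back to the ERR_PTR() cases
 * above (-EINVAL, -E2BIG or -ENOMEM).
 */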

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
        }

        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        } else {
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        }
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}
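
/* A sketch (not literal kernel C) of what the instructions emitted above
 * inline into the calling program:
 *
 *	elem = NULL;
 *	if (index < map->max_entries) {
 *		if (map->unpriv_array)
 *			index &= array->index_mask;
 *		elem = array->value + index * elem_size;
 *	}
 *	return elem;
 *
 * The AND with index_mask is only emitted for unprivileged users; together
 * with the rounded-up max_entries chosen in array_map_alloc() it keeps
 * speculative loads inside the allocated area.
 */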

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}
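
/* The destination buffer above holds one round_up(value_size, 8) sized slot
 * per possible CPU, in CPU order. A rough userspace sketch of consuming it;
 * the helper names are assumptions, and any way of reading
 * /sys/devices/system/cpu/possible works just as well:
 *
 *	ncpus = libbpf_num_possible_cpus();
 *	values = calloc(ncpus, roundup(value_size, 8));
 *	err = bpf_map_lookup_elem(map_fd, &key, values);
 *
 * after which slot 'cpu' of that buffer is this element's copy for that CPU.
 */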

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
                       value, map->value_size);
        else
                memcpy(array->value +
                       array->elem_size * (index & array->index_mask),
                       value, map->value_size);
        return 0;
}
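
/* Because every slot of an array map always exists, only BPF_ANY and
 * BPF_EXIST can succeed here. E.g. from the syscall side (sketch):
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);	(succeeds)
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);	(fails, -EEXIST)
 *
 * and an out-of-range key fails with -E2BIG rather than growing the map.
 */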

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* the user space will provide round_up(value_size, 8) bytes that
         * will be copied into per-cpu area. bpf programs can only access
         * value_size of it. During lookup the same extra bytes will be
         * returned or zeros which were zero-filled by percpu_alloc,
         * so no kernel data leaks possible
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (can be more than one that used this map) were
         * disconnected from events. Wait for outstanding programs to complete
         * and free the array
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return ERR_PTR(-EINVAL);
        return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
        void **elem, *ptr;
        int ret = 0;

        if (!map->ops->map_fd_sys_lookup_elem)
                return -ENOTSUPP;

        rcu_read_lock();
        elem = array_map_lookup_elem(map, key);
        if (elem && (ptr = READ_ONCE(*elem)))
                *value = map->ops->map_fd_sys_lookup_elem(ptr);
        else
                ret = -ENOENT;
        rcu_read_unlock();

        return ret;
}
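
/* Note that the value reported to the syscall above is the stored object's
 * stable id (e.g. a program id via prog_fd_array_sys_lookup_elem() below),
 * not the file descriptor that was written into the slot: an fd number is
 * only meaningful inside the process that installed it.
 */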

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(old_ptr);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
        bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
        return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
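
/* Program arrays are the maps consumed by bpf_tail_call(). A minimal BPF-side
 * sketch, using the map definition style from samples/bpf (an assumption of
 * this sketch, not something defined in this file):
 *
 *	struct bpf_map_def SEC("maps") jmp_table = {
 *		.type = BPF_MAP_TYPE_PROG_ARRAY,
 *		.key_size = sizeof(u32),
 *		.value_size = sizeof(u32),
 *		.max_entries = 8,
 *	};
 *
 *	bpf_tail_call(ctx, &jmp_table, slot);
 *
 * On success bpf_tail_call() does not return; an empty slot or out-of-range
 * index falls through to the next instruction.
 */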

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;
        u64 value;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
        if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
                goto err_out;

        ee = bpf_event_entry_gen(perf_file, map_file);
        if (ee)
                return ee;
        ee = ERR_PTR(-ENOMEM);
err_out:
        fput(perf_file);
        return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
        bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
};
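
/* Perf event arrays are typically paired with bpf_perf_event_output(). A
 * minimal BPF-side sketch, again in samples/bpf map definition style
 * (assumed here for illustration):
 *
 *	struct bpf_map_def SEC("maps") events = {
 *		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(u32),
 *		.max_entries = 64,
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &data, sizeof(data));
 *
 * Each slot holds a perf event fd installed from userspace; the release hook
 * above drops the entries a given map file installed once that file goes away.
 */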

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
        /* cgroup_put() frees cgrp after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map, *inner_map_meta;

        inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;

        map = fd_array_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
        }

        map->inner_map_meta = inner_map_meta;

        return map;
}

static void array_of_map_free(struct bpf_map *map)
{
        /* map->inner_map_meta is only accessed by syscall which
         * is protected by fdget/fdput.
         */
        bpf_map_meta_free(map->inner_map_meta);
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_map **inner_map = array_map_lookup_elem(map, key);

        if (!inner_map)
                return NULL;

        return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 elem_size = round_up(map->value_size, 8);
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
        }
        if (is_power_of_2(elem_size))
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        else
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
        *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);

        return insn - insn_buf;
}
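
/* As with array_map_gen_lookup(), a sketch of what is inlined above; the
 * extra BPF_LDX_MEM(BPF_DW, ...) is the dereference of the inner map pointer
 * stored in the slot:
 *
 *	inner_map = NULL;
 *	if (index < map->max_entries) {
 *		if (map->unpriv_array)
 *			index &= array->index_mask;
 *		inner_map = *(void **)(array->value + index * elem_size);
 *	}
 *	return inner_map;
 */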

const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc = array_of_map_alloc,
        .map_free = array_of_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_of_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = bpf_map_fd_get_ptr,
        .map_fd_put_ptr = bpf_map_fd_put_ptr,
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
        .map_gen_lookup = array_of_map_gen_lookup,
};