kernel/bpf/arraymap.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

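/* Free the per-cpu region behind every element of a per-cpu array map.
 * free_percpu() ignores NULL pointers, so this is also safe on an array
 * whose per-cpu allocation only partially succeeded.
 */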
static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                free_percpu(array->pptrs[i]);
}

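/* Allocate an 8-byte aligned per-cpu region of elem_size bytes for every
 * element; on failure, undo the partial allocation and report -ENOMEM.
 */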
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
        }

        return 0;
}

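/* The map header and either the flat element area (plain array) or the
 * table of per-cpu pointers (per-cpu array) share one zeroed allocation
 * from bpf_map_area_alloc(); the per-cpu element storage itself comes
 * from bpf_array_alloc_percpu().
 */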
/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        struct bpf_array *array;
        u64 array_size;
        u32 elem_size;

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 || attr->map_flags)
                return ERR_PTR(-EINVAL);

        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return ERR_PTR(-E2BIG);

        elem_size = round_up(attr->value_size, 8);

        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) attr->max_entries * sizeof(void *);
        else
                array_size += (u64) attr->max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size);
        if (!array)
                return ERR_PTR(-ENOMEM);

        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
        array->map.map_flags = attr->map_flags;
        array->elem_size = elem_size;

        if (!percpu)
                goto out;

        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

        if (array_size >= U32_MAX - PAGE_SIZE ||
            elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
out:
        array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

        return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return array->value + array->elem_size * index;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;
        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        } else {
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        }
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index]);
}

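/* Syscall-side lookup for per-cpu arrays: copy the element's value from
 * every possible CPU into one flat buffer, round_up(value_size, 8) bytes
 * per CPU.
 */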
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

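/* Iteration protocol: a missing or out-of-range key restarts the walk at
 * index 0; asking for the key after the last index returns -ENOENT.
 */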
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                memcpy(this_cpu_ptr(array->pptrs[index]),
                       value, map->value_size);
        else
                memcpy(array->value + array->elem_size * index,
                       value, map->value_size);
        return 0;
}

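/* Syscall-side update for per-cpu arrays: one flat buffer, laid out as in
 * bpf_percpu_array_copy(), supplies the value for every possible CPU.
 */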
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* user space provides round_up(value_size, 8) bytes that are copied
         * into the per-cpu area; bpf programs can only access value_size of
         * it. During lookup the same padding bytes are returned, or zeros
         * left by the zero-filling per-cpu allocator, so no kernel data can
         * leak.
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (there can be more than one that used this map)
         * were disconnected from events. Wait for outstanding programs to
         * complete and free the array.
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};

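/* The fd-based array maps below (prog array, perf event array, cgroup
 * array, array of maps) store kernel pointers internally but are updated
 * with u32 file descriptors from user space; map_fd_get_ptr() and
 * map_fd_put_ptr() translate and reference-count those objects.
 */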
static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return ERR_PTR(-EINVAL);
        return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

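/* Plain lookups never hand the stored kernel pointers back to the caller. */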
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
        void **elem, *ptr;
        int ret = 0;

        if (!map->ops->map_fd_sys_lookup_elem)
                return -ENOTSUPP;

        rcu_read_lock();
        elem = array_map_lookup_elem(map, key);
        if (elem && (ptr = READ_ONCE(*elem)))
                *value = map->ops->map_fd_sys_lookup_elem(ptr);
        else
                ret = -ENOENT;
        rcu_read_unlock();

        return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(old_ptr);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
}

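/* Turn a program fd into a bpf_prog reference, rejecting programs that
 * bpf_prog_array_compatible() deems incompatible with this prog array.
 */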
static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
        bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
        return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};

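/* A stored perf event is wrapped in a bpf_event_entry that records the
 * event, its file, and the map file that inserted it; the perf file
 * reference is dropped from an RCU callback once the entry is removed.
 */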
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;
        u64 value;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
        if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
                goto err_out;

        ee = bpf_event_entry_gen(perf_file, map_file);
        if (ee)
                return ee;
        ee = ERR_PTR(-ENOMEM);
err_out:
        fput(perf_file);
        return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
        bpf_event_entry_free_rcu(ptr);
}

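/* map_release hook: when a map file is closed, purge only the entries
 * that were installed through that particular file.
 */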
static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
        /* cgroup_put() frees the cgroup after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

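/* Array of maps: element values are other BPF maps. Metadata describing
 * the inner map is captured from inner_map_fd at creation time and freed
 * together with the outer map.
 */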
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map, *inner_map_meta;

        inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;

        map = fd_array_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
        }

        map->inner_map_meta = inner_map_meta;

        return map;
}

static void array_of_map_free(struct bpf_map *map)
{
        /* map->inner_map_meta is only accessed from the syscall path,
         * which is protected by fdget/fdput.
         */
        bpf_map_meta_free(map->inner_map_meta);
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_map **inner_map = array_map_lookup_elem(map, key);

        if (!inner_map)
                return NULL;

        return READ_ONCE(*inner_map);
}

const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc = array_of_map_alloc,
        .map_free = array_of_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_of_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = bpf_map_fd_get_ptr,
        .map_fd_put_ptr = bpf_map_fd_put_ptr,
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
};