/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	void __percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

enum extra_elem_state {
	HTAB_NOT_AN_EXTRA_ELEM = 0,
	HTAB_EXTRA_ELEM_FREE,
	HTAB_EXTRA_ELEM_USED
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		enum extra_elem_state state;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};

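/* Commentary on the unions above (added for clarity, not in the original
 * file): an element is never linked into a hash bucket and sitting on the
 * per-cpu freelist at the same time, so the freelist linkage can overlay
 * hash_node.  'padding' keeps 'htab' and 'fnode.next' overlapping
 * hash_node.pprev rather than hash_node.next, so a lockless reader that
 * still holds a recycled element can keep following hash_node.next; the
 * BUILD_BUG_ON()s in htab_map_alloc() pin exactly this layout.
 */
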
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	int err = -ENOMEM, i;

	htab->elems = bpf_map_area_alloc(htab->elem_size *
					 htab->map.max_entries);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, htab->map.max_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, htab->map.max_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	void __percpu *pptr;
	int cpu;

	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
			HTAB_EXTRA_ELEM_FREE;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* LRU implementation is much more complicated than other
		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's lru list has >=1 elements.
		 * since we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket));
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (!percpu && !lru) {
		/* lru itself can remove the least used element, so
		 * there is no need for an extra elem during map_update.
		 */
		err = alloc_extra_elems(htab);
		if (err)
			goto free_buckets;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_extra_elems;
	}

	return &htab->map;

free_extra_elems:
	free_percpu(htab->extra_elems);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

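/* n_buckets is always a power of two (see htab_map_alloc()), so the
 * masking below is a cheap 'hash % n_buckets'.
 */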
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while link list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

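/* Why the retry above is needed (added commentary): with preallocated
 * elements, a deleted element can be recycled into a *different* bucket
 * while a lockless reader is still walking it.  Each list's nulls marker
 * encodes the owning bucket index (see INIT_HLIST_NULLS_HEAD() in
 * htab_map_alloc()), so a reader that terminates on a marker which does
 * not match 'hash & (n_buckets - 1)' knows it drifted into a foreign
 * bucket and restarts the walk.
 */
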
/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

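/* The three instructions emitted above behave roughly like this C sketch
 * (illustrative only, not part of the original file):
 *
 *	l = __htab_map_lookup_elem(map, key);
 *	if (!l)
 *		return NULL;
 *	return (void *)l + offsetof(struct htab_elem, key) +
 *	       round_up(map->key_size, 8);
 *
 * i.e. the element-to-value adjustment is pasted inline into the program
 * instead of paying for the extra function calls.
 */
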
static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

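/* A user-space map walk built on the semantics above (hedged sketch; the
 * bpf_map_get_next_key()/bpf_map_lookup_elem() wrappers around the bpf(2)
 * syscall are assumed for illustration):
 *
 *	memset(&key, 0xff, sizeof(key));  // any key absent from the map
 *	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
 *		bpf_map_lookup_elem(fd, &next_key, value);
 *		key = next_key;
 *	}
 *	// -ENOENT ends the walk; starting from a key that is actually
 *	// present would resume after it and skip earlier elements
 */
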
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (l->state == HTAB_EXTRA_ELEM_USED) {
		l->state = HTAB_EXTRA_ELEM_FREE;
		return;
	}

	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

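/* Two release paths above (added commentary): preallocated elements are
 * pushed straight back onto the per-cpu freelist because their memory is
 * never truly freed; a lockless reader still holding such an element only
 * ever sees it being recycled, which lookup_nulls_elem_raw() copes with.
 * kmalloc'ed elements must instead wait out an RCU grace period via
 * call_rcu() before the actual kfree().
 */
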
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

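/* For the onallcpus case (the syscall path), 'value' is a flat buffer of
 * num_possible_cpus() slots of value_size rounded up to 8 bytes -- the
 * same layout that bpf_percpu_hash_copy() produces on the read side.
 */
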
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 bool old_elem_exists)
{
	u32 size = htab->map.value_size;
	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
	struct htab_elem *l_new;
	void __percpu *pptr;
	int err = 0;

	if (prealloc) {
		struct pcpu_freelist_node *l;

		l = pcpu_freelist_pop(&htab->freelist);
		if (!l)
			err = -E2BIG;
		else
			l_new = container_of(l, struct htab_elem, fnode);
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
			atomic_dec(&htab->count);
			err = -E2BIG;
		} else {
			l_new = kmalloc(htab->elem_size,
					GFP_ATOMIC | __GFP_NOWARN);
			if (!l_new)
				return ERR_PTR(-ENOMEM);
		}
	}

	if (err) {
		if (!old_elem_exists)
			return ERR_PTR(err);

		/* if we're updating the existing element and the hash table
		 * is full, use per-cpu extra elems
		 */
		l_new = this_cpu_ptr(htab->extra_elems);
		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
			return ERR_PTR(-E2BIG);
		l_new->state = HTAB_EXTRA_ELEM_USED;
	} else {
		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

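/* Update-flag semantics enforced above, for reference:
 *	BPF_ANY     - create the element or update an existing one
 *	BPF_NOEXIST - create only; fails with -EEXIST if the key exists
 *	BPF_EXIST   - update only; fails with -ENOENT if the key is absent
 */
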
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				!!l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, false);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			if (l->state != HTAB_EXTRA_ELEM_USED)
				htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (htab->map.map_flags & BPF_F_NO_PREALLOC)
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
};

static struct bpf_map_type_list htab_type __ro_after_init = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};

static const struct bpf_map_ops htab_lru_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_type __ro_after_init = {
	.ops = &htab_lru_ops,
	.type = BPF_MAP_TYPE_LRU_HASH,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static const struct bpf_map_ops htab_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_percpu_type __ro_after_init = {
	.ops = &htab_percpu_ops,
	.type = BPF_MAP_TYPE_PERCPU_HASH,
};

static const struct bpf_map_ops htab_lru_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_percpu_type __ro_after_init = {
	.ops = &htab_lru_percpu_ops,
	.type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
};

static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);

	/* pointer is stored internally */
	attr->value_size = sizeof(void *);
	map = htab_map_alloc(attr);
	attr->value_size = sizeof(u32);

	return map;
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

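/* Usage sketch for the fd-based maps (illustrative, not from the original
 * file): for BPF_MAP_TYPE_HASH_OF_MAPS, user space updates an element with
 * the *fd* of an inner map, while map_fd_get_ptr() converts that fd into
 * the struct bpf_map pointer that is actually stored (hence the
 * sizeof(void *) value size in fd_htab_map_alloc()):
 *
 *	int inner_fd;	// fd of an already-created inner map
 *	bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 */
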
static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

static const struct bpf_map_ops htab_of_map_ops = {
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
};

static struct bpf_map_type_list htab_of_map_type __ro_after_init = {
	.ops = &htab_of_map_ops,
	.type = BPF_MAP_TYPE_HASH_OF_MAPS,
};

static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	bpf_register_map_type(&htab_percpu_type);
	bpf_register_map_type(&htab_lru_type);
	bpf_register_map_type(&htab_lru_percpu_type);
	bpf_register_map_type(&htab_of_map_type);
	return 0;
}
late_initcall(register_htab_map);