/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK \
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE)

struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

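/* While an element sits on the pcpu freelist, fnode below overlays the
 * hash_node storage: the padding pointer keeps fnode.next at the same
 * offset as hash_node.pprev, which the BUILD_BUG_ON()s in
 * htab_map_alloc() verify.
 */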
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

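/* For per-cpu maps the slot right after the key does not hold the value
 * itself but a pointer to the per-cpu value area; these helpers store
 * and fetch that pointer.
 */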
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

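/* For fd-based maps (hash of maps) the value stored after the key is a
 * kernel pointer to the inner object rather than the value bytes.
 */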
static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

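/* Pop a free node from the LRU; this may evict an older element via
 * htab_lru_map_delete_node() to make room.
 */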
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

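/* Allocate all max_entries elements up front (plus one spare per CPU
 * for plain prealloc'ed hash maps) and seed either the LRU or the
 * per-cpu freelist with them.
 */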
static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

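/* Stash one spare element per CPU.  When an update replaces an existing
 * element, alloc_htab_elem() uses the spare for the new copy and the
 * old element becomes the CPU's new spare, avoiding a freelist
 * pop/push under the bucket lock.
 */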
static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than the
		 * other maps.  Hence, limit it to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return ERR_PTR(-EINVAL);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;
	htab->map.numa_node = numa_node;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's lru list has >= 1 element.
		 * While we are at it, give each lru list the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

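/* Buckets are selected by masking the jhash result with n_buckets - 1,
 * which works because n_buckets is rounded up to a power of two.
 */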
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while the linked list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

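/* Same inlining as htab_map_gen_lookup(), but on a hit also emit a
 * one-byte store that sets lru_node.ref, mirroring what
 * bpf_lru_node_set_ref() does in htab_lru_map_lookup_elem().
 */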
static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

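/* Unpublish an element: drop the fd reference for fd-based maps, then
 * either recycle the element into the freelist (preallocated maps) or
 * defer the kfree via RCU so that concurrent lookups stay safe.
 */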
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

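/* Copy a new value into the per-cpu area: just this CPU's slot for
 * updates from BPF programs, or every CPU's slot (onallcpus) for
 * updates coming from the syscall path.
 */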
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

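/* On 64-bit kernels a hash-of-maps value is a pointer, so its size is
 * rounded up to 8 bytes just like a percpu value's size.
 */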
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
{
	u32 size = htab->map.value_size;

	if (percpu || fd_htab_map_needs_adjust(htab))
		size = round_up(size, 8);
	return size;
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab_size_value(htab, percpu);
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				atomic_dec(&htab->count);
				return ERR_PTR(-E2BIG);
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new)
			return ERR_PTR(-ENOMEM);
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

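/* Enforce the update flags: BPF_NOEXIST demands that the key be absent,
 * BPF_EXIST demands that it be present, BPF_ANY accepts either.
 */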
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

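/* Only called from htab_map_free(), after synchronize_rcu(), so no
 * program can still be walking the buckets and elements can be freed
 * immediately instead of after another RCU grace period.
 */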
static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

const struct bpf_map_ops htab_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return htab_map_alloc(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

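/* Inline lookup for hash-of-maps: same as htab_map_gen_lookup(), plus a
 * final 8-byte load that dereferences the stored inner-map pointer.
 */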
static u32 htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
};