/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK \
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
	 BPF_F_RDONLY | BPF_F_WRONLY)
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};
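
/* Each bucket has its own lock, so updates that hash to different
 * buckets can proceed in parallel. The lock is a raw spinlock because
 * update paths may run in hard-irq context (see the in_irq() comments
 * below) and their critical sections must not sleep.
 */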

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};
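
/* Layout invariant: 'padding' makes 'htab' and 'fnode.next' overlap
 * hash_node.pprev rather than hash_node.next. Pushing a freed element
 * onto the per-cpu freelist overwrites fnode.next, and this layout
 * keeps that write from clobbering hash_node.next, which concurrent
 * RCU lookups may still be following. The BUILD_BUG_ON()s at the top
 * of htab_map_alloc() enforce the overlap.
 */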

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
	       htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	       htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}
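
/* The per-cpu spare elements set up above are consumed in
 * alloc_htab_elem(): when an update replaces an existing key, the
 * current CPU's spare is handed out as the new element and the old
 * element becomes the new spare, so the replacement path needs no
 * freelist pop/push at all.
 */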

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is more complicated than other
		 * maps, so limit it to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return ERR_PTR(-EINVAL);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;
	htab->map.numa_node = numa_node;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's lru list has >= 1 element.
		 * While we are at it, give each lru list the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without the bucket lock. it will repeat the loop in
 * the unlikely event that an element moved from one bucket into another
 * while the linked list was being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}
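
/* The retry above works because each bucket head is initialized with
 * its bucket index as the nulls value (see INIT_HLIST_NULLS_HEAD() in
 * htab_map_alloc()). If the walk terminates on a nulls marker whose
 * value is not this bucket's index, the element under the reader was
 * recycled into a different bucket mid-walk, and the walk restarts.
 */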

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}
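
/* The sequence emitted above is, in pseudo-asm:
 *   R0 = __htab_map_lookup_elem(map, key)
 *   if (R0 == 0) goto out              // miss: R0 stays NULL
 *   R0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
 * out:
 * i.e. on a hit R0 is advanced past the element header and the
 * 8-byte-aligned key, yielding the same value pointer that
 * htab_map_lookup_elem() returns.
 */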

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}
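
/* Same inlining as above, plus an open-coded bpf_lru_node_set_ref():
 * lru_node->ref is loaded first and only written to 1 when it is still
 * 0, which avoids dirtying its cacheline on every lookup hit that
 * already has the ref bit set.
 */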

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-NULL, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		__pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}
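
/* Note the asymmetry above: preallocated elements go straight back on
 * the freelist with no RCU grace period, since their memory is never
 * kfree()d while the map lives and readers tolerate recycled elements
 * via the nulls-based retry in lookup_nulls_elem_raw(). Only
 * individually allocated elements must wait for call_rcu() before
 * being kfree()d.
 */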

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
{
	u32 size = htab->map.value_size;

	if (percpu || fd_htab_map_needs_adjust(htab))
		size = round_up(size, 8);
	return size;
}
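
/* fd_htab_map_needs_adjust() exists because a hash-of-maps map exposes
 * a 4-byte value (a map fd) to user space, but internally stores a
 * pointer to the inner map (see fd_htab_map_get_ptr()). On 64-bit
 * kernels the stored value must therefore be padded to 8 bytes.
 */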

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab_size_value(htab, percpu);
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = __pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				l_new = ERR_PTR(-E2BIG);
				goto dec_count;
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new) {
			l_new = ERR_PTR(-ENOMEM);
			goto dec_count;
		}
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				l_new = ERR_PTR(-ENOMEM);
				goto dec_count;
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
dec_count:
	atomic_dec(&htab->count);
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one program that used this
	 * map) were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

const struct bpf_map_ops htab_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return htab_map_alloc(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
};