Commit | Line | Data |
---|---|---|
5b497af4 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
546ac1ff | 2 | /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io |
546ac1ff JF |
3 | */ |
4 | ||
5 | /* The devmap's primary use is as a backend map for the XDP BPF helper call | |
6 | * bpf_redirect_map(). Because XDP is mostly concerned with performance we | |
7 | * spent some effort to ensure the datapath with redirect maps does not use | |
8 | * any locking. This is a quick note on the details. | |
9 | * | |
10 | * We have three possible paths to get into the devmap control plane bpf | |
11 | * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall | |
12 | * will invoke an update, delete, or lookup operation. To ensure updates and | |
13 | * deletes appear atomic from the datapath side xchg() is used to modify the | |
14 | * netdev_map array. Then because the datapath does a lookup into the netdev_map | |
15 | * array (read-only) from an RCU critical section we use call_rcu() to wait for | |
16 | * an rcu grace period before free'ing the old data structures. This ensures the | |
17 | * datapath always has a valid copy. However, the datapath does a "flush" | |
18 | * operation that pushes any pending packets in the driver outside the RCU | |
19 | * critical section. Each bpf_dtab_netdev tracks these pending operations using | |
d5df2830 THJ |
20 | * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until |
21 | * this list is empty, indicating outstanding flush operations have completed. | |
546ac1ff JF |
22 | * |
23 | * BPF syscalls may race with BPF program calls on any of the update, delete | |
24 | * or lookup operations. As noted above, the xchg() operation also keeps the | |
25 | * netdev_map consistent in this case. From the devmap side BPF programs | |
26 | * calling into these operations are the same as multiple user space threads | |
27 | * making system calls. | |
2ddf71e2 JF |
28 | * |
29 | * Finally, any of the above may race with a netdev_unregister notifier. The | |
30 | * unregister notifier must search for net devices in the map structure that | |
31 | * contain a reference to the net device and remove them. This is a two step | |
32 | * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b) | |
33 | * check to see if the ifindex is the same as the net_device being removed. | |
4cc7b954 JF |
34 | * When removing the dev a cmpxchg() is used to ensure the correct dev is |
35 | * removed; in the case of a concurrent update or delete operation it is | |
36 | * possible that the initially referenced dev is no longer in the map. As the | |
37 | * notifier hook walks the map, we know that new dev references cannot be | |
38 | * added by the user because core infrastructure ensures dev_get_by_index() | |
39 | * calls will fail at this point. | |
6f9d451a THJ |
40 | * |
41 | * The devmap_hash type is a map type which interprets keys as ifindexes and | |
42 | * indexes these using a hashmap. This allows maps that use ifindex as key to be | |
43 | * densely packed instead of having holes in the lookup array for unused | |
44 | * ifindexes. The setup and packet enqueue/send code is shared between the two | |
45 | * types of devmap; only the lookup and insertion is different. | |
546ac1ff JF |
46 | */ |
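As a usage illustration of the redirect path described in the comment above, here is a minimal sketch of an XDP program that steers packets through a DEVMAP with bpf_redirect_map(). It is not part of devmap.c: the map name, sizes and SEC() conventions are assumptions of a libbpf-style loader.

```c
/* Hypothetical XDP redirect program (not part of devmap.c). Assumes a
 * libbpf-style loader; map name and sizes are illustrative only.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));    /* slot index */
	__uint(value_size, sizeof(__u32));  /* 4-byte value: ifindex only */
	__uint(max_entries, 64);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_devmap(struct xdp_md *ctx)
{
	/* Redirect every packet through slot 0 of the devmap. */
	return bpf_redirect_map(&tx_ports, 0, 0);
}

char _license[] SEC("license") = "GPL";
```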
47 | #include <linux/bpf.h> | |
67f29e07 | 48 | #include <net/xdp.h> |
546ac1ff | 49 | #include <linux/filter.h> |
67f29e07 | 50 | #include <trace/events/xdp.h> |
546ac1ff | 51 | |
6e71b04a CF |
52 | #define DEV_CREATE_FLAG_MASK \ |
53 | (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) | |
54 | ||
75ccae62 | 55 | struct xdp_dev_bulk_queue { |
5d053f9d | 56 | struct xdp_frame *q[DEV_MAP_BULK_SIZE]; |
d5df2830 | 57 | struct list_head flush_node; |
75ccae62 | 58 | struct net_device *dev; |
38edddb8 | 59 | struct net_device *dev_rx; |
5d053f9d JDB |
60 | unsigned int count; |
61 | }; | |
62 | ||
546ac1ff | 63 | struct bpf_dtab_netdev { |
67f29e07 | 64 | struct net_device *dev; /* must be first member, due to tracepoint */ |
6f9d451a | 65 | struct hlist_node index_hlist; |
546ac1ff | 66 | struct bpf_dtab *dtab; |
fbee97fe | 67 | struct bpf_prog *xdp_prog; |
af4d045c | 68 | struct rcu_head rcu; |
75ccae62 | 69 | unsigned int idx; |
7f1c0426 | 70 | struct bpf_devmap_val val; |
546ac1ff JF |
71 | }; |
72 | ||
73 | struct bpf_dtab { | |
74 | struct bpf_map map; | |
071cdece | 75 | struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */ |
2ddf71e2 | 76 | struct list_head list; |
6f9d451a THJ |
77 | |
78 | /* these are only used for DEVMAP_HASH type maps */ | |
79 | struct hlist_head *dev_index_head; | |
80 | spinlock_t index_lock; | |
81 | unsigned int items; | |
82 | u32 n_buckets; | |
546ac1ff JF |
83 | }; |
84 | ||
1d233886 | 85 | static DEFINE_PER_CPU(struct list_head, dev_flush_list); |
4cc7b954 | 86 | static DEFINE_SPINLOCK(dev_map_lock); |
2ddf71e2 JF |
87 | static LIST_HEAD(dev_map_list); |
88 | ||
99c51064 THJ |
89 | static struct hlist_head *dev_map_create_hash(unsigned int entries, |
90 | int numa_node) | |
6f9d451a THJ |
91 | { |
92 | int i; | |
93 | struct hlist_head *hash; | |
94 | ||
99c51064 | 95 | hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node); |
6f9d451a THJ |
96 | if (hash != NULL) |
97 | for (i = 0; i < entries; i++) | |
98 | INIT_HLIST_HEAD(&hash[i]); | |
99 | ||
100 | return hash; | |
101 | } | |
102 | ||
071cdece THJ |
103 | static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab, |
104 | int idx) | |
105 | { | |
106 | return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; | |
107 | } | |
108 | ||
fca16e51 | 109 | static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) |
546ac1ff | 110 | { |
fbee97fe | 111 | u32 valsize = attr->value_size; |
96360004 BT |
112 | u64 cost = 0; |
113 | int err; | |
546ac1ff | 114 | |
fbee97fe DA |
115 | /* check sanity of attributes. 2 value sizes supported: |
116 | * 4 bytes: ifindex | |
117 | * 8 bytes: ifindex + prog fd | |
118 | */ | |
546ac1ff | 119 | if (attr->max_entries == 0 || attr->key_size != 4 || |
fbee97fe DA |
120 | (valsize != offsetofend(struct bpf_devmap_val, ifindex) && |
121 | valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) || | |
122 | attr->map_flags & ~DEV_CREATE_FLAG_MASK) | |
fca16e51 | 123 | return -EINVAL; |
546ac1ff | 124 | |
0cdbb4b0 THJ |
125 | /* Lookup returns a pointer straight to the struct bpf_devmap_val, so make sure the |
126 | * verifier prevents writes from the BPF side | |
127 | */ | |
128 | attr->map_flags |= BPF_F_RDONLY_PROG; | |
129 | ||
546ac1ff | 130 | |
bd475643 | 131 | bpf_map_init_from_attr(&dtab->map, attr); |
546ac1ff | 132 | |
6f9d451a THJ |
133 | if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { |
134 | dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); | |
135 | ||
136 | if (!dtab->n_buckets) /* Overflow check */ | |
137 | return -EINVAL; | |
05679ca6 | 138 | cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets; |
071cdece THJ |
139 | } else { |
140 | cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); | |
6f9d451a THJ |
141 | } |
142 | ||
b936ca64 | 143 | /* if map size is larger than memlock limit, reject it */ |
c85d6913 | 144 | err = bpf_map_charge_init(&dtab->map.memory, cost); |
546ac1ff | 145 | if (err) |
fca16e51 | 146 | return -EINVAL; |
582db7e0 | 147 | |
6f9d451a | 148 | if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { |
99c51064 THJ |
149 | dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, |
150 | dtab->map.numa_node); | |
6f9d451a | 151 | if (!dtab->dev_index_head) |
96360004 | 152 | goto free_charge; |
6f9d451a THJ |
153 | |
154 | spin_lock_init(&dtab->index_lock); | |
071cdece THJ |
155 | } else { |
156 | dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * | |
157 | sizeof(struct bpf_dtab_netdev *), | |
158 | dtab->map.numa_node); | |
159 | if (!dtab->netdev_map) | |
96360004 | 160 | goto free_charge; |
6f9d451a THJ |
161 | } |
162 | ||
fca16e51 | 163 | return 0; |
d5df2830 | 164 | |
b936ca64 RG |
165 | free_charge: |
166 | bpf_map_charge_finish(&dtab->map.memory); | |
fca16e51 THJ |
167 | return -ENOMEM; |
168 | } | |
169 | ||
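For context, a hedged user-space sketch of creating the two value layouts dev_map_init_map() accepts (4-byte ifindex-only values, or the full 8-byte struct bpf_devmap_val). It assumes a libbpf recent enough to provide bpf_map_create(); older libbpf exposed the same operation as bpf_create_map().

```c
/* Hypothetical user-space sketch (assumes libbpf >= 0.7, which provides
 * bpf_map_create()). Creating either map type requires CAP_NET_ADMIN.
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>

int create_devmaps(void)
{
	/* 4-byte values: each entry is just an ifindex. */
	int devmap_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_ports",
				       sizeof(__u32), sizeof(__u32), 64, NULL);

	/* 8-byte values: struct bpf_devmap_val (ifindex + optional prog fd). */
	int devmap_hash_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, "tx_ports_hash",
					    sizeof(__u32),
					    sizeof(struct bpf_devmap_val), 64, NULL);

	return (devmap_fd < 0 || devmap_hash_fd < 0) ? -1 : 0;
}
```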
170 | static struct bpf_map *dev_map_alloc(union bpf_attr *attr) | |
171 | { | |
172 | struct bpf_dtab *dtab; | |
173 | int err; | |
174 | ||
175 | if (!capable(CAP_NET_ADMIN)) | |
176 | return ERR_PTR(-EPERM); | |
177 | ||
178 | dtab = kzalloc(sizeof(*dtab), GFP_USER); | |
179 | if (!dtab) | |
180 | return ERR_PTR(-ENOMEM); | |
181 | ||
182 | err = dev_map_init_map(dtab, attr); | |
183 | if (err) { | |
184 | kfree(dtab); | |
185 | return ERR_PTR(err); | |
186 | } | |
187 | ||
188 | spin_lock(&dev_map_lock); | |
189 | list_add_tail_rcu(&dtab->list, &dev_map_list); | |
190 | spin_unlock(&dev_map_lock); | |
191 | ||
192 | return &dtab->map; | |
546ac1ff JF |
193 | } |
194 | ||
195 | static void dev_map_free(struct bpf_map *map) | |
196 | { | |
197 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
0536b852 | 198 | int i; |
546ac1ff JF |
199 | |
200 | /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, | |
201 | * so the programs (there can be more than one that used this map) were | |
42a84a8c JF |
202 | * disconnected from events. The following synchronize_rcu() guarantees |
203 | * both rcu read critical sections complete and waits for | |
204 | * preempt-disable regions (NAPI being the relevant context here) so we | |
205 | * are certain there will be no further reads against the netdev_map and | |
206 | * all flush operations are complete. Flush operations can only be done | |
207 | * from NAPI context for this reason. | |
546ac1ff | 208 | */ |
274043c6 DB |
209 | |
210 | spin_lock(&dev_map_lock); | |
211 | list_del_rcu(&dtab->list); | |
212 | spin_unlock(&dev_map_lock); | |
213 | ||
f6069b9a | 214 | bpf_clear_redirect_map(map); |
546ac1ff JF |
215 | synchronize_rcu(); |
216 | ||
2baae354 ED |
217 | /* Make sure prior __dev_map_entry_free() have completed. */ |
218 | rcu_barrier(); | |
219 | ||
071cdece THJ |
220 | if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { |
221 | for (i = 0; i < dtab->n_buckets; i++) { | |
222 | struct bpf_dtab_netdev *dev; | |
223 | struct hlist_head *head; | |
224 | struct hlist_node *next; | |
225 | ||
226 | head = dev_map_index_hash(dtab, i); | |
227 | ||
228 | hlist_for_each_entry_safe(dev, next, head, index_hlist) { | |
229 | hlist_del_rcu(&dev->index_hlist); | |
fbee97fe DA |
230 | if (dev->xdp_prog) |
231 | bpf_prog_put(dev->xdp_prog); | |
071cdece THJ |
232 | dev_put(dev->dev); |
233 | kfree(dev); | |
234 | } | |
235 | } | |
236 | ||
99c51064 | 237 | bpf_map_area_free(dtab->dev_index_head); |
071cdece THJ |
238 | } else { |
239 | for (i = 0; i < dtab->map.max_entries; i++) { | |
240 | struct bpf_dtab_netdev *dev; | |
241 | ||
242 | dev = dtab->netdev_map[i]; | |
243 | if (!dev) | |
244 | continue; | |
245 | ||
fbee97fe DA |
246 | if (dev->xdp_prog) |
247 | bpf_prog_put(dev->xdp_prog); | |
071cdece THJ |
248 | dev_put(dev->dev); |
249 | kfree(dev); | |
250 | } | |
251 | ||
252 | bpf_map_area_free(dtab->netdev_map); | |
546ac1ff JF |
253 | } |
254 | ||
546ac1ff JF |
255 | kfree(dtab); |
256 | } | |
257 | ||
258 | static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) | |
259 | { | |
260 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
261 | u32 index = key ? *(u32 *)key : U32_MAX; | |
af4d045c | 262 | u32 *next = next_key; |
546ac1ff JF |
263 | |
264 | if (index >= dtab->map.max_entries) { | |
265 | *next = 0; | |
266 | return 0; | |
267 | } | |
268 | ||
269 | if (index == dtab->map.max_entries - 1) | |
270 | return -ENOENT; | |
546ac1ff JF |
271 | *next = index + 1; |
272 | return 0; | |
273 | } | |
274 | ||
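A user-space walk of a DEVMAP follows directly from the get_next_key contract above: a NULL key (or any key >= max_entries) restarts at index 0, and -ENOENT marks the end. A small sketch, assuming the map fd exists and the map was created with 4-byte (ifindex-only) values:

```c
/* Hypothetical user-space iteration sketch; map_fd is assumed valid and the
 * map is assumed to use 4-byte (ifindex-only) values.
 */
#include <stdio.h>
#include <bpf/bpf.h>

void dump_devmap(int map_fd)
{
	__u32 key, next_key, ifindex;
	int err;

	/* NULL key asks the kernel for the first index (0). */
	err = bpf_map_get_next_key(map_fd, NULL, &next_key);
	while (!err) {
		key = next_key;
		/* Empty slots fail the lookup and are simply skipped. */
		if (!bpf_map_lookup_elem(map_fd, &key, &ifindex))
			printf("slot %u -> ifindex %u\n", key, ifindex);
		err = bpf_map_get_next_key(map_fd, &key, &next_key);
	}
}
```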
6f9d451a THJ |
275 | struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key) |
276 | { | |
277 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
278 | struct hlist_head *head = dev_map_index_hash(dtab, key); | |
279 | struct bpf_dtab_netdev *dev; | |
280 | ||
485ec2ea AG |
281 | hlist_for_each_entry_rcu(dev, head, index_hlist, |
282 | lockdep_is_held(&dtab->index_lock)) | |
6f9d451a THJ |
283 | if (dev->idx == key) |
284 | return dev; | |
285 | ||
286 | return NULL; | |
287 | } | |
288 | ||
289 | static int dev_map_hash_get_next_key(struct bpf_map *map, void *key, | |
290 | void *next_key) | |
291 | { | |
292 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
293 | u32 idx, *next = next_key; | |
294 | struct bpf_dtab_netdev *dev, *next_dev; | |
295 | struct hlist_head *head; | |
296 | int i = 0; | |
297 | ||
298 | if (!key) | |
299 | goto find_first; | |
300 | ||
301 | idx = *(u32 *)key; | |
302 | ||
303 | dev = __dev_map_hash_lookup_elem(map, idx); | |
304 | if (!dev) | |
305 | goto find_first; | |
306 | ||
307 | next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)), | |
308 | struct bpf_dtab_netdev, index_hlist); | |
309 | ||
310 | if (next_dev) { | |
311 | *next = next_dev->idx; | |
312 | return 0; | |
313 | } | |
314 | ||
315 | i = idx & (dtab->n_buckets - 1); | |
316 | i++; | |
317 | ||
318 | find_first: | |
319 | for (; i < dtab->n_buckets; i++) { | |
320 | head = dev_map_index_hash(dtab, i); | |
321 | ||
322 | next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), | |
323 | struct bpf_dtab_netdev, | |
324 | index_hlist); | |
325 | if (next_dev) { | |
326 | *next = next_dev->idx; | |
327 | return 0; | |
328 | } | |
329 | } | |
330 | ||
331 | return -ENOENT; | |
332 | } | |
333 | ||
fbee97fe DA |
334 | bool dev_map_can_have_prog(struct bpf_map *map) |
335 | { | |
336 | if ((map->map_type == BPF_MAP_TYPE_DEVMAP || | |
337 | map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) && | |
338 | map->value_size != offsetofend(struct bpf_devmap_val, ifindex)) | |
339 | return true; | |
340 | ||
341 | return false; | |
342 | } | |
343 | ||
ebc4ecd4 | 344 | static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags) |
5d053f9d | 345 | { |
75ccae62 | 346 | struct net_device *dev = bq->dev; |
e74de52e | 347 | int sent = 0, drops = 0, err = 0; |
5d053f9d JDB |
348 | int i; |
349 | ||
350 | if (unlikely(!bq->count)) | |
ebc4ecd4 | 351 | return; |
5d053f9d JDB |
352 | |
353 | for (i = 0; i < bq->count; i++) { | |
354 | struct xdp_frame *xdpf = bq->q[i]; | |
355 | ||
356 | prefetch(xdpf); | |
357 | } | |
358 | ||
c1ece6b2 | 359 | sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags); |
735fc405 | 360 | if (sent < 0) { |
e74de52e | 361 | err = sent; |
735fc405 JDB |
362 | sent = 0; |
363 | goto error; | |
5d053f9d | 364 | } |
735fc405 JDB |
365 | drops = bq->count - sent; |
366 | out: | |
5d053f9d JDB |
367 | bq->count = 0; |
368 | ||
58aa94f9 | 369 | trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err); |
38edddb8 | 370 | bq->dev_rx = NULL; |
d5df2830 | 371 | __list_del_clearprev(&bq->flush_node); |
ebc4ecd4 | 372 | return; |
735fc405 JDB |
373 | error: |
374 | /* If ndo_xdp_xmit fails with an errno, no frames have been | |
375 | * xmit'ed and it is our responsibility to free them all. | |
376 | */ | |
377 | for (i = 0; i < bq->count; i++) { | |
378 | struct xdp_frame *xdpf = bq->q[i]; | |
379 | ||
0536b852 | 380 | xdp_return_frame_rx_napi(xdpf); |
735fc405 JDB |
381 | drops++; |
382 | } | |
383 | goto out; | |
5d053f9d JDB |
384 | } |
385 | ||
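bq_xmit_all() leans on the ndo_xdp_xmit() contract of this tree: the driver returns how many frames it accepted, frees any frame it drops itself, and returns a negative errno only when nothing was consumed (in which case the caller frees everything, as the error path above does). A hedged, driver-agnostic sketch of that contract; the mydrv_*() helpers are placeholders, not real functions:

```c
/* Hypothetical driver-side sketch of the ndo_xdp_xmit() contract used by
 * bq_xmit_all(); the mydrv_*() helpers are placeholders for real driver code.
 */
static bool mydrv_tx_ring_active(struct net_device *dev);
static int mydrv_queue_frame(struct net_device *dev, struct xdp_frame *xdpf);
static void mydrv_kick_tx_ring(struct net_device *dev);

static int mydrv_xdp_xmit(struct net_device *dev, int n,
			  struct xdp_frame **frames, u32 flags)
{
	int i, drops = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (unlikely(!mydrv_tx_ring_active(dev)))
		return -ENETDOWN;	/* nothing consumed: caller frees all frames */

	for (i = 0; i < n; i++) {
		if (mydrv_queue_frame(dev, frames[i])) {
			/* The driver drops (and frees) what it cannot queue. */
			xdp_return_frame_rx_napi(frames[i]);
			drops++;
		}
	}

	if (flags & XDP_XMIT_FLUSH)
		mydrv_kick_tx_ring(dev);

	return n - drops;	/* reported as "sent" by bq_xmit_all() */
}
```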
1d233886 | 386 | /* __dev_flush is called from xdp_do_flush() which _must_ be signaled |
11393cc9 JF |
387 | * from the driver before returning from its napi->poll() routine. The poll() |
388 | * routine is called either from busy_poll context or net_rx_action signaled | |
389 | * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the | |
d5df2830 THJ |
390 | * net device can be torn down. On devmap tear down we ensure the flush list |
391 | * is empty before completing to ensure all flush operations have completed. | |
b23bfa56 JF |
392 | * When drivers update the bpf program they may need to ensure any flush ops |
393 | * are also complete. Using synchronize_rcu or call_rcu will suffice for this | |
394 | * because both wait for napi context to exit. | |
11393cc9 | 395 | */ |
1d233886 | 396 | void __dev_flush(void) |
11393cc9 | 397 | { |
1d233886 | 398 | struct list_head *flush_list = this_cpu_ptr(&dev_flush_list); |
75ccae62 | 399 | struct xdp_dev_bulk_queue *bq, *tmp; |
11393cc9 | 400 | |
d5df2830 | 401 | list_for_each_entry_safe(bq, tmp, flush_list, flush_node) |
0536b852 | 402 | bq_xmit_all(bq, XDP_XMIT_FLUSH); |
11393cc9 JF |
403 | } |
404 | ||
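In driver terms, the requirement that xdp_do_flush() runs before napi->poll() returns typically looks like the sketch below; mydrv_rx_one() is a placeholder for the driver's receive plus XDP processing of one frame and does not exist outside this example.

```c
/* Hypothetical NAPI poll sketch; mydrv_rx_one() is a placeholder assumed to
 * return the XDP verdict for one received frame, or a negative value when
 * the RX ring is empty.
 */
static int mydrv_rx_one(struct napi_struct *napi);

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	bool redirected = false;

	while (work_done < budget) {
		int act = mydrv_rx_one(napi);

		if (act < 0)
			break;
		if (act == XDP_REDIRECT)
			redirected = true;
		work_done++;
	}

	if (redirected)
		xdp_do_flush();	/* drains this CPU's dev_flush_list via __dev_flush() */

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
```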
546ac1ff JF |
405 | /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or |
406 | * update happens in parallel here, a dev_put() won't happen until after reading the | |
407 | * ifindex. | |
408 | */ | |
67f29e07 | 409 | struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key) |
546ac1ff JF |
410 | { |
411 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
67f29e07 | 412 | struct bpf_dtab_netdev *obj; |
546ac1ff | 413 | |
af4d045c | 414 | if (key >= map->max_entries) |
546ac1ff JF |
415 | return NULL; |
416 | ||
67f29e07 JDB |
417 | obj = READ_ONCE(dtab->netdev_map[key]); |
418 | return obj; | |
419 | } | |
420 | ||
5d053f9d JDB |
421 | /* Runs under RCU-read-side, plus in softirq under NAPI protection. |
422 | * Thus, safe percpu variable access. | |
423 | */ | |
ebc4ecd4 BT |
424 | static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, |
425 | struct net_device *dev_rx) | |
5d053f9d | 426 | { |
1d233886 | 427 | struct list_head *flush_list = this_cpu_ptr(&dev_flush_list); |
75ccae62 | 428 | struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq); |
5d053f9d JDB |
429 | |
430 | if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) | |
0536b852 | 431 | bq_xmit_all(bq, 0); |
5d053f9d | 432 | |
38edddb8 JDB |
433 | /* Ingress dev_rx will be the same for all xdp_frames in the |
434 | * bulk_queue, because the bq is stored per-CPU and must be flushed |
435 | * at the end of the net_device driver's NAPI function. |
436 | */ | |
437 | if (!bq->dev_rx) | |
438 | bq->dev_rx = dev_rx; | |
439 | ||
5d053f9d | 440 | bq->q[bq->count++] = xdpf; |
d5df2830 THJ |
441 | |
442 | if (!bq->flush_node.prev) | |
443 | list_add(&bq->flush_node, flush_list); | |
5d053f9d JDB |
444 | } |
445 | ||
1d233886 THJ |
446 | static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, |
447 | struct net_device *dev_rx) | |
67f29e07 | 448 | { |
67f29e07 | 449 | struct xdp_frame *xdpf; |
d8d7218a | 450 | int err; |
67f29e07 JDB |
451 | |
452 | if (!dev->netdev_ops->ndo_xdp_xmit) | |
453 | return -EOPNOTSUPP; | |
454 | ||
d8d7218a TM |
455 | err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); |
456 | if (unlikely(err)) | |
457 | return err; | |
458 | ||
1b698fa5 | 459 | xdpf = xdp_convert_buff_to_frame(xdp); |
67f29e07 JDB |
460 | if (unlikely(!xdpf)) |
461 | return -EOVERFLOW; | |
462 | ||
ebc4ecd4 BT |
463 | bq_enqueue(dev, xdpf, dev_rx); |
464 | return 0; | |
546ac1ff JF |
465 | } |
466 | ||
fbee97fe DA |
467 | static struct xdp_buff *dev_map_run_prog(struct net_device *dev, |
468 | struct xdp_buff *xdp, | |
469 | struct bpf_prog *xdp_prog) | |
470 | { | |
64b59025 | 471 | struct xdp_txq_info txq = { .dev = dev }; |
fbee97fe DA |
472 | u32 act; |
473 | ||
26afa0a4 | 474 | xdp_set_data_meta_invalid(xdp); |
64b59025 DA |
475 | xdp->txq = &txq; |
476 | ||
fbee97fe DA |
477 | act = bpf_prog_run_xdp(xdp_prog, xdp); |
478 | switch (act) { | |
479 | case XDP_PASS: | |
480 | return xdp; | |
481 | case XDP_DROP: | |
482 | break; | |
483 | default: | |
484 | bpf_warn_invalid_xdp_action(act); | |
485 | fallthrough; | |
486 | case XDP_ABORTED: | |
487 | trace_xdp_exception(dev, xdp_prog, act); | |
488 | break; | |
489 | } | |
490 | ||
491 | xdp_return_buff(xdp); | |
492 | return NULL; | |
493 | } | |
494 | ||
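The program that dev_map_run_prog() invokes is an ordinary XDP program loaded with expected_attach_type BPF_XDP_DEVMAP and attached to an individual map entry. A hedged BPF-side sketch; the SEC("xdp_devmap") section name is a libbpf loader convention of this era and an assumption here:

```c
/* Hypothetical per-entry devmap program (not part of devmap.c). It must be
 * loaded with expected_attach_type == BPF_XDP_DEVMAP; the SEC() name that
 * asks libbpf to do so is assumed here.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp_devmap")
int xdp_devmap_egress(struct xdp_md *ctx)
{
	/* Runs after the redirect, just before the frame is queued to the
	 * egress device; ctx->egress_ifindex identifies that device.
	 */
	if (ctx->egress_ifindex == 1)	/* e.g. never transmit via ifindex 1 */
		return XDP_DROP;

	return XDP_PASS;	/* XDP_PASS lets the frame continue to transmit */
}

char _license[] SEC("license") = "GPL";
```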
1d233886 THJ |
495 | int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, |
496 | struct net_device *dev_rx) | |
497 | { | |
498 | return __xdp_enqueue(dev, xdp, dev_rx); | |
499 | } | |
500 | ||
501 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, | |
502 | struct net_device *dev_rx) | |
503 | { | |
504 | struct net_device *dev = dst->dev; | |
505 | ||
fbee97fe DA |
506 | if (dst->xdp_prog) { |
507 | xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog); | |
508 | if (!xdp) | |
509 | return 0; | |
510 | } | |
1d233886 THJ |
511 | return __xdp_enqueue(dev, xdp, dev_rx); |
512 | } | |
513 | ||
6d5fc195 TM |
514 | int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, |
515 | struct bpf_prog *xdp_prog) | |
516 | { | |
517 | int err; | |
518 | ||
d8d7218a | 519 | err = xdp_ok_fwd_dev(dst->dev, skb->len); |
6d5fc195 TM |
520 | if (unlikely(err)) |
521 | return err; | |
522 | skb->dev = dst->dev; | |
523 | generic_xdp_tx(skb, xdp_prog); | |
524 | ||
525 | return 0; | |
526 | } | |
527 | ||
af4d045c DB |
528 | static void *dev_map_lookup_elem(struct bpf_map *map, void *key) |
529 | { | |
67f29e07 | 530 | struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key); |
af4d045c | 531 | |
7f1c0426 | 532 | return obj ? &obj->val : NULL; |
af4d045c DB |
533 | } |
534 | ||
6f9d451a THJ |
535 | static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key) |
536 | { | |
537 | struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map, | |
538 | *(u32 *)key); | |
7f1c0426 | 539 | return obj ? &obj->val : NULL; |
6f9d451a THJ |
540 | } |
541 | ||
546ac1ff JF |
542 | static void __dev_map_entry_free(struct rcu_head *rcu) |
543 | { | |
af4d045c | 544 | struct bpf_dtab_netdev *dev; |
546ac1ff | 545 | |
af4d045c | 546 | dev = container_of(rcu, struct bpf_dtab_netdev, rcu); |
fbee97fe DA |
547 | if (dev->xdp_prog) |
548 | bpf_prog_put(dev->xdp_prog); | |
af4d045c DB |
549 | dev_put(dev->dev); |
550 | kfree(dev); | |
546ac1ff JF |
551 | } |
552 | ||
553 | static int dev_map_delete_elem(struct bpf_map *map, void *key) | |
554 | { | |
555 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
556 | struct bpf_dtab_netdev *old_dev; | |
557 | int k = *(u32 *)key; | |
558 | ||
559 | if (k >= map->max_entries) | |
560 | return -EINVAL; | |
561 | ||
af4d045c | 562 | /* Use call_rcu() here to ensure any rcu critical sections have |
42a84a8c JF |
563 | * completed as well as any flush operations because call_rcu |
564 | * will wait for preempt-disable region to complete, NAPI in this | |
565 | * context. And additionally, the driver tear down ensures all | |
566 | * soft irqs are complete before removing the net device in the | |
567 | * case where dev_put() drops the refcount to zero. | |
546ac1ff JF |
568 | */ |
569 | old_dev = xchg(&dtab->netdev_map[k], NULL); | |
570 | if (old_dev) | |
571 | call_rcu(&old_dev->rcu, __dev_map_entry_free); | |
572 | return 0; | |
573 | } | |
574 | ||
6f9d451a THJ |
575 | static int dev_map_hash_delete_elem(struct bpf_map *map, void *key) |
576 | { | |
577 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
578 | struct bpf_dtab_netdev *old_dev; | |
579 | int k = *(u32 *)key; | |
580 | unsigned long flags; | |
581 | int ret = -ENOENT; | |
582 | ||
583 | spin_lock_irqsave(&dtab->index_lock, flags); | |
584 | ||
585 | old_dev = __dev_map_hash_lookup_elem(map, k); | |
586 | if (old_dev) { | |
587 | dtab->items--; | |
588 | hlist_del_init_rcu(&old_dev->index_hlist); | |
589 | call_rcu(&old_dev->rcu, __dev_map_entry_free); | |
590 | ret = 0; | |
591 | } | |
592 | spin_unlock_irqrestore(&dtab->index_lock, flags); | |
593 | ||
594 | return ret; | |
595 | } | |
596 | ||
fca16e51 THJ |
597 | static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net, |
598 | struct bpf_dtab *dtab, | |
7f1c0426 | 599 | struct bpf_devmap_val *val, |
fca16e51 | 600 | unsigned int idx) |
546ac1ff | 601 | { |
fbee97fe | 602 | struct bpf_prog *prog = NULL; |
fca16e51 | 603 | struct bpf_dtab_netdev *dev; |
fca16e51 | 604 | |
75ccae62 THJ |
605 | dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN, |
606 | dtab->map.numa_node); | |
fca16e51 THJ |
607 | if (!dev) |
608 | return ERR_PTR(-ENOMEM); | |
609 | ||
7f1c0426 DA |
610 | dev->dev = dev_get_by_index(net, val->ifindex); |
611 | if (!dev->dev) | |
612 | goto err_out; | |
fca16e51 | 613 | |
281920b7 | 614 | if (val->bpf_prog.fd > 0) { |
fbee97fe DA |
615 | prog = bpf_prog_get_type_dev(val->bpf_prog.fd, |
616 | BPF_PROG_TYPE_XDP, false); | |
617 | if (IS_ERR(prog)) | |
618 | goto err_put_dev; | |
619 | if (prog->expected_attach_type != BPF_XDP_DEVMAP) | |
620 | goto err_put_prog; | |
621 | } | |
622 | ||
fca16e51 THJ |
623 | dev->idx = idx; |
624 | dev->dtab = dtab; | |
fbee97fe DA |
625 | if (prog) { |
626 | dev->xdp_prog = prog; | |
627 | dev->val.bpf_prog.id = prog->aux->id; | |
628 | } else { | |
629 | dev->xdp_prog = NULL; | |
630 | dev->val.bpf_prog.id = 0; | |
631 | } | |
7f1c0426 | 632 | dev->val.ifindex = val->ifindex; |
fca16e51 THJ |
633 | |
634 | return dev; | |
fbee97fe DA |
635 | err_put_prog: |
636 | bpf_prog_put(prog); | |
637 | err_put_dev: | |
638 | dev_put(dev->dev); | |
7f1c0426 DA |
639 | err_out: |
640 | kfree(dev); | |
641 | return ERR_PTR(-EINVAL); | |
fca16e51 THJ |
642 | } |
643 | ||
644 | static int __dev_map_update_elem(struct net *net, struct bpf_map *map, | |
645 | void *key, void *value, u64 map_flags) | |
646 | { | |
647 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
546ac1ff | 648 | struct bpf_dtab_netdev *dev, *old_dev; |
281920b7 | 649 | struct bpf_devmap_val val = {}; |
d5df2830 | 650 | u32 i = *(u32 *)key; |
546ac1ff JF |
651 | |
652 | if (unlikely(map_flags > BPF_EXIST)) | |
653 | return -EINVAL; | |
546ac1ff JF |
654 | if (unlikely(i >= dtab->map.max_entries)) |
655 | return -E2BIG; | |
546ac1ff JF |
656 | if (unlikely(map_flags == BPF_NOEXIST)) |
657 | return -EEXIST; | |
658 | ||
7f1c0426 DA |
659 | /* already verified value_size <= sizeof val */ |
660 | memcpy(&val, value, map->value_size); | |
661 | ||
662 | if (!val.ifindex) { | |
546ac1ff | 663 | dev = NULL; |
fbee97fe | 664 | /* can not specify fd if ifindex is 0 */ |
281920b7 | 665 | if (val.bpf_prog.fd > 0) |
fbee97fe | 666 | return -EINVAL; |
546ac1ff | 667 | } else { |
7f1c0426 | 668 | dev = __dev_map_alloc_node(net, dtab, &val, i); |
fca16e51 THJ |
669 | if (IS_ERR(dev)) |
670 | return PTR_ERR(dev); | |
546ac1ff JF |
671 | } |
672 | ||
673 | /* Use call_rcu() here to ensure rcu critical sections have completed. | |
674 | * Remember that the driver side flush operation will happen before the | |
675 | * net device is removed. | |
676 | */ | |
677 | old_dev = xchg(&dtab->netdev_map[i], dev); | |
678 | if (old_dev) | |
679 | call_rcu(&old_dev->rcu, __dev_map_entry_free); | |
680 | ||
681 | return 0; | |
682 | } | |
683 | ||
fca16e51 THJ |
684 | static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, |
685 | u64 map_flags) | |
686 | { | |
687 | return __dev_map_update_elem(current->nsproxy->net_ns, | |
688 | map, key, value, map_flags); | |
689 | } | |
690 | ||
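The matching user-space side of __dev_map_update_elem(): a hedged sketch that writes a struct bpf_devmap_val, optionally carrying the fd of a BPF_XDP_DEVMAP program so __dev_map_alloc_node() attaches it to the entry. map_fd, ifindex and devmap_prog_fd are assumed to come from elsewhere; a map created with 4-byte values would take just the ifindex instead.

```c
/* Hypothetical user-space sketch; map_fd, ifindex and devmap_prog_fd are
 * assumed to be obtained elsewhere (devmap_prog_fd <= 0 means "no program").
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>

int add_port(int map_fd, __u32 slot, __u32 ifindex, int devmap_prog_fd)
{
	struct bpf_devmap_val val = {
		.ifindex = ifindex,
		.bpf_prog.fd = devmap_prog_fd,	/* > 0 attaches a per-entry program */
	};

	/* Works for both DEVMAP (slot is an array index) and DEVMAP_HASH
	 * (slot is a hash key), as long as the map uses 8-byte values.
	 */
	return bpf_map_update_elem(map_fd, &slot, &val, BPF_ANY);
}
```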
6f9d451a THJ |
691 | static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map, |
692 | void *key, void *value, u64 map_flags) | |
693 | { | |
694 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | |
695 | struct bpf_dtab_netdev *dev, *old_dev; | |
281920b7 | 696 | struct bpf_devmap_val val = {}; |
6f9d451a THJ |
697 | u32 idx = *(u32 *)key; |
698 | unsigned long flags; | |
af58e7ee | 699 | int err = -EEXIST; |
6f9d451a | 700 | |
7f1c0426 DA |
701 | /* already verified value_size <= sizeof val */ |
702 | memcpy(&val, value, map->value_size); | |
703 | ||
704 | if (unlikely(map_flags > BPF_EXIST || !val.ifindex)) | |
6f9d451a THJ |
705 | return -EINVAL; |
706 | ||
af58e7ee THJ |
707 | spin_lock_irqsave(&dtab->index_lock, flags); |
708 | ||
6f9d451a THJ |
709 | old_dev = __dev_map_hash_lookup_elem(map, idx); |
710 | if (old_dev && (map_flags & BPF_NOEXIST)) | |
af58e7ee | 711 | goto out_err; |
6f9d451a | 712 | |
7f1c0426 | 713 | dev = __dev_map_alloc_node(net, dtab, &val, idx); |
af58e7ee THJ |
714 | if (IS_ERR(dev)) { |
715 | err = PTR_ERR(dev); | |
716 | goto out_err; | |
717 | } | |
6f9d451a THJ |
718 | |
719 | if (old_dev) { | |
720 | hlist_del_rcu(&old_dev->index_hlist); | |
721 | } else { | |
722 | if (dtab->items >= dtab->map.max_entries) { | |
723 | spin_unlock_irqrestore(&dtab->index_lock, flags); | |
724 | call_rcu(&dev->rcu, __dev_map_entry_free); | |
725 | return -E2BIG; | |
726 | } | |
727 | dtab->items++; | |
728 | } | |
729 | ||
730 | hlist_add_head_rcu(&dev->index_hlist, | |
731 | dev_map_index_hash(dtab, idx)); | |
732 | spin_unlock_irqrestore(&dtab->index_lock, flags); | |
733 | ||
734 | if (old_dev) | |
735 | call_rcu(&old_dev->rcu, __dev_map_entry_free); | |
736 | ||
737 | return 0; | |
af58e7ee THJ |
738 | |
739 | out_err: | |
740 | spin_unlock_irqrestore(&dtab->index_lock, flags); | |
741 | return err; | |
6f9d451a THJ |
742 | } |
743 | ||
744 | static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value, | |
745 | u64 map_flags) | |
746 | { | |
747 | return __dev_map_hash_update_elem(current->nsproxy->net_ns, | |
748 | map, key, value, map_flags); | |
749 | } | |
750 | ||
2872e9ac | 751 | static int dev_map_btf_id; |
546ac1ff | 752 | const struct bpf_map_ops dev_map_ops = { |
f4d05259 | 753 | .map_meta_equal = bpf_map_meta_equal, |
546ac1ff JF |
754 | .map_alloc = dev_map_alloc, |
755 | .map_free = dev_map_free, | |
756 | .map_get_next_key = dev_map_get_next_key, | |
757 | .map_lookup_elem = dev_map_lookup_elem, | |
758 | .map_update_elem = dev_map_update_elem, | |
759 | .map_delete_elem = dev_map_delete_elem, | |
e8d2bec0 | 760 | .map_check_btf = map_check_no_btf, |
2872e9ac AI |
761 | .map_btf_name = "bpf_dtab", |
762 | .map_btf_id = &dev_map_btf_id, | |
546ac1ff | 763 | }; |
2ddf71e2 | 764 | |
2872e9ac | 765 | static int dev_map_hash_map_btf_id; |
6f9d451a | 766 | const struct bpf_map_ops dev_map_hash_ops = { |
f4d05259 | 767 | .map_meta_equal = bpf_map_meta_equal, |
6f9d451a THJ |
768 | .map_alloc = dev_map_alloc, |
769 | .map_free = dev_map_free, | |
770 | .map_get_next_key = dev_map_hash_get_next_key, | |
771 | .map_lookup_elem = dev_map_hash_lookup_elem, | |
772 | .map_update_elem = dev_map_hash_update_elem, | |
773 | .map_delete_elem = dev_map_hash_delete_elem, | |
774 | .map_check_btf = map_check_no_btf, | |
2872e9ac AI |
775 | .map_btf_name = "bpf_dtab", |
776 | .map_btf_id = &dev_map_hash_map_btf_id, | |
6f9d451a THJ |
777 | }; |
778 | ||
ce197d83 THJ |
779 | static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab, |
780 | struct net_device *netdev) | |
781 | { | |
782 | unsigned long flags; | |
783 | u32 i; | |
784 | ||
785 | spin_lock_irqsave(&dtab->index_lock, flags); | |
786 | for (i = 0; i < dtab->n_buckets; i++) { | |
787 | struct bpf_dtab_netdev *dev; | |
788 | struct hlist_head *head; | |
789 | struct hlist_node *next; | |
790 | ||
791 | head = dev_map_index_hash(dtab, i); | |
792 | ||
793 | hlist_for_each_entry_safe(dev, next, head, index_hlist) { | |
794 | if (netdev != dev->dev) | |
795 | continue; | |
796 | ||
797 | dtab->items--; | |
798 | hlist_del_rcu(&dev->index_hlist); | |
799 | call_rcu(&dev->rcu, __dev_map_entry_free); | |
800 | } | |
801 | } | |
802 | spin_unlock_irqrestore(&dtab->index_lock, flags); | |
803 | } | |
804 | ||
2ddf71e2 JF |
805 | static int dev_map_notification(struct notifier_block *notifier, |
806 | ulong event, void *ptr) | |
807 | { | |
808 | struct net_device *netdev = netdev_notifier_info_to_dev(ptr); | |
809 | struct bpf_dtab *dtab; | |
75ccae62 | 810 | int i, cpu; |
2ddf71e2 JF |
811 | |
812 | switch (event) { | |
75ccae62 THJ |
813 | case NETDEV_REGISTER: |
814 | if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq) | |
815 | break; | |
816 | ||
817 | /* will be freed in free_netdev() */ | |
818 | netdev->xdp_bulkq = | |
819 | __alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue), | |
820 | sizeof(void *), GFP_ATOMIC); | |
821 | if (!netdev->xdp_bulkq) | |
822 | return NOTIFY_BAD; | |
823 | ||
824 | for_each_possible_cpu(cpu) | |
825 | per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev; | |
826 | break; | |
2ddf71e2 | 827 | case NETDEV_UNREGISTER: |
4cc7b954 JF |
828 | /* This rcu_read_lock/unlock pair is needed because |
829 | * dev_map_list is an RCU list AND to ensure a delete | |
830 | * operation does not free a netdev_map entry while we | |
831 | * are comparing it against the netdev being unregistered. | |
832 | */ | |
833 | rcu_read_lock(); | |
834 | list_for_each_entry_rcu(dtab, &dev_map_list, list) { | |
ce197d83 THJ |
835 | if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { |
836 | dev_map_hash_remove_netdev(dtab, netdev); | |
837 | continue; | |
838 | } | |
839 | ||
2ddf71e2 | 840 | for (i = 0; i < dtab->map.max_entries; i++) { |
4cc7b954 | 841 | struct bpf_dtab_netdev *dev, *odev; |
2ddf71e2 | 842 | |
4cc7b954 | 843 | dev = READ_ONCE(dtab->netdev_map[i]); |
f592f804 | 844 | if (!dev || netdev != dev->dev) |
2ddf71e2 | 845 | continue; |
4cc7b954 JF |
846 | odev = cmpxchg(&dtab->netdev_map[i], dev, NULL); |
847 | if (dev == odev) | |
2ddf71e2 JF |
848 | call_rcu(&dev->rcu, |
849 | __dev_map_entry_free); | |
850 | } | |
851 | } | |
4cc7b954 | 852 | rcu_read_unlock(); |
2ddf71e2 JF |
853 | break; |
854 | default: | |
855 | break; | |
856 | } | |
857 | return NOTIFY_OK; | |
858 | } | |
859 | ||
860 | static struct notifier_block dev_map_notifier = { | |
861 | .notifier_call = dev_map_notification, | |
862 | }; | |
863 | ||
864 | static int __init dev_map_init(void) | |
865 | { | |
96360004 BT |
866 | int cpu; |
867 | ||
67f29e07 JDB |
868 | /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */ |
869 | BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) != | |
870 | offsetof(struct _bpf_dtab_netdev, dev)); | |
2ddf71e2 | 871 | register_netdevice_notifier(&dev_map_notifier); |
96360004 BT |
872 | |
873 | for_each_possible_cpu(cpu) | |
1d233886 | 874 | INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu)); |
2ddf71e2 JF |
875 | return 0; |
876 | } | |
877 | ||
878 | subsys_initcall(dev_map_init); |