kernel/bpf/sockmap.c
1 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
11 */
12
13 /* A BPF sock_map is used to store sock objects. This is primarily used
14 * for doing socket redirect with BPF helper routines.
15 *
16 * A sock map may have BPF programs attached to it; currently, a program
17 * used to parse packets and a program to provide a verdict and redirect
18 * decision on the packet are supported. Any programs attached to a sock
19 * map are inherited by sock objects when they are added to the map. If
20 * no BPF programs are attached the sock object may only be used for sock
21 * redirect.
22 *
23 * A sock object may be in multiple maps, but can only inherit a single
24 * parse or verdict program. If adding a sock object to a map would result
25 * in having multiple parsing programs the update will return an EBUSY error.
26 *
27 * For reference, this map type is similar to the devmap used in the XDP
28 * context; reviewing these together may be useful. For an example, please
29 * review ./samples/bpf/sockmap/.
30 */
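/* Illustrative user-space flow (not part of this file): one way to wire a
 * parser and verdict program to a sockmap, sketched with libbpf-style
 * helpers. The fds, the index and the map size are placeholders.
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
 *				    sizeof(int), sizeof(int), 20, 0);
 *
 *	bpf_prog_attach(parse_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *
 *	// value is the fd of an established TCP socket
 *	bpf_map_update_elem(map_fd, &index, &sock_fd, BPF_ANY);
 */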
31 #include <linux/bpf.h>
32 #include <net/sock.h>
33 #include <linux/filter.h>
34 #include <linux/errno.h>
35 #include <linux/file.h>
36 #include <linux/kernel.h>
37 #include <linux/net.h>
38 #include <linux/skbuff.h>
39 #include <linux/workqueue.h>
40 #include <linux/list.h>
41 #include <net/strparser.h>
42 #include <net/tcp.h>
43
44 #define SOCK_CREATE_FLAG_MASK \
45 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
46
47 struct bpf_stab {
48 struct bpf_map map;
49 struct sock **sock_map;
50 struct bpf_prog *bpf_parse;
51 struct bpf_prog *bpf_verdict;
52 };
53
54 enum smap_psock_state {
55 SMAP_TX_RUNNING,
56 };
57
58 struct smap_psock_map_entry {
59 struct list_head list;
60 struct sock **entry;
61 };
62
63 struct smap_psock {
64 struct rcu_head rcu;
65 /* refcnt is used inside sk_callback_lock */
66 u32 refcnt;
67
68 /* datapath variables */
69 struct sk_buff_head rxqueue;
70 bool strp_enabled;
71
72 /* datapath error path cache across tx work invocations */
73 int save_rem;
74 int save_off;
75 struct sk_buff *save_skb;
76
77 struct strparser strp;
78 struct bpf_prog *bpf_parse;
79 struct bpf_prog *bpf_verdict;
80 struct list_head maps;
81
82 /* Back reference used when sock callbacks trigger sockmap operations */
83 struct sock *sock;
84 unsigned long state;
85
86 struct work_struct tx_work;
87 struct work_struct gc_work;
88
89 void (*save_data_ready)(struct sock *sk);
90 void (*save_write_space)(struct sock *sk);
91 void (*save_state_change)(struct sock *sk);
92 };
93
94 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
95 {
96 return rcu_dereference_sk_user_data(sk);
97 }
98
99 /* compute the linear packet data range [data, data_end) for skb when
100 * sk_skb type programs are in use.
101 */
102 static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
103 {
104 TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
105 }
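/* Illustrative sketch (not part of this file): an sk_skb program sees the
 * range computed above through skb->data and skb->data_end and must bounds
 * check before touching packet bytes. The section name and the four-byte
 * peek are only examples; returning skb->len treats the whole skb as one
 * message.
 *
 *	SEC("sk_skb/parser")
 *	int parser(struct __sk_buff *skb)
 *	{
 *		void *data = (void *)(long)skb->data;
 *		void *data_end = (void *)(long)skb->data_end;
 *
 *		if (data + 4 > data_end)
 *			return skb->len;
 *		// framing logic would go here
 *		return skb->len;
 *	}
 */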
106
107 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
108 {
109 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
110 int rc;
111
112 if (unlikely(!prog))
113 return SK_DROP;
114
115 skb_orphan(skb);
116 /* We need to ensure that BPF metadata for maps is also cleared
117 * when we orphan the skb so that we don't have the possibility
118 * to reference a stale map.
119 */
120 TCP_SKB_CB(skb)->bpf.map = NULL;
121 skb->sk = psock->sock;
122 bpf_compute_data_pointers(skb);
123 preempt_disable();
124 rc = (*prog->bpf_func)(skb, prog->insnsi);
125 preempt_enable();
126 skb->sk = NULL;
127
128 return rc == SK_PASS ?
129 (TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP;
130 }
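/* Illustrative sketch (not part of this file): a verdict program typically
 * picks a destination with the bpf_sk_redirect_map() helper, which stores
 * the target map in the skb's BPF metadata so the check above resolves to
 * SK_REDIRECT. "sock_map" stands for a BPF_MAP_TYPE_SOCKMAP declared by the
 * program; the fixed key 0 is an example.
 *
 *	SEC("sk_skb/verdict")
 *	int verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 */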
131
132 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
133 {
134 struct sock *sk;
135 int rc;
136
137 rc = smap_verdict_func(psock, skb);
138 switch (rc) {
139 case SK_REDIRECT:
140 sk = do_sk_redirect_map(skb);
141 if (likely(sk)) {
142 struct smap_psock *peer = smap_psock_sk(sk);
143
144 if (likely(peer &&
145 test_bit(SMAP_TX_RUNNING, &peer->state) &&
146 !sock_flag(sk, SOCK_DEAD) &&
147 sock_writeable(sk))) {
148 skb_set_owner_w(skb, sk);
149 skb_queue_tail(&peer->rxqueue, skb);
150 schedule_work(&peer->tx_work);
151 break;
152 }
153 }
154 /* Fall through and free skb otherwise */
155 case SK_DROP:
156 default:
157 kfree_skb(skb);
158 }
159 }
160
161 static void smap_report_sk_error(struct smap_psock *psock, int err)
162 {
163 struct sock *sk = psock->sock;
164
165 sk->sk_err = err;
166 sk->sk_error_report(sk);
167 }
168
169 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
170
171 /* Called with lock_sock(sk) held */
172 static void smap_state_change(struct sock *sk)
173 {
174 struct smap_psock_map_entry *e, *tmp;
175 struct smap_psock *psock;
176 struct socket_wq *wq;
177 struct sock *osk;
178
179 rcu_read_lock();
180
181 /* Allowing transitions into the established and syn_recv states allows
182 * sockets to be bound early to a smap object, before the connection
183 * is established.
184 */
185 switch (sk->sk_state) {
186 case TCP_SYN_SENT:
187 case TCP_SYN_RECV:
188 case TCP_ESTABLISHED:
189 break;
190 case TCP_CLOSE_WAIT:
191 case TCP_CLOSING:
192 case TCP_LAST_ACK:
193 case TCP_FIN_WAIT1:
194 case TCP_FIN_WAIT2:
195 case TCP_LISTEN:
196 break;
197 case TCP_CLOSE:
198 /* Only release if the map entry is in fact the sock in
199 * question. There is a case where the operator deletes
200 * the sock from the map, but the TCP sock is closed before
201 * the psock is detached. Use cmpxchg to verify correct
202 * sock is removed.
203 */
204 psock = smap_psock_sk(sk);
205 if (unlikely(!psock))
206 break;
207 write_lock_bh(&sk->sk_callback_lock);
208 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
209 osk = cmpxchg(e->entry, sk, NULL);
210 if (osk == sk) {
211 list_del(&e->list);
212 smap_release_sock(psock, sk);
213 }
214 }
215 write_unlock_bh(&sk->sk_callback_lock);
216 break;
217 default:
218 psock = smap_psock_sk(sk);
219 if (unlikely(!psock))
220 break;
221 smap_report_sk_error(psock, EPIPE);
222 break;
223 }
224
225 wq = rcu_dereference(sk->sk_wq);
226 if (skwq_has_sleeper(wq))
227 wake_up_interruptible_all(&wq->wait);
228 rcu_read_unlock();
229 }
230
231 static void smap_read_sock_strparser(struct strparser *strp,
232 struct sk_buff *skb)
233 {
234 struct smap_psock *psock;
235
236 rcu_read_lock();
237 psock = container_of(strp, struct smap_psock, strp);
238 smap_do_verdict(psock, skb);
239 rcu_read_unlock();
240 }
241
242 /* Called with lock held on socket */
243 static void smap_data_ready(struct sock *sk)
244 {
245 struct smap_psock *psock;
246
247 rcu_read_lock();
248 psock = smap_psock_sk(sk);
249 if (likely(psock)) {
250 write_lock_bh(&sk->sk_callback_lock);
251 strp_data_ready(&psock->strp);
252 write_unlock_bh(&sk->sk_callback_lock);
253 }
254 rcu_read_unlock();
255 }
256
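/* Transmit worker: drains the psock rx queue and sends each queued skb to
 * the psock's own socket with skb_send_sock_locked(). A partially sent skb
 * is cached in save_skb/save_rem/save_off on -EAGAIN and resumed when
 * smap_write_space() reschedules this work; hard errors report the send
 * error (or EPIPE) and clear SMAP_TX_RUNNING.
 */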
257 static void smap_tx_work(struct work_struct *w)
258 {
259 struct smap_psock *psock;
260 struct sk_buff *skb;
261 int rem, off, n;
262
263 psock = container_of(w, struct smap_psock, tx_work);
264
265 /* lock sock to avoid losing sk_socket at some point during loop */
266 lock_sock(psock->sock);
267 if (psock->save_skb) {
268 skb = psock->save_skb;
269 rem = psock->save_rem;
270 off = psock->save_off;
271 psock->save_skb = NULL;
272 goto start;
273 }
274
275 while ((skb = skb_dequeue(&psock->rxqueue))) {
276 rem = skb->len;
277 off = 0;
278 start:
279 do {
280 if (likely(psock->sock->sk_socket))
281 n = skb_send_sock_locked(psock->sock,
282 skb, off, rem);
283 else
284 n = -EINVAL;
285 if (n <= 0) {
286 if (n == -EAGAIN) {
287 /* Retry when space is available */
288 psock->save_skb = skb;
289 psock->save_rem = rem;
290 psock->save_off = off;
291 goto out;
292 }
293 /* Hard errors break pipe and stop xmit */
294 smap_report_sk_error(psock, n ? -n : EPIPE);
295 clear_bit(SMAP_TX_RUNNING, &psock->state);
296 kfree_skb(skb);
297 goto out;
298 }
299 rem -= n;
300 off += n;
301 } while (rem);
302 kfree_skb(skb);
303 }
304 out:
305 release_sock(psock->sock);
306 }
307
308 static void smap_write_space(struct sock *sk)
309 {
310 struct smap_psock *psock;
311
312 rcu_read_lock();
313 psock = smap_psock_sk(sk);
314 if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
315 schedule_work(&psock->tx_work);
316 rcu_read_unlock();
317 }
318
319 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
320 {
321 if (!psock->strp_enabled)
322 return;
323 sk->sk_data_ready = psock->save_data_ready;
324 sk->sk_write_space = psock->save_write_space;
325 sk->sk_state_change = psock->save_state_change;
326 psock->save_data_ready = NULL;
327 psock->save_write_space = NULL;
328 psock->save_state_change = NULL;
329 strp_stop(&psock->strp);
330 psock->strp_enabled = false;
331 }
332
333 static void smap_destroy_psock(struct rcu_head *rcu)
334 {
335 struct smap_psock *psock = container_of(rcu,
336 struct smap_psock, rcu);
337
338 /* Now that a grace period has passed there is no longer
339 * any reference to this sock in the sockmap, so we can
340 * destroy the psock, strparser, and bpf programs. But,
341 * because we use workqueue sync operations, we cannot
342 * do it in rcu context.
343 */
344 schedule_work(&psock->gc_work);
345 }
346
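/* Drop one map reference on the psock. When the count hits zero, detach the
 * strparser callbacks, clear sk_user_data and defer the actual teardown to
 * smap_destroy_psock()/smap_gc_work() after an RCU grace period. Callers
 * hold sk_callback_lock.
 */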
347 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
348 {
349 psock->refcnt--;
350 if (psock->refcnt)
351 return;
352
353 smap_stop_sock(psock, sock);
354 clear_bit(SMAP_TX_RUNNING, &psock->state);
355 rcu_assign_sk_user_data(sock, NULL);
356 call_rcu_sched(&psock->rcu, smap_destroy_psock);
357 }
358
359 static int smap_parse_func_strparser(struct strparser *strp,
360 struct sk_buff *skb)
361 {
362 struct smap_psock *psock;
363 struct bpf_prog *prog;
364 int rc;
365
366 rcu_read_lock();
367 psock = container_of(strp, struct smap_psock, strp);
368 prog = READ_ONCE(psock->bpf_parse);
369
370 if (unlikely(!prog)) {
371 rcu_read_unlock();
372 return skb->len;
373 }
374
375 /* Attach the socket for the bpf program to use if needed. We can do
376 * this because strparser clones the skb before handing it to an upper
377 * layer, meaning skb_orphan has been called. We NULL sk on the
378 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
379 * later, and because we are not charging the memory of this skb to
380 * any socket yet.
381 */
382 skb->sk = psock->sock;
383 bpf_compute_data_pointers(skb);
384 rc = (*prog->bpf_func)(skb, prog->insnsi);
385 skb->sk = NULL;
386 rcu_read_unlock();
387 return rc;
388 }
389
390
391 static int smap_read_sock_done(struct strparser *strp, int err)
392 {
393 return err;
394 }
395
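/* Wire up the strparser for this psock: parse_msg runs the BPF parse
 * program (smap_parse_func_strparser) to find message boundaries and
 * rcv_msg (smap_read_sock_strparser) runs the verdict/redirect path on
 * each complete message.
 */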
396 static int smap_init_sock(struct smap_psock *psock,
397 struct sock *sk)
398 {
399 static const struct strp_callbacks cb = {
400 .rcv_msg = smap_read_sock_strparser,
401 .parse_msg = smap_parse_func_strparser,
402 .read_sock_done = smap_read_sock_done,
403 };
404
405 return strp_init(&psock->strp, sk, &cb);
406 }
407
408 static void smap_init_progs(struct smap_psock *psock,
409 struct bpf_stab *stab,
410 struct bpf_prog *verdict,
411 struct bpf_prog *parse)
412 {
413 struct bpf_prog *orig_parse, *orig_verdict;
414
415 orig_parse = xchg(&psock->bpf_parse, parse);
416 orig_verdict = xchg(&psock->bpf_verdict, verdict);
417
418 if (orig_verdict)
419 bpf_prog_put(orig_verdict);
420 if (orig_parse)
421 bpf_prog_put(orig_parse);
422 }
423
424 static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
425 {
426 if (sk->sk_data_ready == smap_data_ready)
427 return;
428 psock->save_data_ready = sk->sk_data_ready;
429 psock->save_write_space = sk->sk_write_space;
430 psock->save_state_change = sk->sk_state_change;
431 sk->sk_data_ready = smap_data_ready;
432 sk->sk_write_space = smap_write_space;
433 sk->sk_state_change = smap_state_change;
434 psock->strp_enabled = true;
435 }
436
437 static void sock_map_remove_complete(struct bpf_stab *stab)
438 {
439 bpf_map_area_free(stab->sock_map);
440 kfree(stab);
441 }
442
443 static void smap_gc_work(struct work_struct *w)
444 {
445 struct smap_psock_map_entry *e, *tmp;
446 struct smap_psock *psock;
447
448 psock = container_of(w, struct smap_psock, gc_work);
449
450 /* no callback lock needed because we already detached sockmap ops */
451 if (psock->strp_enabled)
452 strp_done(&psock->strp);
453
454 cancel_work_sync(&psock->tx_work);
455 __skb_queue_purge(&psock->rxqueue);
456
457 /* At this point all strparser and xmit work must be complete */
458 if (psock->bpf_parse)
459 bpf_prog_put(psock->bpf_parse);
460 if (psock->bpf_verdict)
461 bpf_prog_put(psock->bpf_verdict);
462
463 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
464 list_del(&e->list);
465 kfree(e);
466 }
467
468 sock_put(psock->sock);
469 kfree(psock);
470 }
471
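/* Allocate a psock for @sock and publish it via sk_user_data. Called with
 * sk_callback_lock held, hence the GFP_ATOMIC allocation; a reference on
 * the sock is taken with sock_hold() and dropped later in smap_gc_work().
 */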
472 static struct smap_psock *smap_init_psock(struct sock *sock,
473 struct bpf_stab *stab)
474 {
475 struct smap_psock *psock;
476
477 psock = kzalloc_node(sizeof(struct smap_psock),
478 GFP_ATOMIC | __GFP_NOWARN,
479 stab->map.numa_node);
480 if (!psock)
481 return ERR_PTR(-ENOMEM);
482
483 psock->sock = sock;
484 skb_queue_head_init(&psock->rxqueue);
485 INIT_WORK(&psock->tx_work, smap_tx_work);
486 INIT_WORK(&psock->gc_work, smap_gc_work);
487 INIT_LIST_HEAD(&psock->maps);
488 psock->refcnt = 1;
489
490 rcu_assign_sk_user_data(sock, psock);
491 sock_hold(sock);
492 return psock;
493 }
494
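/* Map allocation: validates the fixed 4-byte key/value sizes, charges the
 * sock array (max_entries * sizeof(struct sock *)) against the memlock
 * limit and then allocates the array with bpf_map_area_alloc().
 */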
495 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
496 {
497 struct bpf_stab *stab;
498 int err = -EINVAL;
499 u64 cost;
500
501 if (!capable(CAP_NET_ADMIN))
502 return ERR_PTR(-EPERM);
503
504 /* check sanity of attributes */
505 if (attr->max_entries == 0 || attr->key_size != 4 ||
506 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
507 return ERR_PTR(-EINVAL);
508
509 if (attr->value_size > KMALLOC_MAX_SIZE)
510 return ERR_PTR(-E2BIG);
511
512 stab = kzalloc(sizeof(*stab), GFP_USER);
513 if (!stab)
514 return ERR_PTR(-ENOMEM);
515
516 /* mandatory map attributes */
517 stab->map.map_type = attr->map_type;
518 stab->map.key_size = attr->key_size;
519 stab->map.value_size = attr->value_size;
520 stab->map.max_entries = attr->max_entries;
521 stab->map.map_flags = attr->map_flags;
522 stab->map.numa_node = bpf_map_attr_numa_node(attr);
523
524 /* make sure page count doesn't overflow */
525 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
526 if (cost >= U32_MAX - PAGE_SIZE)
527 goto free_stab;
528
529 stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
530
531 /* if map size is larger than memlock limit, reject it early */
532 err = bpf_map_precharge_memlock(stab->map.pages);
533 if (err)
534 goto free_stab;
535
536 err = -ENOMEM;
537 stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
538 sizeof(struct sock *),
539 stab->map.numa_node);
540 if (!stab->sock_map)
541 goto free_stab;
542
543 return &stab->map;
544 free_stab:
545 kfree(stab);
546 return ERR_PTR(err);
547 }
548
549 static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
550 {
551 struct smap_psock_map_entry *e, *tmp;
552
553 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
554 if (e->entry == entry) {
555 list_del(&e->list);
556 break;
557 }
558 }
559 }
560
561 static void sock_map_free(struct bpf_map *map)
562 {
563 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
564 int i;
565
566 synchronize_rcu();
567
568 /* At this point no update, lookup or delete operations can happen.
569 * However, be aware we can still get socket state event updates
570 * and data ready callbacks that reference the psock from sk_user_data.
571 * Also, psock worker threads are still in-flight. So smap_release_sock
572 * will only free the psock after cancel_sync on the worker threads
573 * and a grace period expires, to ensure the psock is really safe to remove.
574 */
575 rcu_read_lock();
576 for (i = 0; i < stab->map.max_entries; i++) {
577 struct smap_psock *psock;
578 struct sock *sock;
579
580 sock = xchg(&stab->sock_map[i], NULL);
581 if (!sock)
582 continue;
583
584 write_lock_bh(&sock->sk_callback_lock);
585 psock = smap_psock_sk(sock);
586 smap_list_remove(psock, &stab->sock_map[i]);
587 smap_release_sock(psock, sock);
588 write_unlock_bh(&sock->sk_callback_lock);
589 }
590 rcu_read_unlock();
591
592 if (stab->bpf_verdict)
593 bpf_prog_put(stab->bpf_verdict);
594 if (stab->bpf_parse)
595 bpf_prog_put(stab->bpf_parse);
596
597 sock_map_remove_complete(stab);
598 }
599
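/* Iteration helper for user-space map walks: a NULL or out-of-range key
 * restarts at index 0, the last index returns -ENOENT, otherwise the next
 * index is returned.
 */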
600 static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
601 {
602 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
603 u32 i = key ? *(u32 *)key : U32_MAX;
604 u32 *next = (u32 *)next_key;
605
606 if (i >= stab->map.max_entries) {
607 *next = 0;
608 return 0;
609 }
610
611 if (i == stab->map.max_entries - 1)
612 return -ENOENT;
613
614 *next = i + 1;
615 return 0;
616 }
617
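/* Datapath lookup used by the BPF redirect helpers: returns the sock stored
 * at @key or NULL. Per the locking notes below, this must run inside an RCU
 * critical section and the reference must not be cached beyond it.
 */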
618 struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
619 {
620 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
621
622 if (key >= map->max_entries)
623 return NULL;
624
625 return READ_ONCE(stab->sock_map[key]);
626 }
627
628 static int sock_map_delete_elem(struct bpf_map *map, void *key)
629 {
630 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
631 struct smap_psock *psock;
632 int k = *(u32 *)key;
633 struct sock *sock;
634
635 if (k >= map->max_entries)
636 return -EINVAL;
637
638 sock = xchg(&stab->sock_map[k], NULL);
639 if (!sock)
640 return -EINVAL;
641
642 write_lock_bh(&sock->sk_callback_lock);
643 psock = smap_psock_sk(sock);
644 if (!psock)
645 goto out;
646
647 if (psock->bpf_parse)
648 smap_stop_sock(psock, sock);
649 smap_list_remove(psock, &stab->sock_map[k]);
650 smap_release_sock(psock, sock);
651 out:
652 write_unlock_bh(&sock->sk_callback_lock);
653 return 0;
654 }
655
656 /* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
657 * done inside rcu critical sections. This ensures on updates that the psock
658 * will not be released via smap_release_sock() until concurrent updates/deletes
659 * complete. All operations operate on sock_map using cmpxchg and xchg
660 * operations to ensure we do not get stale references. Any reads into the
661 * map must be done with READ_ONCE() because of this.
662 *
663 * A psock is destroyed via call_rcu and after any worker threads are cancelled
664 * and synced, so we are certain all references from the update/lookup/delete
665 * operations as well as references in the data path are no longer in use.
666 *
667 * Psocks may exist in multiple maps, but only a single set of parse/verdict
668 * programs may be inherited from the maps it belongs to. A reference count
669 * is kept with the total number of references to the psock from all maps. The
670 * psock will not be released until this reaches zero. The psock and sock
671 * user data use the sk_callback_lock to protect critical data structures
672 * from concurrent access. This prevents two updates from modifying the
673 * user data in the sock at the same time; since the lock is required anyway
674 * for modifying the callbacks, we simply increase its scope slightly.
675 *
676 * Rules to follow:
677 * - psock must always be read inside RCU critical section
678 * - sk_user_data must only be modified inside sk_callback_lock and read
679 * inside RCU critical section.
680 * - psock->maps list must only be read & modified inside sk_callback_lock
681 * - sock_map must use READ_ONCE and (cmp)xchg operations
682 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
683 */
684 static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
685 struct bpf_map *map,
686 void *key, u64 flags)
687 {
688 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
689 struct smap_psock_map_entry *e = NULL;
690 struct bpf_prog *verdict, *parse;
691 struct sock *osock, *sock;
692 struct smap_psock *psock;
693 u32 i = *(u32 *)key;
694 int err;
695
696 if (unlikely(flags > BPF_EXIST))
697 return -EINVAL;
698
699 if (unlikely(i >= stab->map.max_entries))
700 return -E2BIG;
701
702 sock = READ_ONCE(stab->sock_map[i]);
703 if (flags == BPF_EXIST && !sock)
704 return -ENOENT;
705 else if (flags == BPF_NOEXIST && sock)
706 return -EEXIST;
707
708 sock = skops->sk;
709
710 /* 1. If the sock map has BPF programs, those will be inherited by the
711 * sock being added. If the sock is already attached to BPF programs,
712 * this results in an error.
713 */
714 verdict = READ_ONCE(stab->bpf_verdict);
715 parse = READ_ONCE(stab->bpf_parse);
716
717 if (parse && verdict) {
718 /* bpf prog refcnt may be zero if a concurrent attach operation
719 * removes the program after the above READ_ONCE() but before
720 * we increment the refcnt. If this is the case abort with an
721 * error.
722 */
723 verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
724 if (IS_ERR(verdict))
725 return PTR_ERR(verdict);
726
727 parse = bpf_prog_inc_not_zero(stab->bpf_parse);
728 if (IS_ERR(parse)) {
729 bpf_prog_put(verdict);
730 return PTR_ERR(parse);
731 }
732 }
733
734 write_lock_bh(&sock->sk_callback_lock);
735 psock = smap_psock_sk(sock);
736
737 /* 2. Do not allow inheriting programs if psock exists and has
738 * already inherited programs. This would create confusion on
739 * which parser/verdict program is running. If no psock exists,
740 * create one. We are inside sk_callback_lock to ensure a concurrent
741 * create doesn't update the user data.
742 */
743 if (psock) {
744 if (READ_ONCE(psock->bpf_parse) && parse) {
745 err = -EBUSY;
746 goto out_progs;
747 }
748 psock->refcnt++;
749 } else {
750 psock = smap_init_psock(sock, stab);
751 if (IS_ERR(psock)) {
752 err = PTR_ERR(psock);
753 goto out_progs;
754 }
755
756 set_bit(SMAP_TX_RUNNING, &psock->state);
757 }
758
759 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
760 if (!e) {
761 err = -ENOMEM;
762 goto out_progs;
763 }
764 e->entry = &stab->sock_map[i];
765
766 /* 3. At this point we have a reference to a valid psock that is
767 * running. Attach any BPF programs needed.
768 */
769 if (parse && verdict && !psock->strp_enabled) {
770 err = smap_init_sock(psock, sock);
771 if (err)
772 goto out_free;
773 smap_init_progs(psock, stab, verdict, parse);
774 smap_start_sock(psock, sock);
775 }
776
777 /* 4. Place psock in sockmap for use and stop any programs on
778 * the old sock, assuming it's not the same sock we are replacing
779 * it with. Because we can only have a single set of programs, if
780 * old_sock has a strparser we can stop it.
781 */
782 list_add_tail(&e->list, &psock->maps);
783 write_unlock_bh(&sock->sk_callback_lock);
784
785 osock = xchg(&stab->sock_map[i], sock);
786 if (osock) {
787 struct smap_psock *opsock = smap_psock_sk(osock);
788
789 write_lock_bh(&osock->sk_callback_lock);
790 if (osock != sock && parse)
791 smap_stop_sock(opsock, osock);
792 smap_list_remove(opsock, &stab->sock_map[i]);
793 smap_release_sock(opsock, osock);
794 write_unlock_bh(&osock->sk_callback_lock);
795 }
796 return 0;
797 out_free:
798 smap_release_sock(psock, sock);
799 out_progs:
800 if (verdict)
801 bpf_prog_put(verdict);
802 if (parse)
803 bpf_prog_put(parse);
804 write_unlock_bh(&sock->sk_callback_lock);
805 kfree(e);
806 return err;
807 }
808
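/* Attach/detach entry point used when a parser or verdict program is
 * attached to (or detached from, with a NULL @prog) a sockmap fd: swap in
 * the new program and release the one it replaced.
 */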
809 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
810 {
811 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
812 struct bpf_prog *orig;
813
814 if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
815 return -EINVAL;
816
817 switch (type) {
818 case BPF_SK_SKB_STREAM_PARSER:
819 orig = xchg(&stab->bpf_parse, prog);
820 break;
821 case BPF_SK_SKB_STREAM_VERDICT:
822 orig = xchg(&stab->bpf_verdict, prog);
823 break;
824 default:
825 return -EOPNOTSUPP;
826 }
827
828 if (orig)
829 bpf_prog_put(orig);
830
831 return 0;
832 }
833
834 static void *sock_map_lookup(struct bpf_map *map, void *key)
835 {
836 return NULL;
837 }
838
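/* Syscall update path: the value is a socket fd, which is resolved to its
 * struct sock and must be a TCP (SOCK_STREAM/IPPROTO_TCP) socket before the
 * shared sock_map_ctx_update_elem() path runs.
 */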
839 static int sock_map_update_elem(struct bpf_map *map,
840 void *key, void *value, u64 flags)
841 {
842 struct bpf_sock_ops_kern skops;
843 u32 fd = *(u32 *)value;
844 struct socket *socket;
845 int err;
846
847 socket = sockfd_lookup(fd, &err);
848 if (!socket)
849 return err;
850
851 skops.sk = socket->sk;
852 if (!skops.sk) {
853 fput(socket->file);
854 return -EINVAL;
855 }
856
857 if (skops.sk->sk_type != SOCK_STREAM ||
858 skops.sk->sk_protocol != IPPROTO_TCP) {
859 fput(socket->file);
860 return -EOPNOTSUPP;
861 }
862
863 err = sock_map_ctx_update_elem(&skops, map, key, flags);
864 fput(socket->file);
865 return err;
866 }
867
868 const struct bpf_map_ops sock_map_ops = {
869 .map_alloc = sock_map_alloc,
870 .map_free = sock_map_free,
871 .map_lookup_elem = sock_map_lookup,
872 .map_get_next_key = sock_map_get_next_key,
873 .map_update_elem = sock_map_update_elem,
874 .map_delete_elem = sock_map_delete_elem,
875 };
876
877 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
878 struct bpf_map *, map, void *, key, u64, flags)
879 {
880 WARN_ON_ONCE(!rcu_read_lock_held());
881 return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
882 }
883
884 const struct bpf_func_proto bpf_sock_map_update_proto = {
885 .func = bpf_sock_map_update,
886 .gpl_only = false,
887 .pkt_access = true,
888 .ret_type = RET_INTEGER,
889 .arg1_type = ARG_PTR_TO_CTX,
890 .arg2_type = ARG_CONST_MAP_PTR,
891 .arg3_type = ARG_PTR_TO_MAP_KEY,
892 .arg4_type = ARG_ANYTHING,
893 };
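/* Illustrative sketch (not part of this file): bpf_sock_map_update() is
 * meant to be called from a BPF_PROG_TYPE_SOCK_OPS program so sockets can
 * be added as connections are established. The map definition, index and
 * callback choice below are only examples.
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type = BPF_MAP_TYPE_SOCKMAP,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(int),
 *		.max_entries = 20,
 *	};
 *
 *	SEC("sockops")
 *	int add_to_sockmap(struct bpf_sock_ops *skops)
 *	{
 *		__u32 idx = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 *		    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &idx, BPF_NOEXIST);
 *		return 0;
 *	}
 */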