/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it; currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached, the sock object may only be used for sock
 * redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs, the update will return an EBUSY error.
 *
 * For reference, this map is similar to the devmap used in the XDP context;
 * reviewing these together may be useful. For an example please review
 * ./samples/bpf/sockmap/.
 */
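
/* Illustrative user-space sketch (never built here, hence #if 0): how the
 * map described above might be created, have its SK_SKB programs attached,
 * and have a TCP socket added to it. It assumes the tools/lib/bpf helpers
 * bpf_create_map(), bpf_prog_attach() and bpf_map_update_elem(); the
 * function name add_sock_to_map() and the parse_fd/verdict_fd/sock_fd
 * parameters are hypothetical, standing in for already-loaded programs and
 * an already-connected socket. See samples/bpf/sockmap/ for a complete,
 * buildable example.
 */
#if 0
#include <bpf/bpf.h>

static int add_sock_to_map(int parse_fd, int verdict_fd, int sock_fd)
{
	int map_fd, key = 0, err;

	/* key_size and value_size must both be 4: the map is indexed by a
	 * u32 slot and updated from user space with a socket fd
	 */
	map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
				sizeof(int), sizeof(int), 16, 0);
	if (map_fd < 0)
		return map_fd;

	/* programs attached to the map are inherited by sockets added to it */
	err = bpf_prog_attach(parse_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
	if (!err)
		err = bpf_prog_attach(verdict_fd, map_fd,
				      BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!err)
		err = bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
	return err;
}
#endif
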
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>
#include <net/tcp.h>

#define SOCK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
};

enum smap_psock_state {
	SMAP_TX_RUNNING,
};

struct smap_psock_map_entry {
	struct list_head list;
	struct sock **entry;
};

struct smap_psock {
	struct rcu_head rcu;
	/* refcnt is used inside sk_callback_lock */
	u32 refcnt;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	struct strparser strp;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct list_head maps;

	/* Back reference used when sock callbacks trigger sockmap operations */
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
	void (*save_state_change)(struct sock *sk);
};

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

/* compute the linear packet data range [data, data_end) for skb when
 * sk_skb type programs are in use.
 */
static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}

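/* Illustrative BPF-side sketch (not built here): an SK_SKB stream parser
 * that uses the [data, data_end) range made available to sk_skb programs.
 * The SEC() annotation, the __sk_buff field conversions, and the program
 * name are assumptions following the conventions in samples/bpf/sockmap/;
 * a real parser would return the framed message length, this sketch simply
 * consumes the whole skb.
 */
#if 0
SEC("sk_skb1")
int bpf_prog_parser(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;

	/* the verifier requires a bounds check before touching payload */
	if (data + 1 > data_end)
		return skb->len;

	/* a real parser could inspect *(char *)data here to locate message
	 * boundaries; returning skb->len hands the full skb to the verdict
	 * program as one message
	 */
	return skb->len;
}
#endif
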
enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
};

static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return __SK_DROP;

	skb_orphan(skb);
	/* We need to ensure that BPF metadata for maps is also cleared
	 * when we orphan the skb so that we don't have the possibility
	 * to reference a stale map.
	 */
	TCP_SKB_CB(skb)->bpf.map = NULL;
	skb->sk = psock->sock;
	bpf_compute_data_pointers(skb);
	preempt_disable();
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	preempt_enable();
	skb->sk = NULL;

	/* Moving return codes from UAPI namespace into internal namespace */
	return rc == SK_PASS ?
		(TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
		__SK_DROP;
}
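
/* Illustrative BPF-side sketch (not built here): a verdict program whose
 * UAPI return codes (SK_PASS/SK_DROP) smap_verdict_func() translates above.
 * Calling bpf_sk_redirect_map() records the target map and key in
 * TCP_SKB_CB(skb)->bpf, which is what turns an SK_PASS into __SK_REDIRECT.
 * The helper signature, header paths and map definition style are assumed
 * from this tree's uapi and samples/bpf/sockmap/ conventions.
 */
#if 0
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") sock_map = {
	.type		= BPF_MAP_TYPE_SOCKMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 16,
};

SEC("sk_skb2")
int bpf_prog_verdict(struct __sk_buff *skb)
{
	/* redirect every message to the socket stored at map index 0;
	 * the helper returns SK_PASS on success and SK_DROP otherwise
	 */
	return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
}
#endif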

static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sk;
	int rc;

	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case __SK_REDIRECT:
		sk = do_sk_redirect_map(skb);
		if (likely(sk)) {
			struct smap_psock *peer = smap_psock_sk(sk);

			if (likely(peer &&
				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
				   !sock_flag(sk, SOCK_DEAD) &&
				   sock_writeable(sk))) {
				skb_set_owner_w(skb, sk);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case __SK_DROP:
	default:
		kfree_skb(skb);
	}
}

static void smap_report_sk_error(struct smap_psock *psock, int err)
{
	struct sock *sk = psock->sock;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);

/* Called with lock_sock(sk) held */
static void smap_state_change(struct sock *sk)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;
	struct socket_wq *wq;
	struct sock *osk;

	rcu_read_lock();

	/* Allowing transitions into established and syn_recv states allows
	 * sockets to be bound to a smap object before the connection is
	 * established (an illustrative sock_ops sketch follows this
	 * function).
	 */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		break;
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LISTEN:
		break;
	case TCP_CLOSE:
		/* Only release if the map entry is in fact the sock in
		 * question. There is a case where the operator deletes
		 * the sock from the map, but the TCP sock is closed before
		 * the psock is detached. Use cmpxchg to verify the correct
		 * sock is removed.
		 */
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		write_lock_bh(&sk->sk_callback_lock);
		list_for_each_entry_safe(e, tmp, &psock->maps, list) {
			osk = cmpxchg(e->entry, sk, NULL);
			if (osk == sk) {
				list_del(&e->list);
				smap_release_sock(psock, sk);
			}
		}
		write_unlock_bh(&sk->sk_callback_lock);
		break;
	default:
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		smap_report_sk_error(psock, EPIPE);
		break;
	}

	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}
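
/* Illustrative BPF-side sketch (not built here): a sock_ops program doing
 * the early binding mentioned above, adding sockets to the map as
 * connections become established. bpf_sock_map_update() is the helper
 * implemented at the bottom of this file; the callback names, the key
 * choice, and the sock_map definition (reused from the verdict sketch
 * earlier) follow samples/bpf/sockmap/ conventions and are assumptions.
 */
#if 0
SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
	int key = 0;

	switch (skops->op) {
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
		/* BPF_NOEXIST keeps any binding already installed by an
		 * operator-driven update and surfaces -EEXIST otherwise
		 */
		bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
		break;
	default:
		break;
	}
	return 0;
}
#endif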

static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	smap_do_verdict(psock, skb);
	rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}

static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during loop */
	lock_sock(psock->sock);
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		rem = skb->len;
		off = 0;
start:
		do {
			if (likely(psock->sock->sk_socket))
				n = skb_send_sock_locked(psock->sock,
							 skb, off, rem);
			else
				n = -EINVAL;
			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem);
		kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
		schedule_work(&psock->tx_work);
	rcu_read_unlock();
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	if (!psock->strp_enabled)
		return;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	sk->sk_state_change = psock->save_state_change;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	psock->save_state_change = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we can not
	 * do it in rcu context
	 */
	schedule_work(&psock->gc_work);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
	psock->refcnt--;
	if (psock->refcnt)
		return;

	smap_stop_sock(psock, sock);
	clear_bit(SMAP_TX_RUNNING, &psock->state);
	rcu_assign_sk_user_data(sock, NULL);
	call_rcu_sched(&psock->rcu, smap_destroy_psock);
}

static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach the socket for the bpf program to use if needed. We can do
	 * this because strparser clones the skb before handing it to an
	 * upper layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON() in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_pointers(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}


static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}

static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = smap_read_sock_strparser,
		.parse_msg = smap_parse_func_strparser,
		.read_sock_done = smap_read_sock_done,
	};

	return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
			    struct bpf_stab *stab,
			    struct bpf_prog *verdict,
			    struct bpf_prog *parse)
{
	struct bpf_prog *orig_parse, *orig_verdict;

	orig_parse = xchg(&psock->bpf_parse, parse);
	orig_verdict = xchg(&psock->bpf_verdict, verdict);

	if (orig_verdict)
		bpf_prog_put(orig_verdict);
	if (orig_parse)
		bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	psock->save_state_change = sk->sk_state_change;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	sk->sk_state_change = smap_state_change;
	psock->strp_enabled = true;
}

static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		list_del(&e->list);
		kfree(e);
	}

	sock_put(psock->sock);
	kfree(psock);
}

static struct smap_psock *smap_init_psock(struct sock *sock,
					  struct bpf_stab *stab)
{
	struct smap_psock *psock;

	psock = kzalloc_node(sizeof(struct smap_psock),
			     GFP_ATOMIC | __GFP_NOWARN,
			     stab->map.numa_node);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);
	INIT_LIST_HEAD(&psock->maps);
	psock->refcnt = 1;

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	stab->map.map_type = attr->map_type;
	stab->map.key_size = attr->key_size;
	stab->map.value_size = attr->value_size;
	stab->map.max_entries = attr->max_entries;
	stab->map.map_flags = attr->map_flags;
	stab->map.numa_node = bpf_map_attr_numa_node(attr);

	/* make sure page count doesn't overflow */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_stab;

	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(stab->map.pages);
	if (err)
		goto free_stab;

	err = -ENOMEM;
	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
					    sizeof(struct sock *),
					    stab->map.numa_node);
	if (!stab->sock_map)
		goto free_stab;

	return &stab->map;
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}

static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
{
	struct smap_psock_map_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		if (e->entry == entry) {
			list_del(&e->list);
			break;
		}
	}
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates and
	 * data ready callbacks that reference the psock from sk_user_data.
	 * Also psock worker threads are still in-flight. So smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires to ensure the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct smap_psock *psock;
		struct sock *sock;

		sock = xchg(&stab->sock_map[i], NULL);
		if (!sock)
			continue;

		write_lock_bh(&sock->sk_callback_lock);
		psock = smap_psock_sk(sock);
		/* This check handles a racing sock event that can get the
		 * sk_callback_lock before this case but after xchg happens
		 * causing the refcnt to hit zero and sock user data (psock)
		 * to be null and queued for garbage collection.
		 */
		if (likely(psock)) {
			smap_list_remove(psock, &stab->sock_map[i]);
			smap_release_sock(psock, sock);
		}
		write_unlock_bh(&sock->sk_callback_lock);
	}
	rcu_read_unlock();

	sock_map_remove_complete(stab);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock *psock;
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);
	if (!psock)
		goto out;

	if (psock->bpf_parse)
		smap_stop_sock(psock, sock);
	smap_list_remove(psock, &stab->sock_map[k]);
	smap_release_sock(psock, sock);
out:
	write_unlock_bh(&sock->sk_callback_lock);
	return 0;
}

/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced, so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps it belongs to. A reference count
 * is kept with the total number of references to the psock from all maps. The
 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
 * from concurrent access. This allows us to avoid two updates modifying the
 * user data in sock at the same time, and the lock is required anyway for
 * modifying callbacks; we simply increase its scope slightly.
 *
 * Rules to follow,
 * - psock must always be read inside RCU critical section
 * - sk_user_data must only be modified inside sk_callback_lock and read
 *   inside RCU critical section.
 * - psock->maps list must only be read & modified inside sk_callback_lock
 * - sock_map must use READ_ONCE and (cmp)xchg operations
 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock_map_entry *e = NULL;
	struct bpf_prog *verdict, *parse;
	struct sock *osock, *sock;
	struct smap_psock *psock;
	u32 i = *(u32 *)key;
	int err;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	sock = READ_ONCE(stab->sock_map[i]);
	if (flags == BPF_EXIST && !sock)
		return -ENOENT;
	else if (flags == BPF_NOEXIST && sock)
		return -EEXIST;

	sock = skops->sk;

	/* 1. If the sock map has BPF programs, those will be inherited by the
	 * sock being added. If the sock is already attached to BPF programs
	 * this results in an error.
	 */
	verdict = READ_ONCE(stab->bpf_verdict);
	parse = READ_ONCE(stab->bpf_parse);

	if (parse && verdict) {
		/* bpf prog refcnt may be zero if a concurrent attach operation
		 * removes the program after the above READ_ONCE() but before
		 * we increment the refcnt. If this is the case abort with an
		 * error.
		 */
		verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(stab->bpf_parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);

	/* 2. Do not allow inheriting programs if the psock exists and has
	 * already inherited programs. This would create confusion about
	 * which parser/verdict program is running. If no psock exists,
	 * create one. Inside sk_callback_lock to ensure a concurrent create
	 * doesn't update user data.
	 */
	if (psock) {
		if (READ_ONCE(psock->bpf_parse) && parse) {
			err = -EBUSY;
			goto out_progs;
		}
		psock->refcnt++;
	} else {
		psock = smap_init_psock(sock, stab);
		if (IS_ERR(psock)) {
			err = PTR_ERR(psock);
			goto out_progs;
		}

		set_bit(SMAP_TX_RUNNING, &psock->state);
	}

	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e) {
		err = -ENOMEM;
		goto out_progs;
	}
	e->entry = &stab->sock_map[i];

	/* 3. At this point we have a reference to a valid psock that is
	 * running. Attach any BPF programs needed.
	 */
	if (parse && verdict && !psock->strp_enabled) {
		err = smap_init_sock(psock, sock);
		if (err)
			goto out_free;
		smap_init_progs(psock, stab, verdict, parse);
		smap_start_sock(psock, sock);
	}

	/* 4. Place psock in sockmap for use and stop any programs on
	 * the old sock assuming it's not the same sock we are replacing
	 * it with. Because we can only have a single set of programs, if
	 * old_sock has a strp we can stop it.
	 */
	list_add_tail(&e->list, &psock->maps);
	write_unlock_bh(&sock->sk_callback_lock);

	osock = xchg(&stab->sock_map[i], sock);
	if (osock) {
		struct smap_psock *opsock = smap_psock_sk(osock);

		write_lock_bh(&osock->sk_callback_lock);
		if (osock != sock && parse)
			smap_stop_sock(opsock, osock);
		smap_list_remove(opsock, &stab->sock_map[i]);
		smap_release_sock(opsock, osock);
		write_unlock_bh(&osock->sk_callback_lock);
	}
	return 0;
out_free:
	smap_release_sock(psock, sock);
out_progs:
	if (verdict)
		bpf_prog_put(verdict);
	if (parse)
		bpf_prog_put(parse);
	write_unlock_bh(&sock->sk_callback_lock);
	kfree(e);
	return err;
}

int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
		return -EINVAL;

	switch (type) {
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&stab->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&stab->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	if (skops.sk->sk_type != SOCK_STREAM ||
	    skops.sk->sk_protocol != IPPROTO_TCP) {
		fput(socket->file);
		return -EOPNOTSUPP;
	}

	err = sock_map_ctx_update_elem(&skops, map, key, flags);
	fput(socket->file);
	return err;
}

static void sock_map_release(struct bpf_map *map, struct file *map_file)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	orig = xchg(&stab->bpf_parse, NULL);
	if (orig)
		bpf_prog_put(orig);
	orig = xchg(&stab->bpf_verdict, NULL);
	if (orig)
		bpf_prog_put(orig);
}

const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
	.map_release = sock_map_release,
};

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};