/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it; currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached the sock object may only be used for sock
 * redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs the update will return an EBUSY error.
 *
 * For reference this program is similar to the devmap used in the XDP
 * context; reviewing these together may be useful. For an example please
 * review ./samples/bpf/sockmap/.
 */
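
/* Illustrative sketch only (not part of this file): a minimal SK_SKB verdict
 * program in the style of ./samples/bpf/sockmap/, redirecting every skb to
 * the socket stored at key 0 of a sockmap. The map, program, and section
 * names here are hypothetical, and the helper's argument list follows the
 * tcp_skb_cb based redirect path used below; check the samples for the form
 * that matches a given kernel.
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type		= BPF_MAP_TYPE_SOCKMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 20,
 *	};
 *
 *	SEC("sk_skb2")
 *	int bpf_prog_verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 *
 * The program is loaded as BPF_PROG_TYPE_SK_SKB and attached to the map with
 * BPF_SK_SKB_STREAM_VERDICT (see sock_map_prog() below).
 */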
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>
#include <net/tcp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
};

enum smap_psock_state {
	SMAP_TX_RUNNING,
};

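/* Each entry links a psock to one sockmap slot that currently references its
 * socket, so socket callbacks can find and clear those slots on close.
 */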
struct smap_psock_map_entry {
	struct list_head list;
	struct sock **entry;
};

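/* Per-socket sockmap state, attached to the socket via sk_user_data. */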
struct smap_psock {
	struct rcu_head rcu;
	/* refcnt is used inside sk_callback_lock */
	u32 refcnt;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	struct strparser strp;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct list_head maps;

	/* Back reference used when sock callbacks trigger sockmap operations */
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
	void (*save_state_change)(struct sock *sk);
};

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return SK_DROP;

	skb_orphan(skb);
	/* We need to ensure that BPF metadata for maps is also cleared
	 * when we orphan the skb so that we don't have the possibility
	 * to reference a stale map.
	 */
	TCP_SKB_CB(skb)->bpf.map = NULL;
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	preempt_disable();
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	preempt_enable();
	skb->sk = NULL;

	return rc;
}

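/* Run the verdict program on @skb and act on the result: on SK_REDIRECT the
 * skb is queued to the peer psock and its tx work is scheduled; otherwise
 * the skb is freed.
 */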
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sk;
	int rc;

	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case SK_REDIRECT:
		sk = do_sk_redirect_map(skb);
		if (likely(sk)) {
			struct smap_psock *peer = smap_psock_sk(sk);

			if (likely(peer &&
				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
				   !sock_flag(sk, SOCK_DEAD) &&
				   sock_writeable(sk))) {
				skb_set_owner_w(skb, sk);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case SK_DROP:
	default:
		kfree_skb(skb);
	}
}

static void smap_report_sk_error(struct smap_psock *psock, int err)
{
	struct sock *sk = psock->sock;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);

/* Called with lock_sock(sk) held */
static void smap_state_change(struct sock *sk)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;
	struct socket_wq *wq;
	struct sock *osk;

	rcu_read_lock();

	/* Allowing transitions into established and syn_recv states allows
	 * for early binding of sockets to a smap object before the connection
	 * is established.
	 */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		break;
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LISTEN:
		break;
	case TCP_CLOSE:
		/* Only release if the map entry is in fact the sock in
		 * question. There is a case where the operator deletes
		 * the sock from the map, but the TCP sock is closed before
		 * the psock is detached. Use cmpxchg to verify the correct
		 * sock is removed.
		 */
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		write_lock_bh(&sk->sk_callback_lock);
		list_for_each_entry_safe(e, tmp, &psock->maps, list) {
			osk = cmpxchg(e->entry, sk, NULL);
			if (osk == sk) {
				list_del(&e->list);
				smap_release_sock(psock, sk);
			}
		}
		write_unlock_bh(&sk->sk_callback_lock);
		break;
	default:
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		smap_report_sk_error(psock, EPIPE);
		break;
	}

	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	smap_do_verdict(psock, skb);
	rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}

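/* Transmit work: drain the psock receive queue and send each skb to the
 * redirected socket. On -EAGAIN the current skb and offset are saved so the
 * next invocation (kicked from smap_write_space()) can resume where it
 * stopped; hard errors are reported via smap_report_sk_error() and stop
 * transmission.
 */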
static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during loop */
	lock_sock(psock->sock);
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		rem = skb->len;
		off = 0;
start:
		do {
			if (likely(psock->sock->sk_socket))
				n = skb_send_sock_locked(psock->sock,
							 skb, off, rem);
			else
				n = -EINVAL;
			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem);
		kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
		schedule_work(&psock->tx_work);
	rcu_read_unlock();
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	if (!psock->strp_enabled)
		return;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	sk->sk_state_change = psock->save_state_change;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	psock->save_state_change = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we cannot
	 * do it in rcu context.
	 */
	schedule_work(&psock->gc_work);
}

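/* Drop a reference on the psock. On the last reference detach it from the
 * socket and schedule destruction after an RCU grace period.
 */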
static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
	psock->refcnt--;
	if (psock->refcnt)
		return;

	smap_stop_sock(psock, sock);
	clear_bit(SMAP_TX_RUNNING, &psock->state);
	rcu_assign_sk_user_data(sock, NULL);
	call_rcu_sched(&psock->rcu, smap_destroy_psock);
}

static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach the socket for the bpf program to use if needed. We can do
	 * this because strparser clones the skb before handing it to an upper
	 * layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}

static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}

static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = smap_read_sock_strparser,
		.parse_msg = smap_parse_func_strparser,
		.read_sock_done = smap_read_sock_done,
	};

	return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
			    struct bpf_stab *stab,
			    struct bpf_prog *verdict,
			    struct bpf_prog *parse)
{
	struct bpf_prog *orig_parse, *orig_verdict;

	orig_parse = xchg(&psock->bpf_parse, parse);
	orig_verdict = xchg(&psock->bpf_verdict, verdict);

	if (orig_verdict)
		bpf_prog_put(orig_verdict);
	if (orig_parse)
		bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	psock->save_state_change = sk->sk_state_change;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	sk->sk_state_change = smap_state_change;
	psock->strp_enabled = true;
}

static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		list_del(&e->list);
		kfree(e);
	}

	sock_put(psock->sock);
	kfree(psock);
}

static struct smap_psock *smap_init_psock(struct sock *sock,
					  struct bpf_stab *stab)
{
	struct smap_psock *psock;

	psock = kzalloc_node(sizeof(struct smap_psock),
			     GFP_ATOMIC | __GFP_NOWARN,
			     stab->map.numa_node);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);
	INIT_LIST_HEAD(&psock->maps);
	psock->refcnt = 1;

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	int err = -EINVAL;
	u64 cost;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	stab->map.map_type = attr->map_type;
	stab->map.key_size = attr->key_size;
	stab->map.value_size = attr->value_size;
	stab->map.max_entries = attr->max_entries;
	stab->map.map_flags = attr->map_flags;
	stab->map.numa_node = bpf_map_attr_numa_node(attr);

	/* make sure page count doesn't overflow */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_stab;

	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(stab->map.pages);
	if (err)
		goto free_stab;

	err = -ENOMEM;
	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
					    sizeof(struct sock *),
					    stab->map.numa_node);
	if (!stab->sock_map)
		goto free_stab;

	return &stab->map;
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}

static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
{
	struct smap_psock_map_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		if (e->entry == entry) {
			list_del(&e->list);
			break;
		}
	}
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates
	 * and data ready callbacks that reference the psock from sk_user_data.
	 * Also psock worker threads are still in-flight. So smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires, to ensure the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct smap_psock *psock;
		struct sock *sock;

		sock = xchg(&stab->sock_map[i], NULL);
		if (!sock)
			continue;

		write_lock_bh(&sock->sk_callback_lock);
		psock = smap_psock_sk(sock);
		smap_list_remove(psock, &stab->sock_map[i]);
		smap_release_sock(psock, sock);
		write_unlock_bh(&sock->sk_callback_lock);
	}
	rcu_read_unlock();

	if (stab->bpf_verdict)
		bpf_prog_put(stab->bpf_verdict);
	if (stab->bpf_parse)
		bpf_prog_put(stab->bpf_parse);

	sock_map_remove_complete(stab);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock *psock;
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);
	if (!psock)
		goto out;

	if (psock->bpf_parse)
		smap_stop_sock(psock, sock);
	smap_list_remove(psock, &stab->sock_map[k]);
	smap_release_sock(psock, sock);
out:
	write_unlock_bh(&sock->sk_callback_lock);
	return 0;
}

/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced, so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps it belongs to. A reference count
 * is kept with the total number of references to the psock from all maps. The
 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
 * from concurrent access. This prevents two updates from modifying the user
 * data in sock at the same time, and the lock is required anyway for
 * modifying callbacks; we simply increase its scope slightly.
 *
 * Rules to follow:
 * - psock must always be read inside RCU critical section
 * - sk_user_data must only be modified inside sk_callback_lock and read
 *   inside RCU critical section.
 * - psock->maps list must only be read & modified inside sk_callback_lock
 * - sock_map must use READ_ONCE and (cmp)xchg operations
 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock_map_entry *e = NULL;
	struct bpf_prog *verdict, *parse;
	struct sock *osock, *sock;
	struct smap_psock *psock;
	u32 i = *(u32 *)key;
	int err;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	sock = READ_ONCE(stab->sock_map[i]);
	if (flags == BPF_EXIST && !sock)
		return -ENOENT;
	else if (flags == BPF_NOEXIST && sock)
		return -EEXIST;

	sock = skops->sk;

	/* 1. If the sock map has BPF programs, those will be inherited by the
	 * sock being added. If the sock is already attached to BPF programs
	 * this results in an error.
	 */
	verdict = READ_ONCE(stab->bpf_verdict);
	parse = READ_ONCE(stab->bpf_parse);

	if (parse && verdict) {
		/* bpf prog refcnt may be zero if a concurrent attach operation
		 * removes the program after the above READ_ONCE() but before
		 * we increment the refcnt. If this is the case abort with an
		 * error.
		 */
		verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(stab->bpf_parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);

	/* 2. Do not allow inheriting programs if psock exists and has
	 * already inherited programs. This would create confusion on
	 * which parser/verdict program is running. If no psock exists
	 * create one. Done inside sk_callback_lock to ensure a concurrent
	 * create doesn't update user data.
	 */
	if (psock) {
		if (READ_ONCE(psock->bpf_parse) && parse) {
			err = -EBUSY;
			goto out_progs;
		}
		psock->refcnt++;
	} else {
		psock = smap_init_psock(sock, stab);
		if (IS_ERR(psock)) {
			err = PTR_ERR(psock);
			goto out_progs;
		}

		set_bit(SMAP_TX_RUNNING, &psock->state);
	}

	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e) {
		err = -ENOMEM;
		goto out_progs;
	}
	e->entry = &stab->sock_map[i];

	/* 3. At this point we have a reference to a valid psock that is
	 * running. Attach any BPF programs needed.
	 */
	if (parse && verdict && !psock->strp_enabled) {
		err = smap_init_sock(psock, sock);
		if (err)
			goto out_free;
		smap_init_progs(psock, stab, verdict, parse);
		smap_start_sock(psock, sock);
	}

	/* 4. Place psock in sockmap for use and stop any programs on
	 * the old sock assuming it's not the same sock we are replacing
	 * it with. Because we can only have a single set of programs, if
	 * old_sock has a strp we can stop it.
	 */
	list_add_tail(&e->list, &psock->maps);
	write_unlock_bh(&sock->sk_callback_lock);

	osock = xchg(&stab->sock_map[i], sock);
	if (osock) {
		struct smap_psock *opsock = smap_psock_sk(osock);

		write_lock_bh(&osock->sk_callback_lock);
		if (osock != sock && parse)
			smap_stop_sock(opsock, osock);
		smap_list_remove(opsock, &stab->sock_map[i]);
		smap_release_sock(opsock, osock);
		write_unlock_bh(&osock->sk_callback_lock);
	}
	return 0;
out_free:
	smap_release_sock(psock, sock);
out_progs:
	if (verdict)
		bpf_prog_put(verdict);
	if (parse)
		bpf_prog_put(parse);
	write_unlock_bh(&sock->sk_callback_lock);
	kfree(e);
	return err;
}

int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
		return -EINVAL;

	switch (type) {
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&stab->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&stab->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}
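
/* From user space the parser and verdict programs reach this function through
 * BPF_PROG_ATTACH with the map fd as the attach target. A rough sketch using
 * the libbpf helpers (fd names are illustrative, error handling omitted):
 *
 *	bpf_prog_attach(parse_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 */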

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	if (skops.sk->sk_type != SOCK_STREAM ||
	    skops.sk->sk_protocol != IPPROTO_TCP) {
		fput(socket->file);
		return -EOPNOTSUPP;
	}

	err = sock_map_ctx_update_elem(&skops, map, key, flags);
	fput(socket->file);
	return err;
}
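
/* A sockmap entry's value is the fd of a TCP socket. From user space an
 * update therefore looks roughly like this (illustrative sketch using the
 * libbpf helper; variable names are hypothetical):
 *
 *	int key = 0;
 *	int sock_fd = accept(listen_fd, NULL, NULL);
 *
 *	bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 */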

const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
};

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};