]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - kernel/bpf/sockmap.c
net: Add comment that early_demux can change via sysctl
[mirror_ubuntu-bionic-kernel.git] / kernel / bpf / sockmap.c
CommitLineData
174a79ff
JF
1/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
11 */
12
/* A BPF sock_map is used to store sock objects. This is primarily used
14 * for doing socket redirect with BPF helper routines.
15 *
2f857d04
JF
16 * A sock map may have BPF programs attached to it, currently a program
17 * used to parse packets and a program to provide a verdict and redirect
18 * decision on the packet are supported. Any programs attached to a sock
19 * map are inherited by sock objects when they are added to the map. If
20 * no BPF programs are attached the sock object may only be used for sock
21 * redirect.
22 *
23 * A sock object may be in multiple maps, but can only inherit a single
24 * parse or verdict program. If adding a sock object to a map would result
25 * in having multiple parsing programs the update will return an EBUSY error.
174a79ff
JF
26 *
27 * For reference this program is similar to devmap used in XDP context
28 * reviewing these together may be useful. For an example please review
29 * ./samples/bpf/sockmap/.
30 */
31#include <linux/bpf.h>
32#include <net/sock.h>
33#include <linux/filter.h>
34#include <linux/errno.h>
35#include <linux/file.h>
36#include <linux/kernel.h>
37#include <linux/net.h>
38#include <linux/skbuff.h>
39#include <linux/workqueue.h>
40#include <linux/list.h>
41#include <net/strparser.h>
42
/* Map backing store for a sockmap: a flat array of sock pointers plus
 * the parse/verdict BPF programs that sockets inherit when added.
 */
struct bpf_stab {
	struct bpf_map map;		/* generic bpf map header, must be first */
	struct sock **sock_map;		/* array of max_entries sock pointers */
	struct bpf_prog *bpf_parse;	/* strparser parse program (may be NULL) */
	struct bpf_prog *bpf_verdict;	/* verdict/redirect program (may be NULL) */
};
49
/* Bit numbers for smap_psock::state (used with set_bit/test_bit). */
enum smap_psock_state {
	SMAP_TX_RUNNING,	/* when set, tx_work may be scheduled and skbs queued */
};
53
2f857d04
JF
/* Links a psock to one slot of one sockmap. One entry is allocated per
 * (psock, map slot) pair and chained on smap_psock::maps; freed when the
 * sock is removed from that slot or in smap_gc_work().
 */
struct smap_psock_map_entry {
	struct list_head list;	/* node on smap_psock::maps */
	struct sock **entry;	/* the sock_map slot holding this sock */
};
58
174a79ff
JF
/* Per-socket sockmap state, attached via sk_user_data. */
struct smap_psock {
	struct rcu_head rcu;
	/* refcnt is used inside sk_callback_lock */
	u32 refcnt;

	/* datapath variables */
	struct sk_buff_head rxqueue;	/* skbs awaiting transmit by tx_work */
	bool strp_enabled;		/* strparser initialized and callbacks swapped in */

	/* datapath error path cache across tx work invocations */
	int save_rem;			/* bytes of save_skb still to send */
	int save_off;			/* offset already sent in save_skb */
	struct sk_buff *save_skb;	/* partially-sent skb retried on next run */

	struct strparser strp;
	struct bpf_prog *bpf_parse;	/* inherited parse program, NULL if none */
	struct bpf_prog *bpf_verdict;	/* inherited verdict program, NULL if none */
	struct list_head maps;		/* smap_psock_map_entry list; under sk_callback_lock */

	/* Back reference used when sock callback trigger sockmap operations */
	struct sock *sock;
	unsigned long state;		/* SMAP_* bits, see smap_psock_state */

	struct work_struct tx_work;	/* drains rxqueue to the socket */
	struct work_struct gc_work;	/* final teardown after RCU grace period */

	/* original sock callbacks, restored by smap_stop_sock() */
	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
	void (*save_state_change)(struct sock *sk);
};
89
/* Fetch the psock attached to sk, or NULL. Must be called inside an RCU
 * read-side critical section (sk_user_data is RCU-managed here).
 */
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}
94
/* Run the verdict program on skb and return its SK_* verdict; SK_DROP if
 * no program is attached. skb->sk is temporarily pointed at the psock's
 * sock so the program can use sock-based helpers, and cleared again to
 * avoid skb/sk accounting problems later (skb was orphaned first).
 */
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return SK_DROP;

	skb_orphan(skb);
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;

	return rc;
}
111
/* Apply the verdict program to skb and either queue it on the redirect
 * target's psock (charging the peer's memory accounting) or free it.
 * Consumes skb in all cases.
 */
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sock;
	int rc;

	/* Because we use per cpu values to feed input from sock redirect
	 * in BPF program to do_sk_redirect_map() call we need to ensure we
	 * are not preempted. RCU read lock is not sufficient in this case
	 * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
	 */
	preempt_disable();
	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case SK_REDIRECT:
		sock = do_sk_redirect_map();
		preempt_enable();
		if (likely(sock)) {
			struct smap_psock *peer = smap_psock_sk(sock);

			/* Only queue to a peer that is accepting skbs and
			 * has write memory available; otherwise drop below.
			 */
			if (likely(peer &&
				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
				   sk_stream_memory_free(peer->sock))) {
				peer->sock->sk_wmem_queued += skb->truesize;
				sk_mem_charge(peer->sock, skb->truesize);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case SK_DROP:
	default:
		/* the SK_REDIRECT path already re-enabled preemption above */
		if (rc != SK_REDIRECT)
			preempt_enable();
		kfree_skb(skb);
	}
}
149
150static void smap_report_sk_error(struct smap_psock *psock, int err)
151{
152 struct sock *sk = psock->sock;
153
154 sk->sk_err = err;
155 sk->sk_error_report(sk);
156}
157
2f857d04 158static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
174a79ff
JF
159
160/* Called with lock_sock(sk) held */
161static void smap_state_change(struct sock *sk)
162{
2f857d04 163 struct smap_psock_map_entry *e, *tmp;
174a79ff 164 struct smap_psock *psock;
78aeaaef 165 struct socket_wq *wq;
174a79ff
JF
166 struct sock *osk;
167
168 rcu_read_lock();
169
170 /* Allowing transitions into an established syn_recv states allows
171 * for early binding sockets to a smap object before the connection
172 * is established.
173 */
174 switch (sk->sk_state) {
78aeaaef 175 case TCP_SYN_SENT:
174a79ff
JF
176 case TCP_SYN_RECV:
177 case TCP_ESTABLISHED:
178 break;
179 case TCP_CLOSE_WAIT:
180 case TCP_CLOSING:
181 case TCP_LAST_ACK:
182 case TCP_FIN_WAIT1:
183 case TCP_FIN_WAIT2:
184 case TCP_LISTEN:
185 break;
186 case TCP_CLOSE:
187 /* Only release if the map entry is in fact the sock in
188 * question. There is a case where the operator deletes
189 * the sock from the map, but the TCP sock is closed before
190 * the psock is detached. Use cmpxchg to verify correct
191 * sock is removed.
192 */
193 psock = smap_psock_sk(sk);
194 if (unlikely(!psock))
195 break;
2f857d04
JF
196 write_lock_bh(&sk->sk_callback_lock);
197 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
198 osk = cmpxchg(e->entry, sk, NULL);
199 if (osk == sk) {
200 list_del(&e->list);
201 smap_release_sock(psock, sk);
202 }
203 }
204 write_unlock_bh(&sk->sk_callback_lock);
174a79ff
JF
205 break;
206 default:
cf56e3b9
JF
207 psock = smap_psock_sk(sk);
208 if (unlikely(!psock))
209 break;
174a79ff
JF
210 smap_report_sk_error(psock, EPIPE);
211 break;
212 }
78aeaaef
JF
213
214 wq = rcu_dereference(sk->sk_wq);
215 if (skwq_has_sleeper(wq))
216 wake_up_interruptible_all(&wq->wait);
174a79ff
JF
217 rcu_read_unlock();
218}
219
220static void smap_read_sock_strparser(struct strparser *strp,
221 struct sk_buff *skb)
222{
223 struct smap_psock *psock;
224
225 rcu_read_lock();
226 psock = container_of(strp, struct smap_psock, strp);
227 smap_do_verdict(psock, skb);
228 rcu_read_unlock();
229}
230
/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	/* RCU protects the psock lookup; sk_callback_lock serializes
	 * strp_data_ready() against callback swap/teardown paths that
	 * also take the lock (smap_stop_sock callers).
	 */
	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}
245
/* Workqueue handler that drains psock->rxqueue to the socket. A partial
 * send (-EAGAIN) is cached in save_skb/save_rem/save_off and resumed on
 * the next invocation; any other error reports the failure, stops TX and
 * drops the skb. Memory charged at enqueue time is uncharged per skb.
 */
static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during loop */
	lock_sock(psock->sock);
	/* resume a partially-sent skb from a previous run, if any */
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		rem = skb->len;
		off = 0;
start:
		do {
			if (likely(psock->sock->sk_socket))
				n = skb_send_sock_locked(psock->sock,
							 skb, off, rem);
			else
				n = -EINVAL;
			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				sk_mem_uncharge(psock->sock, skb->truesize);
				psock->sock->sk_wmem_queued -= skb->truesize;
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem);
		/* fully sent: release the memory charged at enqueue */
		sk_mem_uncharge(psock->sock, skb->truesize);
		psock->sock->sk_wmem_queued -= skb->truesize;
		kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}
300
301static void smap_write_space(struct sock *sk)
302{
303 struct smap_psock *psock;
304
305 rcu_read_lock();
306 psock = smap_psock_sk(sk);
307 if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
308 schedule_work(&psock->tx_work);
309 rcu_read_unlock();
310}
311
/* Restore the socket's original callbacks and stop the strparser.
 * No-op unless strparser was enabled. Callers hold sk_callback_lock
 * (see smap_release_sock / sock_map_delete_elem paths).
 */
static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	if (!psock->strp_enabled)
		return;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	sk->sk_state_change = psock->save_state_change;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	psock->save_state_change = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
}
325
/* RCU callback scheduling the real teardown onto a workqueue. */
static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we can not
	 * do it in rcu context
	 */
	schedule_work(&psock->gc_work);
}
339
/* Drop one psock reference; on the last one detach the psock from the
 * sock and queue destruction after an RCU grace period. The refcnt is
 * protected by sk_callback_lock, which all callers hold.
 */
static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
	psock->refcnt--;
	if (psock->refcnt)
		return;

	smap_stop_sock(psock, sock);
	clear_bit(SMAP_TX_RUNNING, &psock->state);
	rcu_assign_sk_user_data(sock, NULL);
	call_rcu_sched(&psock->rcu, smap_destroy_psock);
}
351
/* strparser parse_msg callback: run the parse BPF program to find the
 * message length. With no program attached, treat the whole skb as one
 * message (return skb->len).
 */
static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach socket for bpf program to use if needed we can do this
	 * because strparser clones the skb before handing it to a upper
	 * layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}
382
383
/* strparser read_sock_done callback: nothing to clean up, pass err back. */
static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}
388
/* Initialize the psock's strparser on sk with the sockmap callbacks.
 * Returns strp_init()'s result (0 on success, negative errno otherwise).
 */
static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = smap_read_sock_strparser,
		.parse_msg = smap_parse_func_strparser,
		.read_sock_done = smap_read_sock_done,
	};

	return strp_init(&psock->strp, sk, &cb);
}
400
401static void smap_init_progs(struct smap_psock *psock,
402 struct bpf_stab *stab,
403 struct bpf_prog *verdict,
404 struct bpf_prog *parse)
405{
406 struct bpf_prog *orig_parse, *orig_verdict;
407
408 orig_parse = xchg(&psock->bpf_parse, parse);
409 orig_verdict = xchg(&psock->bpf_verdict, verdict);
410
411 if (orig_verdict)
412 bpf_prog_put(orig_verdict);
413 if (orig_parse)
414 bpf_prog_put(orig_parse);
415}
416
/* Swap the socket's callbacks for the sockmap versions, saving the
 * originals for smap_stop_sock(). Idempotent: bails out if our
 * data_ready handler is already installed.
 */
static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	/* save originals before overwriting so they can be restored */
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	psock->save_state_change = sk->sk_state_change;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	sk->sk_state_change = smap_state_change;
	psock->strp_enabled = true;
}
429
/* Final teardown of the map's backing storage; called from sock_map_free()
 * after all socks have been released.
 */
static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}
435
/* Workqueue handler performing final psock destruction, scheduled from
 * smap_destroy_psock() after an RCU grace period: stop the strparser,
 * cancel TX work, purge queued skbs, drop program references, free any
 * remaining map entries and release the sock reference.
 */
static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		list_del(&e->list);
		kfree(e);
	}

	sock_put(psock->sock);
	kfree(psock);
}
464
/* Allocate and attach a psock to sock with refcnt 1 and a sock_hold()
 * reference. Returns the psock or ERR_PTR(-ENOMEM). Callers hold
 * sk_callback_lock, hence the GFP_ATOMIC allocation.
 */
static struct smap_psock *smap_init_psock(struct sock *sock,
					  struct bpf_stab *stab)
{
	struct smap_psock *psock;

	psock = kzalloc_node(sizeof(struct smap_psock),
			     GFP_ATOMIC | __GFP_NOWARN,
			     stab->map.numa_node);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);
	INIT_LIST_HEAD(&psock->maps);
	psock->refcnt = 1;

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}
487
488static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
489{
490 struct bpf_stab *stab;
491 int err = -EINVAL;
492 u64 cost;
493
494 /* check sanity of attributes */
495 if (attr->max_entries == 0 || attr->key_size != 4 ||
96eabe7a 496 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
174a79ff
JF
497 return ERR_PTR(-EINVAL);
498
499 if (attr->value_size > KMALLOC_MAX_SIZE)
500 return ERR_PTR(-E2BIG);
501
502 stab = kzalloc(sizeof(*stab), GFP_USER);
503 if (!stab)
504 return ERR_PTR(-ENOMEM);
505
506 /* mandatory map attributes */
507 stab->map.map_type = attr->map_type;
508 stab->map.key_size = attr->key_size;
509 stab->map.value_size = attr->value_size;
510 stab->map.max_entries = attr->max_entries;
511 stab->map.map_flags = attr->map_flags;
96eabe7a 512 stab->map.numa_node = bpf_map_attr_numa_node(attr);
174a79ff
JF
513
514 /* make sure page count doesn't overflow */
515 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
516 if (cost >= U32_MAX - PAGE_SIZE)
517 goto free_stab;
518
519 stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
520
521 /* if map size is larger than memlock limit, reject it early */
522 err = bpf_map_precharge_memlock(stab->map.pages);
523 if (err)
524 goto free_stab;
525
526 stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
96eabe7a
MKL
527 sizeof(struct sock *),
528 stab->map.numa_node);
174a79ff
JF
529 if (!stab->sock_map)
530 goto free_stab;
531
174a79ff
JF
532 return &stab->map;
533free_stab:
534 kfree(stab);
535 return ERR_PTR(err);
536}
537
2f857d04
JF
538static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
539{
540 struct smap_psock_map_entry *e, *tmp;
541
542 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
543 if (e->entry == entry) {
544 list_del(&e->list);
545 break;
546 }
547 }
548}
549
174a79ff
JF
550static void sock_map_free(struct bpf_map *map)
551{
552 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
553 int i;
554
555 synchronize_rcu();
556
557 /* At this point no update, lookup or delete operations can happen.
558 * However, be aware we can still get a socket state event updates,
559 * and data ready callabacks that reference the psock from sk_user_data
560 * Also psock worker threads are still in-flight. So smap_release_sock
561 * will only free the psock after cancel_sync on the worker threads
562 * and a grace period expire to ensure psock is really safe to remove.
563 */
564 rcu_read_lock();
565 for (i = 0; i < stab->map.max_entries; i++) {
2f857d04 566 struct smap_psock *psock;
174a79ff
JF
567 struct sock *sock;
568
569 sock = xchg(&stab->sock_map[i], NULL);
570 if (!sock)
571 continue;
572
2f857d04
JF
573 write_lock_bh(&sock->sk_callback_lock);
574 psock = smap_psock_sk(sock);
575 smap_list_remove(psock, &stab->sock_map[i]);
576 smap_release_sock(psock, sock);
577 write_unlock_bh(&sock->sk_callback_lock);
174a79ff
JF
578 }
579 rcu_read_unlock();
580
581 if (stab->bpf_verdict)
582 bpf_prog_put(stab->bpf_verdict);
583 if (stab->bpf_parse)
584 bpf_prog_put(stab->bpf_parse);
585
2f857d04 586 sock_map_remove_complete(stab);
174a79ff
JF
587}
588
589static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
590{
591 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
592 u32 i = key ? *(u32 *)key : U32_MAX;
593 u32 *next = (u32 *)next_key;
594
595 if (i >= stab->map.max_entries) {
596 *next = 0;
597 return 0;
598 }
599
600 if (i == stab->map.max_entries - 1)
601 return -ENOENT;
602
603 *next = i + 1;
604 return 0;
605}
606
607struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
608{
609 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
610
611 if (key >= map->max_entries)
612 return NULL;
613
614 return READ_ONCE(stab->sock_map[key]);
615}
616
/* Remove the sock at index *key from the map: clear the slot, restore
 * the sock's callbacks if programs were attached, unlink the map entry
 * and drop the psock reference. Returns -EINVAL for a bad index or an
 * already-empty slot.
 */
static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock *psock;
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);
	/* psock may already be gone if the sock closed concurrently */
	if (!psock)
		goto out;

	if (psock->bpf_parse)
		smap_stop_sock(psock, sock);
	smap_list_remove(psock, &stab->sock_map[k]);
	smap_release_sock(psock, sock);
out:
	write_unlock_bh(&sock->sk_callback_lock);
	return 0;
}
644
645/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
646 * done inside rcu critical sections. This ensures on updates that the psock
647 * will not be released via smap_release_sock() until concurrent updates/deletes
648 * complete. All operations operate on sock_map using cmpxchg and xchg
649 * operations to ensure we do not get stale references. Any reads into the
650 * map must be done with READ_ONCE() because of this.
651 *
652 * A psock is destroyed via call_rcu and after any worker threads are cancelled
653 * and syncd so we are certain all references from the update/lookup/delete
654 * operations as well as references in the data path are no longer in use.
655 *
2f857d04
JF
656 * Psocks may exist in multiple maps, but only a single set of parse/verdict
657 * programs may be inherited from the maps it belongs to. A reference count
658 * is kept with the total number of references to the psock from all maps. The
659 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
661 * from concurrent access. This allows us to avoid two updates from modifying
662 * the user data in sock and the lock is required anyways for modifying
663 * callbacks, we simply increase its scope slightly.
174a79ff 664 *
2f857d04
JF
665 * Rules to follow,
666 * - psock must always be read inside RCU critical section
667 * - sk_user_data must only be modified inside sk_callback_lock and read
668 * inside RCU critical section.
669 * - psock->maps list must only be read & modified inside sk_callback_lock
670 * - sock_map must use READ_ONCE and (cmp)xchg operations
671 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
174a79ff
JF
672 */
673static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
674 struct bpf_map *map,
2f857d04 675 void *key, u64 flags)
174a79ff
JF
676{
677 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
2f857d04 678 struct smap_psock_map_entry *e = NULL;
174a79ff 679 struct bpf_prog *verdict, *parse;
2f857d04
JF
680 struct sock *osock, *sock;
681 struct smap_psock *psock;
174a79ff 682 u32 i = *(u32 *)key;
2f857d04 683 int err;
174a79ff
JF
684
685 if (unlikely(flags > BPF_EXIST))
686 return -EINVAL;
687
688 if (unlikely(i >= stab->map.max_entries))
689 return -E2BIG;
690
174a79ff 691 sock = READ_ONCE(stab->sock_map[i]);
2f857d04
JF
692 if (flags == BPF_EXIST && !sock)
693 return -ENOENT;
694 else if (flags == BPF_NOEXIST && sock)
174a79ff 695 return -EEXIST;
174a79ff 696
2f857d04 697 sock = skops->sk;
174a79ff 698
2f857d04
JF
699 /* 1. If sock map has BPF programs those will be inherited by the
700 * sock being added. If the sock is already attached to BPF programs
701 * this results in an error.
702 */
703 verdict = READ_ONCE(stab->bpf_verdict);
704 parse = READ_ONCE(stab->bpf_parse);
174a79ff 705
2f857d04 706 if (parse && verdict) {
174a79ff
JF
707 /* bpf prog refcnt may be zero if a concurrent attach operation
708 * removes the program after the above READ_ONCE() but before
709 * we increment the refcnt. If this is the case abort with an
710 * error.
711 */
712 verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
713 if (IS_ERR(verdict))
714 return PTR_ERR(verdict);
715
716 parse = bpf_prog_inc_not_zero(stab->bpf_parse);
717 if (IS_ERR(parse)) {
718 bpf_prog_put(verdict);
719 return PTR_ERR(parse);
720 }
721 }
722
2f857d04
JF
723 write_lock_bh(&sock->sk_callback_lock);
724 psock = smap_psock_sk(sock);
725
726 /* 2. Do not allow inheriting programs if psock exists and has
727 * already inherited programs. This would create confusion on
728 * which parser/verdict program is running. If no psock exists
729 * create one. Inside sk_callback_lock to ensure concurrent create
730 * doesn't update user data.
731 */
732 if (psock) {
733 if (READ_ONCE(psock->bpf_parse) && parse) {
734 err = -EBUSY;
735 goto out_progs;
736 }
737 psock->refcnt++;
738 } else {
174a79ff
JF
739 psock = smap_init_psock(sock, stab);
740 if (IS_ERR(psock)) {
2f857d04
JF
741 err = PTR_ERR(psock);
742 goto out_progs;
174a79ff 743 }
2f857d04 744
174a79ff
JF
745 set_bit(SMAP_TX_RUNNING, &psock->state);
746 }
747
2f857d04
JF
748 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
749 if (!e) {
750 err = -ENOMEM;
751 goto out_progs;
752 }
753 e->entry = &stab->sock_map[i];
754
755 /* 3. At this point we have a reference to a valid psock that is
756 * running. Attach any BPF programs needed.
757 */
758 if (parse && verdict && !psock->strp_enabled) {
174a79ff
JF
759 err = smap_init_sock(psock, sock);
760 if (err)
2f857d04 761 goto out_free;
174a79ff
JF
762 smap_init_progs(psock, stab, verdict, parse);
763 smap_start_sock(psock, sock);
174a79ff
JF
764 }
765
2f857d04
JF
766 /* 4. Place psock in sockmap for use and stop any programs on
767 * the old sock assuming its not the same sock we are replacing
768 * it with. Because we can only have a single set of programs if
769 * old_sock has a strp we can stop it.
770 */
771 list_add_tail(&e->list, &psock->maps);
772 write_unlock_bh(&sock->sk_callback_lock);
174a79ff 773
2f857d04
JF
774 osock = xchg(&stab->sock_map[i], sock);
775 if (osock) {
776 struct smap_psock *opsock = smap_psock_sk(osock);
777
778 write_lock_bh(&osock->sk_callback_lock);
779 if (osock != sock && parse)
780 smap_stop_sock(opsock, osock);
781 smap_list_remove(opsock, &stab->sock_map[i]);
782 smap_release_sock(opsock, osock);
783 write_unlock_bh(&osock->sk_callback_lock);
784 }
174a79ff 785 return 0;
2f857d04
JF
786out_free:
787 smap_release_sock(psock, sock);
788out_progs:
789 if (verdict)
790 bpf_prog_put(verdict);
791 if (parse)
792 bpf_prog_put(parse);
174a79ff 793 write_unlock_bh(&sock->sk_callback_lock);
2f857d04 794 kfree(e);
174a79ff
JF
795 return err;
796}
797
/* Attach prog to the map as the parse or verdict program (per type),
 * dropping the reference to any program it replaces. Newly added socks
 * inherit the programs; existing socks keep what they already inherited.
 * Returns -EINVAL for non-sockmap maps, -EOPNOTSUPP for unknown types.
 */
int sock_map_attach_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
		return -EINVAL;

	switch (type) {
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&stab->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&stab->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}
822
/* Userspace lookup is not supported for sockmaps (sock pointers must not
 * be exposed); always return NULL.
 */
static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}
827
828static int sock_map_update_elem(struct bpf_map *map,
829 void *key, void *value, u64 flags)
830{
831 struct bpf_sock_ops_kern skops;
832 u32 fd = *(u32 *)value;
833 struct socket *socket;
834 int err;
835
836 socket = sockfd_lookup(fd, &err);
837 if (!socket)
838 return err;
839
840 skops.sk = socket->sk;
841 if (!skops.sk) {
842 fput(socket->file);
843 return -EINVAL;
844 }
845
2f857d04 846 err = sock_map_ctx_update_elem(&skops, map, key, flags);
174a79ff
JF
847 fput(socket->file);
848 return err;
849}
850
/* Operations vector registered for BPF_MAP_TYPE_SOCKMAP. */
const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
};
859
2f857d04
JF
/* bpf_sock_map_update() helper: called from a sock_ops BPF program to add
 * the current sock to a sockmap. Runs under RCU (asserted below) and
 * reuses the same update path as the syscall.
 */
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}
866
/* Verifier prototype for the bpf_sock_map_update() helper. */
const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};