/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have two BPF programs attached to it, a program used
 * to parse packets and a program to provide a verdict and redirect
 * decision on the packet. If no BPF parse program is provided it is
 * assumed that every skb is a "message" (skb->len). Otherwise the
 * parse program is attached to strparser and used to build messages
 * that may span multiple skbs. The verdict program will either select
 * a socket to send/receive the skb on or provide the drop code indicating
 * the skb should be dropped. More actions may be added later as needed.
 * The default program will drop packets.
 *
 * For reference this program is similar to devmap used in the XDP context;
 * reviewing these together may be useful. For an example please review
 * ./samples/bpf/sockmap/.
 */
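/* Illustrative sketch (not part of this file): a minimal parse/verdict
 * program pair as it might look on the BPF side, assuming a "sock_map"
 * map definition and the bpf_sk_redirect_map() helper as used by
 * ./samples/bpf/sockmap/. The parse program treats every skb as a
 * complete message; the verdict program redirects each message to the
 * socket stored at index 0 of the map.
 *
 *	SEC("sk_skb/parse")
 *	int parse_prog(struct __sk_buff *skb)
 *	{
 *		return skb->len;
 *	}
 *
 *	SEC("sk_skb/verdict")
 *	int verdict_prog(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(&sock_map, 0, 0);
 *	}
 */
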
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	refcount_t refcnt;
};

enum smap_psock_state {
	SMAP_TX_RUNNING,
};

struct smap_psock {
	struct rcu_head rcu;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	struct strparser strp;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct bpf_stab *stab;

	/* Back reference used when sock callbacks trigger sockmap operations */
	int key;
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
	void (*save_state_change)(struct sock *sk);
};

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return (struct smap_psock *)rcu_dereference_sk_user_data(sk);
}

static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return SK_DROP;

	skb_orphan(skb);
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;

	return rc;
}

static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sock;
	int rc;

	/* Because we use per-cpu values to pass the redirect target from the
	 * BPF program to the do_sk_redirect_map() call, we need to ensure we
	 * are not preempted. The RCU read lock is not sufficient in this case
	 * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
	 */
	preempt_disable();
	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case SK_REDIRECT:
		sock = do_sk_redirect_map();
		preempt_enable();
		if (likely(sock)) {
			struct smap_psock *peer = smap_psock_sk(sock);

			if (likely(peer &&
				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
				   sk_stream_memory_free(peer->sock))) {
				peer->sock->sk_wmem_queued += skb->truesize;
				sk_mem_charge(peer->sock, skb->truesize);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case SK_DROP:
	default:
		preempt_enable();
		kfree_skb(skb);
	}
}

static void smap_report_sk_error(struct smap_psock *psock, int err)
{
	struct sock *sk = psock->sock;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static void smap_release_sock(struct sock *sock);

/* Called with lock_sock(sk) held */
static void smap_state_change(struct sock *sk)
{
	struct smap_psock *psock;
	struct sock *osk;

	rcu_read_lock();

	/* Allowing transitions into the established and syn_recv states allows
	 * sockets to be bound to a smap object early, before the connection
	 * is established.
	 */
	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		break;
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LISTEN:
		break;
	case TCP_CLOSE:
		/* Only release if the map entry is in fact the sock in
		 * question. There is a case where the operator deletes
		 * the sock from the map, but the TCP sock is closed before
		 * the psock is detached. Use cmpxchg to verify the correct
		 * sock is removed.
		 */
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		osk = cmpxchg(&psock->stab->sock_map[psock->key], sk, NULL);
		if (osk == sk)
			smap_release_sock(sk);
		break;
	default:
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		smap_report_sk_error(psock, EPIPE);
		break;
	}
	rcu_read_unlock();
}

static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	smap_do_verdict(psock, skb);
	rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	write_lock_bh(&sk->sk_callback_lock);
	psock = smap_psock_sk(sk);
	if (likely(psock))
		strp_data_ready(&psock->strp);
	write_unlock_bh(&sk->sk_callback_lock);
}

static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during loop */
	lock_sock(psock->sock);
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		rem = skb->len;
		off = 0;
start:
		do {
			if (likely(psock->sock->sk_socket))
				n = skb_send_sock_locked(psock->sock,
							 skb, off, rem);
			else
				n = -EINVAL;
			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				sk_mem_uncharge(psock->sock, skb->truesize);
				psock->sock->sk_wmem_queued -= skb->truesize;
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem);
		sk_mem_uncharge(psock->sock, skb->truesize);
		psock->sock->sk_wmem_queued -= skb->truesize;
		kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
		schedule_work(&psock->tx_work);
	rcu_read_unlock();
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	if (!psock->strp_enabled)
		goto out;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	sk->sk_state_change = psock->save_state_change;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	psock->save_state_change = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
out:
	write_unlock_bh(&sk->sk_callback_lock);
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we cannot
	 * do it in rcu context.
	 */
	schedule_work(&psock->gc_work);
}

static void smap_release_sock(struct sock *sock)
{
	struct smap_psock *psock = smap_psock_sk(sock);

	smap_stop_sock(psock, sock);
	clear_bit(SMAP_TX_RUNNING, &psock->state);
	rcu_assign_sk_user_data(sock, NULL);
	call_rcu_sched(&psock->rcu, smap_destroy_psock);
}

static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach the socket for the bpf program to use if needed. We can do
	 * this because strparser clones the skb before handing it to an upper
	 * layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}

static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}

static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	struct strp_callbacks cb;

	memset(&cb, 0, sizeof(cb));
	cb.rcv_msg = smap_read_sock_strparser;
	cb.parse_msg = smap_parse_func_strparser;
	cb.read_sock_done = smap_read_sock_done;
	return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
			    struct bpf_stab *stab,
			    struct bpf_prog *verdict,
			    struct bpf_prog *parse)
{
	struct bpf_prog *orig_parse, *orig_verdict;

	orig_parse = xchg(&psock->bpf_parse, parse);
	orig_verdict = xchg(&psock->bpf_verdict, verdict);

	if (orig_verdict)
		bpf_prog_put(orig_verdict);
	if (orig_parse)
		bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	psock->save_state_change = sk->sk_state_change;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	sk->sk_state_change = smap_state_change;
	psock->strp_enabled = true;
}

static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);

	if (refcount_dec_and_test(&psock->stab->refcnt))
		sock_map_remove_complete(psock->stab);

	sock_put(psock->sock);
	kfree(psock);
}

static struct smap_psock *smap_init_psock(struct sock *sock,
					  struct bpf_stab *stab)
{
	struct smap_psock *psock;

	psock = kzalloc(sizeof(struct smap_psock), GFP_ATOMIC | __GFP_NOWARN);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	int err = -EINVAL;
	u64 cost;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	stab->map.map_type = attr->map_type;
	stab->map.key_size = attr->key_size;
	stab->map.value_size = attr->value_size;
	stab->map.max_entries = attr->max_entries;
	stab->map.map_flags = attr->map_flags;

	/* make sure page count doesn't overflow */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_stab;

	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(stab->map.pages);
	if (err)
		goto free_stab;

	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
					    sizeof(struct sock *));
	if (!stab->sock_map)
		goto free_stab;

	refcount_set(&stab->refcnt, 1);
	return &stab->map;
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state change events and
	 * data ready callbacks that reference the psock from sk_user_data.
	 * Also, psock worker threads are still in flight. So smap_release_sock
	 * will only free the psock after cancel_work_sync() on the worker
	 * threads and after a grace period expires, to ensure the psock is
	 * really safe to remove.
	 */
	rcu_read_lock();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock *sock;

		sock = xchg(&stab->sock_map[i], NULL);
		if (!sock)
			continue;

		smap_release_sock(sock);
	}
	rcu_read_unlock();

	if (stab->bpf_verdict)
		bpf_prog_put(stab->bpf_verdict);
	if (stab->bpf_parse)
		bpf_prog_put(stab->bpf_parse);

	if (refcount_dec_and_test(&stab->refcnt))
		sock_map_remove_complete(stab);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	smap_release_sock(sock);
	return 0;
}

/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced, so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * A psock object holds a refcnt on the sockmap it is attached to and this is
 * not decremented until after an RCU grace period and garbage collection occur.
 * This ensures the map is not freed until psocks linked to it are removed. The
 * map link is used when independent sock events trigger map deletion.
 *
 * Psocks may only participate in one sockmap at a time. Users that try to
 * join a single sock to multiple maps will get an error.
 *
 * Last, but not least, it is possible the socket is closed while running
 * an update on an existing psock. This will release the psock, but again
 * not until the update has completed due to rcu grace period rules.
 */
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags, u64 map_flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *verdict, *parse;
	struct smap_psock *psock = NULL;
	struct sock *old_sock, *sock;
	u32 i = *(u32 *)key;
	bool update = false;
	int err = 0;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	if (unlikely(map_flags > BPF_SOCKMAP_STRPARSER))
		return -EINVAL;

	verdict = parse = NULL;
	sock = READ_ONCE(stab->sock_map[i]);

	if (flags == BPF_EXIST || flags == BPF_ANY) {
		if (!sock && flags == BPF_EXIST) {
			return -ENOENT;
		} else if (sock && sock != skops->sk) {
			return -EINVAL;
		} else if (sock) {
			psock = smap_psock_sk(sock);
			if (unlikely(!psock))
				return -EBUSY;
			update = true;
		}
	} else if (sock && flags == BPF_NOEXIST) {
		return -EEXIST;
	}

	/* reserve BPF programs early so we can abort easily on failures */
	if (map_flags & BPF_SOCKMAP_STRPARSER) {
		verdict = READ_ONCE(stab->bpf_verdict);
		parse = READ_ONCE(stab->bpf_parse);

		if (!verdict || !parse)
			return -ENOENT;

		/* bpf prog refcnt may be zero if a concurrent attach operation
		 * removes the program after the above READ_ONCE() but before
		 * we increment the refcnt. If this is the case, abort with an
		 * error.
		 */
		verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(stab->bpf_parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	if (!psock) {
		sock = skops->sk;
		if (rcu_dereference_sk_user_data(sock))
			return -EEXIST;
		psock = smap_init_psock(sock, stab);
		if (IS_ERR(psock)) {
			if (verdict)
				bpf_prog_put(verdict);
			if (parse)
				bpf_prog_put(parse);
			return PTR_ERR(psock);
		}
		psock->key = i;
		psock->stab = stab;
		refcount_inc(&stab->refcnt);
		set_bit(SMAP_TX_RUNNING, &psock->state);
	}

	if (map_flags & BPF_SOCKMAP_STRPARSER) {
		write_lock_bh(&sock->sk_callback_lock);
		if (psock->strp_enabled)
			goto start_done;
		err = smap_init_sock(psock, sock);
		if (err)
			goto out;
		smap_init_progs(psock, stab, verdict, parse);
		smap_start_sock(psock, sock);
start_done:
		write_unlock_bh(&sock->sk_callback_lock);
	} else if (update) {
		smap_stop_sock(psock, sock);
	}

	if (!update) {
		old_sock = xchg(&stab->sock_map[i], skops->sk);
		if (old_sock)
			smap_release_sock(old_sock);
	}

	return 0;
out:
	write_unlock_bh(&sock->sk_callback_lock);
	if (!update)
		smap_release_sock(sock);
	return err;
}

static int sock_map_attach_prog(struct bpf_map *map,
				struct bpf_prog *parse,
				struct bpf_prog *verdict)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *_parse, *_verdict;

	_parse = xchg(&stab->bpf_parse, parse);
	_verdict = xchg(&stab->bpf_verdict, verdict);

	if (_parse)
		bpf_prog_put(_parse);
	if (_verdict)
		bpf_prog_put(_verdict);

	return 0;
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	err = sock_map_ctx_update_elem(&skops, map, key,
				       flags, BPF_SOCKMAP_STRPARSER);
	fput(socket->file);
	return err;
}
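
/* Illustrative sketch (not part of this file): from user space the value
 * passed to an update is a socket fd and the key is the u32 map index.
 * Assuming a map_fd for a BPF_MAP_TYPE_SOCKMAP map and a connected TCP
 * socket fd sock_fd, the syscall wrapper usage would look roughly like:
 *
 *	int key = 0;
 *
 *	err = bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 *
 * sockfd_lookup() above resolves sock_fd to the struct sock, and the
 * update then builds the psock and, because BPF_SOCKMAP_STRPARSER is
 * passed here, attaches the strparser callbacks.
 */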
764
const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
	.map_attach = sock_map_attach_prog,
};

BPF_CALL_5(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags, u64, map_flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags, map_flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
	.arg5_type = ARG_ANYTHING,
};
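
/* Illustrative sketch (not part of this file): the helper above is intended
 * to be called from a BPF_PROG_TYPE_SOCK_OPS program, for example to add a
 * socket to the map once the connection is established. Assuming a
 * "sock_map" map definition on the BPF side and a helper declaration that
 * matches the five-argument proto above, the call might look roughly like:
 *
 *	SEC("sockops")
 *	int add_to_map(struct bpf_sock_ops *skops)
 *	{
 *		int key = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 *		    skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &key,
 *					    BPF_ANY, BPF_SOCKMAP_STRPARSER);
 *		return 0;
 *	}
 */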