// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
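
/*
 * Illustrative userspace sketch (not part of this file; the AF_INET/UDP
 * choice and the addr variable are just for the example): every socket
 * that sets SO_REUSEPORT before bind()ing to the same address and port
 * joins one reuseport group, which the functions below maintain:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Repeating this in N processes yields a group of N sockets; incoming
 * packets are spread across them by reuseport_select_sock().
 */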

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

static DEFINE_SPINLOCK(reuseport_lock);

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);

	return reuse;
}

int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path. Nothing to do when we lose the race.
	 */
	if (rcu_dereference_protected(sk->sk_reuseport_cb,
				      lockdep_is_held(&reuseport_lock)))
		goto out;

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

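/*
 * Illustrative note on the cap above: groups start at INIT_SOCKS (128)
 * slots and double on each grow, so capacities run 128, 256, ..., 32768.
 * Growing past 32768 would require 65536 slots, which exceeds U16_MAX,
 * so a single group can hold at most 32768 sockets.
 */
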
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}

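/*
 * Usage sketch (hypothetical caller; find_bound_sock() is made up for
 * illustration): a protocol's bind/hash path typically joins the group
 * of an already-bound socket, or starts a new group for the first one:
 *
 *	if (sk->sk_reuseport) {
 *		struct sock *sk2 = find_bound_sock(sk);
 *
 *		err = sk2 ? reuseport_add_sock(sk, sk2)
 *			  : reuseport_alloc(sk);
 *		if (err)
 *			goto fail;
 *	}
 */
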
void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			/* Keep the array dense: move the last entry
			 * into the vacated slot.
			 */
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

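/*
 * Illustrative sketch of a selector program as run above (classic BPF,
 * attached from userspace via SO_ATTACH_REUSEPORT_CBPF; the modulus of
 * 4 is just an example): the program's return value is used as an index
 * into reuse->socks[], and out-of-range indices select no socket.
 *
 *	struct sock_filter code[] = {
 *		// A = first word of the (already pulled) payload
 *		{ BPF_LD  | BPF_W   | BPF_ABS, 0, 0, 0 },
 *		// return A % 4, i.e. pick one of four sockets
 *		{ BPF_ALU | BPF_MOD | BPF_K,   0, 0, 4 },
 *		{ BPF_RET | BPF_A,             0, 0, 0 },
 *	};
 */
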
/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *   the skb does not yet point at the payload, this parameter represents
 *   how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
		else
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
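/*
 * Descriptive note on the hash fallback above: reciprocal_scale(hash,
 * socks) maps the 32-bit flow hash uniformly onto [0, socks) by
 * computing ((u64)hash * socks) >> 32, avoiding a division on the
 * receive fast path.
 */
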
struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);
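
/*
 * Illustrative userspace sketch (not part of this file): the program
 * swapped in above arrives via setsockopt() on any group member, e.g.
 * using the classic-BPF filter from the run_bpf() comment:
 *
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 */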