net/netfilter/nf_flow_table_core.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

struct flow_offload_entry {
	struct flow_offload	flow;
	struct nf_conn		*ct;
	struct rcu_head		rcu_head;
};

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

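/* Populate one direction of the flow tuple from the conntrack tuple and the
 * cached route: addresses, ports, L3/L4 protocol, MTU, the input interface
 * (taken from the opposite direction's dst device) and the dst_entry itself.
 */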
static void
flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
		      struct nf_flow_route *route,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
	struct dst_entry *other_dst = route->tuple[!dir].dst;
	struct dst_entry *dst = route->tuple[dir].dst;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		ft->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;

	ft->iifidx = other_dst->dev->ifindex;
	ft->dst_cache = dst;
}

struct flow_offload *
flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
{
	struct flow_offload_entry *entry;
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		goto err_ct_refcnt;

	flow = &entry->flow;

	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
		goto err_dst_cache_original;

	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
		goto err_dst_cache_reply;

	entry->ct = ct;

	flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		flow->flags |= FLOW_OFFLOAD_SNAT;
	if (ct->status & IPS_DST_NAT)
		flow->flags |= FLOW_OFFLOAD_DNAT;

	return flow;

err_dst_cache_reply:
	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
err_dst_cache_original:
	kfree(entry);
err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

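/* When a flow stops being offloaded, conntrack takes over again: reset the
 * TCP state to ESTABLISHED and zero the tracked windows so they are
 * re-learned, and clamp the conntrack timeout to a short pickup value.
 */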
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)

static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
	return (__s32)(timeout - (u32)jiffies);
}

static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	int l4num = nf_ct_protonum(ct);
	unsigned int timeout;

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP)
		timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
	else if (l4num == IPPROTO_UDP)
		timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
	else
		return;

	if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
		ct->timeout = nfct_time_stamp + timeout;
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

void flow_offload_free(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
	e = container_of(flow, struct flow_offload_entry, flow);
	if (flow->flags & FLOW_OFFLOAD_DYING)
		nf_ct_delete(e->ct, 0, 0);
	nf_ct_put(e->ct);
	kfree_rcu(e, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

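/* Only the members laid out before 'dir' in struct flow_offload_tuple form
 * the rhashtable key: both the hash functions and the compare callback stop
 * at offsetof(..., dir), so 'dir' and everything after it is ignored on
 * lookup.
 */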
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

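/* A flow is inserted twice, once per direction, so packets from either side
 * find the same entry; the initial expiry is NF_FLOW_TIMEOUT from now.
 */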
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	e = container_of(flow, struct flow_offload_entry, flow);
	clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(e->ct);
	else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
		flow_offload_fixup_ct_timeout(e->ct);

	flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	flow->flags |= FLOW_OFFLOAD_TEARDOWN;

	e = container_of(flow, struct flow_offload_entry, flow);
	flow_offload_fixup_ct_state(e->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

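/* Look up a flow by tuple. Flows that are flagged dying or torn down, or
 * whose conntrack entry is dying, are treated as absent.
 */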
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	struct flow_offload_entry *e;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
		return NULL;

	e = container_of(flow, struct flow_offload_entry, flow);
	if (unlikely(nf_ct_is_dying(e->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

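/* Periodic garbage collection: walk the table and remove flows that have
 * expired, whose conntrack entry is dying, or that are flagged DYING or
 * TEARDOWN. The work re-arms itself every second (HZ).
 */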
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;
	struct flow_offload_entry *e;

	e = container_of(flow, struct flow_offload_entry, flow);
	if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
	    (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
		flow_offload_del(flow_table, flow);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);

	return 0;
}

static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			    u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

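/* Port rewriting for source/destination NAT: the replacement port is taken
 * from the opposite direction's tuple, and the TCP/UDP checksum is updated
 * through nf_flow_nat_port().
 */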
int nf_flow_snat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

int nf_flow_dnat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

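/* Flowtable setup: initialize the rhashtable, kick off the periodic GC work
 * and register the table on the global flowtables list used for device
 * cleanup.
 */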
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

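/* Cleanup callback: with no device, tear down every flow; with a device,
 * mark flows in that netns whose input interface matches as dead.
 */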
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;
	struct flow_offload_entry *e;

	e = container_of(flow, struct flow_offload_entry, flow);

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}
	if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_dead(flow);
}

static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
					  struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_iterate_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

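/* Flowtable removal: unlink it from the global list, stop the GC work, mark
 * all remaining flows for teardown and run one final GC pass before
 * destroying the rhashtable.
 */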
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);
	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");