// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

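/* Rebuild the VLAN header that hardware acceleration moved out of the packet
 * data and into skb metadata: copy the Ethernet header from the skb, then
 * splice the tag back in from skb->vlan_proto and the skb VLAN TCI.
 */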
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
                                         struct vlan_ethhdr *veth)
{
        if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
                return false;

        veth->h_vlan_proto = skb->vlan_proto;
        veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
        veth->h_vlan_encapsulated_proto = skb->protocol;

        return true;
}

/* add vlan header into the user buffer if the tag was stripped by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
        int mac_off = skb_mac_header(skb) - skb->data;
        u8 *vlanh, *dst_u8 = (u8 *) d;
        struct vlan_ethhdr veth;
        u8 vlan_hlen = 0;

        if ((skb->protocol == htons(ETH_P_8021AD) ||
             skb->protocol == htons(ETH_P_8021Q)) &&
            offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
                vlan_hlen += VLAN_HLEN;

        vlanh = (u8 *) &veth;
        if (offset < VLAN_ETH_HLEN + vlan_hlen) {
                u8 ethlen = len;

                if (vlan_hlen &&
                    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
                        return false;
                else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
                        return false;

                if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
                        ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

                memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

                len -= ethlen;
                if (len == 0)
                        return true;

                dst_u8 += ethlen;
                offset = ETH_HLEN + vlan_hlen;
        } else {
                offset -= VLAN_HLEN + vlan_hlen;
        }

        return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

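/* Load priv->len bytes at priv->base + priv->offset into the destination
 * register. When the length is not a multiple of NFT_REG32_SIZE, the last
 * register word is zeroed first so that the trailing padding bytes have a
 * well-defined value for later comparisons.
 */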
void nft_payload_eval(const struct nft_expr *expr,
                      struct nft_regs *regs,
                      const struct nft_pktinfo *pkt)
{
        const struct nft_payload *priv = nft_expr_priv(expr);
        const struct sk_buff *skb = pkt->skb;
        u32 *dest = &regs->data[priv->dreg];
        int offset;

        if (priv->len % NFT_REG32_SIZE)
                dest[priv->len / NFT_REG32_SIZE] = 0;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;

                if (skb_vlan_tag_present(skb)) {
                        if (!nft_payload_copy_vlan(dest, skb,
                                                   priv->offset, priv->len))
                                goto err;
                        return;
                }
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                if (!pkt->tprot_set)
                        goto err;
                offset = pkt->xt.thoff;
                break;
        default:
                BUG();
        }
        offset += priv->offset;

        if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
                goto err;
        return;
err:
        regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
        [NFTA_PAYLOAD_SREG]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_DREG]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_BASE]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_OFFSET]           = { .type = NLA_U32 },
        [NFTA_PAYLOAD_LEN]              = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_TYPE]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_OFFSET]      = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_FLAGS]       = { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
                            const struct nft_expr *expr,
                            const struct nlattr * const tb[])
{
        struct nft_payload *priv = nft_expr_priv(expr);

        priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

        return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
                                        &priv->dreg, NULL, NFT_DATA_VALUE,
                                        priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_payload *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
            nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

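/* Build the flow match mask for a (possibly partial) read of a fixed-size
 * header field: a read of the whole field gets an all-ones mask, a shorter
 * read masks off the bytes that were not requested, and a read longer than
 * the field cannot be offloaded.
 */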
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
                                     u32 priv_len, u32 field_len)
{
        unsigned int remainder, delta, k;
        struct nft_data mask = {};
        __be32 remainder_mask;

        if (priv_len == field_len) {
                memset(&reg->mask, 0xff, priv_len);
                return true;
        } else if (priv_len > field_len) {
                return false;
        }

        memset(&mask, 0xff, field_len);
        remainder = priv_len % sizeof(u32);
        if (remainder) {
                k = priv_len / sizeof(u32);
                delta = field_len - priv_len;
                remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
                mask.data[k] = (__force u32)remainder_mask;
        }

        memcpy(&reg->mask, &mask, field_len);

        return true;
}

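/* Translate link layer loads into flow dissector keys. Only loads that line
 * up exactly with a known field (MAC addresses, EtherType, VLAN TCI/TPID,
 * including one level of QinQ) can be expressed as a hardware match.
 */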
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct ethhdr, h_source):
                if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  src, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_dest):
                if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  dst, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_proto):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
                                  n_proto, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                  vlan_tci, sizeof(__be16), reg);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                  vlan_tpid, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
                                  vlan_tci, sizeof(__be16), reg);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
             sizeof(struct vlan_hdr):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
                                  vlan_tpid, sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct iphdr, saddr):
                if (!nft_payload_offload_mask(reg, priv->len,
                                              sizeof(struct in_addr)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                                  sizeof(struct in_addr), reg);
                nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
                break;
        case offsetof(struct iphdr, daddr):
                if (!nft_payload_offload_mask(reg, priv->len,
                                              sizeof(struct in_addr)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                                  sizeof(struct in_addr), reg);
                nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
                break;
        case offsetof(struct iphdr, protocol):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct ipv6hdr, saddr):
                if (!nft_payload_offload_mask(reg, priv->len,
                                              sizeof(struct in6_addr)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                                  sizeof(struct in6_addr), reg);
                nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                break;
        case offsetof(struct ipv6hdr, daddr):
                if (!nft_payload_offload_mask(reg, priv->len,
                                              sizeof(struct in6_addr)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                                  sizeof(struct in6_addr), reg);
                nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                break;
        case offsetof(struct ipv6hdr, nexthdr):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        int err;

        switch (ctx->dep.l3num) {
        case htons(ETH_P_IP):
                err = nft_payload_offload_ip(ctx, flow, priv);
                break;
        case htons(ETH_P_IPV6):
                err = nft_payload_offload_ip6(ctx, flow, priv);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct tcphdr, source):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct tcphdr, dest):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct udphdr, source):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct udphdr, dest):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        int err;

        switch (ctx->dep.protonum) {
        case IPPROTO_TCP:
                err = nft_payload_offload_tcp(ctx, flow, priv);
                break;
        case IPPROTO_UDP:
                err = nft_payload_offload_udp(ctx, flow, priv);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
                               struct nft_flow_rule *flow,
                               const struct nft_expr *expr)
{
        const struct nft_payload *priv = nft_expr_priv(expr);
        int err;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                err = nft_payload_offload_ll(ctx, flow, priv);
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                err = nft_payload_offload_nh(ctx, flow, priv);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                err = nft_payload_offload_th(ctx, flow, priv);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }
        return err;
}

static const struct nft_expr_ops nft_payload_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
        .eval           = nft_payload_eval,
        .init           = nft_payload_init,
        .dump           = nft_payload_dump,
        .offload        = nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
        .eval           = nft_payload_eval,
        .init           = nft_payload_init,
        .dump           = nft_payload_dump,
        .offload        = nft_payload_offload,
};

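/* Incremental checksum update (RFC 1624): fold the difference between the
 * old bytes (fsum) and the new bytes (tsum) into the existing checksum.
 * A result of 0 is stored as CSUM_MANGLED_0 so it cannot be mistaken for
 * "no checksum" by protocols where zero is special, e.g. UDP.
 */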
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
        *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
        if (*sum == 0)
                *sum = CSUM_MANGLED_0;
}

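/* A UDP checksum of zero means the sender did not compute one, so there is
 * nothing to update; report whether the datagram actually carries a checksum.
 */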
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
        struct udphdr *uh, _uh;

        uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
        if (!uh)
                return false;

        return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
                                     struct sk_buff *skb,
                                     unsigned int *l4csum_offset)
{
        switch (pkt->tprot) {
        case IPPROTO_TCP:
                *l4csum_offset = offsetof(struct tcphdr, check);
                break;
        case IPPROTO_UDP:
                if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
                        return -1;
                fallthrough;
        case IPPROTO_UDPLITE:
                *l4csum_offset = offsetof(struct udphdr, check);
                break;
        case IPPROTO_ICMPV6:
                *l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
                break;
        default:
                return -1;
        }

        *l4csum_offset += pkt->xt.thoff;
        return 0;
}

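/* SCTP uses a CRC32c over the whole packet rather than the ones' complement
 * internet checksum, so the checksum field is recomputed from scratch here.
 */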
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
        struct sctphdr *sh;

        if (skb_ensure_writable(skb, offset + sizeof(*sh)))
                return -1;

        sh = (struct sctphdr *)(skb->data + offset);
        sh->checksum = sctp_compute_cksum(skb, offset);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        return 0;
}

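/* Fold the payload delta into the layer 4 checksum field. For
 * CHECKSUM_COMPLETE packets skb->csum is updated to match; for
 * CHECKSUM_PARTIAL packets only the stored pseudo-header checksum is
 * adjusted, since the device computes the rest.
 */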
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
                                     struct sk_buff *skb,
                                     __wsum fsum, __wsum tsum)
{
        int l4csum_offset;
        __sum16 sum;

        /* If we cannot determine layer 4 checksum offset or this packet doesn't
         * require layer 4 checksum recalculation, skip this packet.
         */
        if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
                return 0;

        if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        /* Checksum mangling for an arbitrary amount of bytes, based on
         * inet_proto_csum_replace*() functions.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                nft_csum_replace(&sum, fsum, tsum);
                if (skb->ip_summed == CHECKSUM_COMPLETE) {
                        skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
                                              tsum);
                }
        } else {
                sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
                                          tsum));
        }

        if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
            skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
                                 __wsum fsum, __wsum tsum, int csum_offset)
{
        __sum16 sum;

        if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        nft_csum_replace(&sum, fsum, tsum);
        if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
            skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        return 0;
}

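/* Write the source register into the packet at priv->base + priv->offset,
 * folding the old/new byte difference into the configured checksum(s)
 * before the payload itself is changed.
 */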
static void nft_payload_set_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
{
        const struct nft_payload_set *priv = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
        const u32 *src = &regs->data[priv->sreg];
        int offset, csum_offset;
        __wsum fsum, tsum;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                if (!pkt->tprot_set)
                        goto err;
                offset = pkt->xt.thoff;
                break;
        default:
                BUG();
        }

        csum_offset = offset + priv->csum_offset;
        offset += priv->offset;

        if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
            (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
             skb->ip_summed != CHECKSUM_PARTIAL)) {
                fsum = skb_checksum(skb, offset, priv->len, 0);
                tsum = csum_partial(src, priv->len, 0);

                if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
                    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
                        goto err;

                if (priv->csum_flags &&
                    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
                        goto err;
        }

        if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
            skb_store_bits(skb, offset, src, priv->len) < 0)
                goto err;

        if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
            pkt->tprot == IPPROTO_SCTP &&
            skb->ip_summed != CHECKSUM_PARTIAL) {
                if (nft_payload_csum_sctp(skb, pkt->xt.thoff))
                        goto err;
        }

        return;
err:
        regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
                                const struct nft_expr *expr,
                                const struct nlattr * const tb[])
{
        struct nft_payload_set *priv = nft_expr_priv(expr);

        priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

        if (tb[NFTA_PAYLOAD_CSUM_TYPE])
                priv->csum_type =
                        ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
        if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
                priv->csum_offset =
                        ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
        if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
                u32 flags;

                flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
                if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
                        return -EINVAL;

                priv->csum_flags = flags;
        }

        switch (priv->csum_type) {
        case NFT_PAYLOAD_CSUM_NONE:
        case NFT_PAYLOAD_CSUM_INET:
                break;
        case NFT_PAYLOAD_CSUM_SCTP:
                if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
                        return -EINVAL;

                if (priv->csum_offset != offsetof(struct sctphdr, checksum))
                        return -EINVAL;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
                                       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_payload_set *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
            nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
                         htonl(priv->csum_offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
        .eval           = nft_payload_set_eval,
        .init           = nft_payload_set_init,
        .dump           = nft_payload_set_dump,
};

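/* Choose the expression implementation: loads of up to four bytes that are
 * naturally aligned and do not touch the link layer header can use the fast
 * path; everything else, and all stores, use the generic ops.
 */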
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
                       const struct nlattr * const tb[])
{
        enum nft_payload_bases base;
        unsigned int offset, len;

        if (tb[NFTA_PAYLOAD_BASE] == NULL ||
            tb[NFTA_PAYLOAD_OFFSET] == NULL ||
            tb[NFTA_PAYLOAD_LEN] == NULL)
                return ERR_PTR(-EINVAL);

        base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        switch (base) {
        case NFT_PAYLOAD_LL_HEADER:
        case NFT_PAYLOAD_NETWORK_HEADER:
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                break;
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }

        if (tb[NFTA_PAYLOAD_SREG] != NULL) {
                if (tb[NFTA_PAYLOAD_DREG] != NULL)
                        return ERR_PTR(-EINVAL);
                return &nft_payload_set_ops;
        }

        if (tb[NFTA_PAYLOAD_DREG] == NULL)
                return ERR_PTR(-EINVAL);

        offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

        if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
            base != NFT_PAYLOAD_LL_HEADER)
                return &nft_payload_fast_ops;
        else
                return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
        .name           = "payload",
        .select_ops     = nft_payload_select_ops,
        .policy         = nft_payload_policy,
        .maxattr        = NFTA_PAYLOAD_MAX,
        .owner          = THIS_MODULE,
};