// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}
/* add the vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		/* trim the copy to the bytes that live in the rebuilt header */
		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
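
/*
 * Worked example (editorial note, not part of the original source): with a
 * single hardware-accelerated tag, vlan_hlen stays 0. A read at logical
 * offset 12, len 4 (TPID + TCI) lies below VLAN_ETH_HLEN (18) and is served
 * from the rebuilt vlan_ethhdr above. A read at offset 20 takes the else
 * branch and is remapped to skb offset 20 - VLAN_HLEN = 16, since the four
 * tag bytes are not present in the skb data.
 */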

void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}
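
/*
 * Illustration (editorial note): the userspace rule "tcp dport 22" compiles
 * to this expression with base NFT_PAYLOAD_TRANSPORT_HEADER, offset 2
 * (offsetof(struct tcphdr, dest)) and len 2, followed by a cmp expression
 * against htons(22). On any lookup failure, NFT_BREAK ends evaluation of
 * this rule only; the packet proceeds to the next rule.
 */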

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
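
/*
 * Illustration (editorial note): a match like "ether saddr <addr>" arrives
 * here as offset == offsetof(struct ethhdr, h_source) and len == ETH_ALEN,
 * and is translated into a FLOW_DISSECTOR_KEY_ETH_ADDRS match that drivers
 * can program into hardware. Any offset/length pair not listed above keeps
 * the rule in software via -EOPNOTSUPP.
 */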

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, daddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, protocol):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
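
/*
 * Note (editorial): ctx->dep.l3num is only meaningful here if an earlier
 * expression in the rule (e.g. a cmp on the EtherType after the
 * NFT_OFFLOAD_DEP_NETWORK dependency is set above) recorded the L3
 * protocol; otherwise network-header matches stay in software.
 */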

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
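
/*
 * Illustration (editorial note): "tcp dport 22" offloads as a
 * FLOW_DISSECTOR_KEY_PORTS match on tp.dst. TCP and UDP place their ports
 * at the same offsets (source at 0, dest at 2), which is why both helpers
 * emit the same dissector key and differ only in the protonum dispatch.
 */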

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

/* Identical to nft_payload_ops on purpose: the fast path itself is
 * nft_payload_fast_eval() in nf_tables_core.c, which the core evaluator
 * invokes directly when it recognizes these ops; .eval is the fallback.
 */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}
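
/*
 * Worked arithmetic (editorial note): this is the incremental update of
 * RFC 1624, HC' = ~(~HC + ~m + m'), expressed as fold(~old - from + to) in
 * one's-complement arithmetic. A folded result of 0 is rewritten to
 * CSUM_MANGLED_0 (0xffff), the equivalent encoding, because a literal zero
 * UDP checksum would mean "no checksum present".
 */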

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}
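
/*
 * Note (editorial): a zero UDP checksum means the sender did not compute
 * one (RFC 768), so there is nothing to mangle; returning false makes
 * nft_payload_l4csum_offset() report -1 and the update is skipped.
 * UDP-Lite checksums are mandatory, hence its unconditional case below.
 */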

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	unsigned int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine the layer 4 checksum offset or this packet
	 * doesn't require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
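
/*
 * Note (editorial): for CHECKSUM_PARTIAL the checksum field holds only the
 * pseudo-header sum and the device computes the rest, so the field is
 * adjusted without the usual fold-and-complement of a finished checksum;
 * for CHECKSUM_COMPLETE, skb->csum mirrors the packet bytes and must be
 * updated in step with the stored checksum.
 */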

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
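
/*
 * Illustration (editorial note): "ip ttl set 64" becomes this expression
 * with base NFT_PAYLOAD_NETWORK_HEADER, offset 8 (offsetof(struct iphdr,
 * ttl)), len 1, csum_type NFT_PAYLOAD_CSUM_INET and csum_offset 10
 * (offsetof(struct iphdr, check)), so the IPv4 header checksum is patched
 * incrementally together with the rewritten byte.
 */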

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg   = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}
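
/*
 * Illustration (editorial note): a 2-byte load at an even offset from the
 * network or transport header (e.g. "tcp dport") selects
 * nft_payload_fast_ops and thus the inlined fetch in nf_tables_core.c;
 * unaligned, longer-than-4-byte or link-layer loads use the generic
 * nft_payload_ops path.
 */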

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};