// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
9 #include <linux/kernel.h>
10 #include <linux/if_vlan.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17 #include <net/netfilter/nf_tables.h>
18 #include <net/netfilter/nf_tables_offload.h>
19 /* For layer 4 checksum field offset. */
20 #include <linux/tcp.h>
21 #include <linux/udp.h>
22 #include <linux/icmpv6.h>
24 #include <linux/ipv6.h>
26 static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff
*skb
, int mac_off
,
27 struct vlan_ethhdr
*veth
)
29 if (skb_copy_bits(skb
, mac_off
, veth
, ETH_HLEN
))
32 veth
->h_vlan_proto
= skb
->vlan_proto
;
33 veth
->h_vlan_TCI
= htons(skb_vlan_tag_get(skb
));
34 veth
->h_vlan_encapsulated_proto
= skb
->protocol
;
39 /* add vlan header into the user buffer for if tag was removed by offloads */
41 nft_payload_copy_vlan(u32
*d
, const struct sk_buff
*skb
, u8 offset
, u8 len
)
43 int mac_off
= skb_mac_header(skb
) - skb
->data
;
44 u8
*vlanh
, *dst_u8
= (u8
*) d
;
45 struct vlan_ethhdr veth
;
48 if ((skb
->protocol
== htons(ETH_P_8021AD
) ||
49 skb
->protocol
== htons(ETH_P_8021Q
)) &&
50 offset
>= VLAN_ETH_HLEN
&& offset
< VLAN_ETH_HLEN
+ VLAN_HLEN
)
51 vlan_hlen
+= VLAN_HLEN
;
54 if (offset
< VLAN_ETH_HLEN
+ vlan_hlen
) {
58 skb_copy_bits(skb
, mac_off
, &veth
, VLAN_ETH_HLEN
) < 0)
60 else if (!nft_payload_rebuild_vlan_hdr(skb
, mac_off
, &veth
))
63 if (offset
+ len
> VLAN_ETH_HLEN
+ vlan_hlen
)
64 ethlen
-= offset
+ len
- VLAN_ETH_HLEN
+ vlan_hlen
;
66 memcpy(dst_u8
, vlanh
+ offset
- vlan_hlen
, ethlen
);
73 offset
= ETH_HLEN
+ vlan_hlen
;
75 offset
-= VLAN_HLEN
+ vlan_hlen
;
78 return skb_copy_bits(skb
, offset
+ mac_off
, dst_u8
, len
) == 0;
81 void nft_payload_eval(const struct nft_expr
*expr
,
82 struct nft_regs
*regs
,
83 const struct nft_pktinfo
*pkt
)
85 const struct nft_payload
*priv
= nft_expr_priv(expr
);
86 const struct sk_buff
*skb
= pkt
->skb
;
87 u32
*dest
= ®s
->data
[priv
->dreg
];
90 dest
[priv
->len
/ NFT_REG32_SIZE
] = 0;
92 case NFT_PAYLOAD_LL_HEADER
:
93 if (!skb_mac_header_was_set(skb
))
96 if (skb_vlan_tag_present(skb
)) {
97 if (!nft_payload_copy_vlan(dest
, skb
,
98 priv
->offset
, priv
->len
))
102 offset
= skb_mac_header(skb
) - skb
->data
;
104 case NFT_PAYLOAD_NETWORK_HEADER
:
105 offset
= skb_network_offset(skb
);
107 case NFT_PAYLOAD_TRANSPORT_HEADER
:
110 offset
= pkt
->xt
.thoff
;
115 offset
+= priv
->offset
;
117 if (skb_copy_bits(skb
, offset
, dest
, priv
->len
) < 0)
121 regs
->verdict
.code
= NFT_BREAK
;
124 static const struct nla_policy nft_payload_policy
[NFTA_PAYLOAD_MAX
+ 1] = {
125 [NFTA_PAYLOAD_SREG
] = { .type
= NLA_U32
},
126 [NFTA_PAYLOAD_DREG
] = { .type
= NLA_U32
},
127 [NFTA_PAYLOAD_BASE
] = { .type
= NLA_U32
},
128 [NFTA_PAYLOAD_OFFSET
] = { .type
= NLA_U32
},
129 [NFTA_PAYLOAD_LEN
] = { .type
= NLA_U32
},
130 [NFTA_PAYLOAD_CSUM_TYPE
] = { .type
= NLA_U32
},
131 [NFTA_PAYLOAD_CSUM_OFFSET
] = { .type
= NLA_U32
},
132 [NFTA_PAYLOAD_CSUM_FLAGS
] = { .type
= NLA_U32
},
135 static int nft_payload_init(const struct nft_ctx
*ctx
,
136 const struct nft_expr
*expr
,
137 const struct nlattr
* const tb
[])
139 struct nft_payload
*priv
= nft_expr_priv(expr
);
141 priv
->base
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_BASE
]));
142 priv
->offset
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_OFFSET
]));
143 priv
->len
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_LEN
]));
144 priv
->dreg
= nft_parse_register(tb
[NFTA_PAYLOAD_DREG
]);
146 return nft_validate_register_store(ctx
, priv
->dreg
, NULL
,
147 NFT_DATA_VALUE
, priv
->len
);
150 static int nft_payload_dump(struct sk_buff
*skb
, const struct nft_expr
*expr
)
152 const struct nft_payload
*priv
= nft_expr_priv(expr
);
154 if (nft_dump_register(skb
, NFTA_PAYLOAD_DREG
, priv
->dreg
) ||
155 nla_put_be32(skb
, NFTA_PAYLOAD_BASE
, htonl(priv
->base
)) ||
156 nla_put_be32(skb
, NFTA_PAYLOAD_OFFSET
, htonl(priv
->offset
)) ||
157 nla_put_be32(skb
, NFTA_PAYLOAD_LEN
, htonl(priv
->len
)))
158 goto nla_put_failure
;
165 static int nft_payload_offload_ll(struct nft_offload_ctx
*ctx
,
166 struct nft_flow_rule
*flow
,
167 const struct nft_payload
*priv
)
169 struct nft_offload_reg
*reg
= &ctx
->regs
[priv
->dreg
];
171 switch (priv
->offset
) {
172 case offsetof(struct ethhdr
, h_source
):
173 if (priv
->len
!= ETH_ALEN
)
176 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS
, eth_addrs
,
179 case offsetof(struct ethhdr
, h_dest
):
180 if (priv
->len
!= ETH_ALEN
)
183 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS
, eth_addrs
,
186 case offsetof(struct ethhdr
, h_proto
):
187 if (priv
->len
!= sizeof(__be16
))
190 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC
, basic
,
191 n_proto
, sizeof(__be16
), reg
);
192 nft_offload_set_dependency(ctx
, NFT_OFFLOAD_DEP_NETWORK
);
194 case offsetof(struct vlan_ethhdr
, h_vlan_TCI
):
195 if (priv
->len
!= sizeof(__be16
))
198 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN
, vlan
,
199 vlan_tci
, sizeof(__be16
), reg
);
201 case offsetof(struct vlan_ethhdr
, h_vlan_encapsulated_proto
):
202 if (priv
->len
!= sizeof(__be16
))
205 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN
, vlan
,
206 vlan_tpid
, sizeof(__be16
), reg
);
207 nft_offload_set_dependency(ctx
, NFT_OFFLOAD_DEP_NETWORK
);
209 case offsetof(struct vlan_ethhdr
, h_vlan_TCI
) + sizeof(struct vlan_hdr
):
210 if (priv
->len
!= sizeof(__be16
))
213 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN
, vlan
,
214 vlan_tci
, sizeof(__be16
), reg
);
216 case offsetof(struct vlan_ethhdr
, h_vlan_encapsulated_proto
) +
217 sizeof(struct vlan_hdr
):
218 if (priv
->len
!= sizeof(__be16
))
221 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN
, vlan
,
222 vlan_tpid
, sizeof(__be16
), reg
);
231 static int nft_payload_offload_ip(struct nft_offload_ctx
*ctx
,
232 struct nft_flow_rule
*flow
,
233 const struct nft_payload
*priv
)
235 struct nft_offload_reg
*reg
= &ctx
->regs
[priv
->dreg
];
237 switch (priv
->offset
) {
238 case offsetof(struct iphdr
, saddr
):
239 if (priv
->len
!= sizeof(struct in_addr
))
242 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS
, ipv4
, src
,
243 sizeof(struct in_addr
), reg
);
245 case offsetof(struct iphdr
, daddr
):
246 if (priv
->len
!= sizeof(struct in_addr
))
249 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS
, ipv4
, dst
,
250 sizeof(struct in_addr
), reg
);
252 case offsetof(struct iphdr
, protocol
):
253 if (priv
->len
!= sizeof(__u8
))
256 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC
, basic
, ip_proto
,
258 nft_offload_set_dependency(ctx
, NFT_OFFLOAD_DEP_TRANSPORT
);
267 static int nft_payload_offload_ip6(struct nft_offload_ctx
*ctx
,
268 struct nft_flow_rule
*flow
,
269 const struct nft_payload
*priv
)
271 struct nft_offload_reg
*reg
= &ctx
->regs
[priv
->dreg
];
273 switch (priv
->offset
) {
274 case offsetof(struct ipv6hdr
, saddr
):
275 if (priv
->len
!= sizeof(struct in6_addr
))
278 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS
, ipv6
, src
,
279 sizeof(struct in6_addr
), reg
);
281 case offsetof(struct ipv6hdr
, daddr
):
282 if (priv
->len
!= sizeof(struct in6_addr
))
285 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS
, ipv6
, dst
,
286 sizeof(struct in6_addr
), reg
);
288 case offsetof(struct ipv6hdr
, nexthdr
):
289 if (priv
->len
!= sizeof(__u8
))
292 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC
, basic
, ip_proto
,
294 nft_offload_set_dependency(ctx
, NFT_OFFLOAD_DEP_TRANSPORT
);
303 static int nft_payload_offload_nh(struct nft_offload_ctx
*ctx
,
304 struct nft_flow_rule
*flow
,
305 const struct nft_payload
*priv
)
309 switch (ctx
->dep
.l3num
) {
310 case htons(ETH_P_IP
):
311 err
= nft_payload_offload_ip(ctx
, flow
, priv
);
313 case htons(ETH_P_IPV6
):
314 err
= nft_payload_offload_ip6(ctx
, flow
, priv
);
323 static int nft_payload_offload_tcp(struct nft_offload_ctx
*ctx
,
324 struct nft_flow_rule
*flow
,
325 const struct nft_payload
*priv
)
327 struct nft_offload_reg
*reg
= &ctx
->regs
[priv
->dreg
];
329 switch (priv
->offset
) {
330 case offsetof(struct tcphdr
, source
):
331 if (priv
->len
!= sizeof(__be16
))
334 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS
, tp
, src
,
335 sizeof(__be16
), reg
);
337 case offsetof(struct tcphdr
, dest
):
338 if (priv
->len
!= sizeof(__be16
))
341 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS
, tp
, dst
,
342 sizeof(__be16
), reg
);
351 static int nft_payload_offload_udp(struct nft_offload_ctx
*ctx
,
352 struct nft_flow_rule
*flow
,
353 const struct nft_payload
*priv
)
355 struct nft_offload_reg
*reg
= &ctx
->regs
[priv
->dreg
];
357 switch (priv
->offset
) {
358 case offsetof(struct udphdr
, source
):
359 if (priv
->len
!= sizeof(__be16
))
362 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS
, tp
, src
,
363 sizeof(__be16
), reg
);
365 case offsetof(struct udphdr
, dest
):
366 if (priv
->len
!= sizeof(__be16
))
369 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS
, tp
, dst
,
370 sizeof(__be16
), reg
);
379 static int nft_payload_offload_th(struct nft_offload_ctx
*ctx
,
380 struct nft_flow_rule
*flow
,
381 const struct nft_payload
*priv
)
385 switch (ctx
->dep
.protonum
) {
387 err
= nft_payload_offload_tcp(ctx
, flow
, priv
);
390 err
= nft_payload_offload_udp(ctx
, flow
, priv
);
399 static int nft_payload_offload(struct nft_offload_ctx
*ctx
,
400 struct nft_flow_rule
*flow
,
401 const struct nft_expr
*expr
)
403 const struct nft_payload
*priv
= nft_expr_priv(expr
);
406 switch (priv
->base
) {
407 case NFT_PAYLOAD_LL_HEADER
:
408 err
= nft_payload_offload_ll(ctx
, flow
, priv
);
410 case NFT_PAYLOAD_NETWORK_HEADER
:
411 err
= nft_payload_offload_nh(ctx
, flow
, priv
);
413 case NFT_PAYLOAD_TRANSPORT_HEADER
:
414 err
= nft_payload_offload_th(ctx
, flow
, priv
);
423 static const struct nft_expr_ops nft_payload_ops
= {
424 .type
= &nft_payload_type
,
425 .size
= NFT_EXPR_SIZE(sizeof(struct nft_payload
)),
426 .eval
= nft_payload_eval
,
427 .init
= nft_payload_init
,
428 .dump
= nft_payload_dump
,
429 .offload
= nft_payload_offload
,
432 const struct nft_expr_ops nft_payload_fast_ops
= {
433 .type
= &nft_payload_type
,
434 .size
= NFT_EXPR_SIZE(sizeof(struct nft_payload
)),
435 .eval
= nft_payload_eval
,
436 .init
= nft_payload_init
,
437 .dump
= nft_payload_dump
,
438 .offload
= nft_payload_offload
,
441 static inline void nft_csum_replace(__sum16
*sum
, __wsum fsum
, __wsum tsum
)
443 *sum
= csum_fold(csum_add(csum_sub(~csum_unfold(*sum
), fsum
), tsum
));
445 *sum
= CSUM_MANGLED_0
;
448 static bool nft_payload_udp_checksum(struct sk_buff
*skb
, unsigned int thoff
)
450 struct udphdr
*uh
, _uh
;
452 uh
= skb_header_pointer(skb
, thoff
, sizeof(_uh
), &_uh
);
456 return (__force
bool)uh
->check
;
459 static int nft_payload_l4csum_offset(const struct nft_pktinfo
*pkt
,
461 unsigned int *l4csum_offset
)
463 switch (pkt
->tprot
) {
465 *l4csum_offset
= offsetof(struct tcphdr
, check
);
468 if (!nft_payload_udp_checksum(skb
, pkt
->xt
.thoff
))
471 case IPPROTO_UDPLITE
:
472 *l4csum_offset
= offsetof(struct udphdr
, check
);
475 *l4csum_offset
= offsetof(struct icmp6hdr
, icmp6_cksum
);
481 *l4csum_offset
+= pkt
->xt
.thoff
;
485 static int nft_payload_l4csum_update(const struct nft_pktinfo
*pkt
,
487 __wsum fsum
, __wsum tsum
)
492 /* If we cannot determine layer 4 checksum offset or this packet doesn't
493 * require layer 4 checksum recalculation, skip this packet.
495 if (nft_payload_l4csum_offset(pkt
, skb
, &l4csum_offset
) < 0)
498 if (skb_copy_bits(skb
, l4csum_offset
, &sum
, sizeof(sum
)) < 0)
501 /* Checksum mangling for an arbitrary amount of bytes, based on
502 * inet_proto_csum_replace*() functions.
504 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
) {
505 nft_csum_replace(&sum
, fsum
, tsum
);
506 if (skb
->ip_summed
== CHECKSUM_COMPLETE
) {
507 skb
->csum
= ~csum_add(csum_sub(~(skb
->csum
), fsum
),
511 sum
= ~csum_fold(csum_add(csum_sub(csum_unfold(sum
), fsum
),
515 if (skb_ensure_writable(skb
, l4csum_offset
+ sizeof(sum
)) ||
516 skb_store_bits(skb
, l4csum_offset
, &sum
, sizeof(sum
)) < 0)
522 static int nft_payload_csum_inet(struct sk_buff
*skb
, const u32
*src
,
523 __wsum fsum
, __wsum tsum
, int csum_offset
)
527 if (skb_copy_bits(skb
, csum_offset
, &sum
, sizeof(sum
)) < 0)
530 nft_csum_replace(&sum
, fsum
, tsum
);
531 if (skb_ensure_writable(skb
, csum_offset
+ sizeof(sum
)) ||
532 skb_store_bits(skb
, csum_offset
, &sum
, sizeof(sum
)) < 0)
538 static void nft_payload_set_eval(const struct nft_expr
*expr
,
539 struct nft_regs
*regs
,
540 const struct nft_pktinfo
*pkt
)
542 const struct nft_payload_set
*priv
= nft_expr_priv(expr
);
543 struct sk_buff
*skb
= pkt
->skb
;
544 const u32
*src
= ®s
->data
[priv
->sreg
];
545 int offset
, csum_offset
;
548 switch (priv
->base
) {
549 case NFT_PAYLOAD_LL_HEADER
:
550 if (!skb_mac_header_was_set(skb
))
552 offset
= skb_mac_header(skb
) - skb
->data
;
554 case NFT_PAYLOAD_NETWORK_HEADER
:
555 offset
= skb_network_offset(skb
);
557 case NFT_PAYLOAD_TRANSPORT_HEADER
:
560 offset
= pkt
->xt
.thoff
;
566 csum_offset
= offset
+ priv
->csum_offset
;
567 offset
+= priv
->offset
;
569 if ((priv
->csum_type
== NFT_PAYLOAD_CSUM_INET
|| priv
->csum_flags
) &&
570 (priv
->base
!= NFT_PAYLOAD_TRANSPORT_HEADER
||
571 skb
->ip_summed
!= CHECKSUM_PARTIAL
)) {
572 fsum
= skb_checksum(skb
, offset
, priv
->len
, 0);
573 tsum
= csum_partial(src
, priv
->len
, 0);
575 if (priv
->csum_type
== NFT_PAYLOAD_CSUM_INET
&&
576 nft_payload_csum_inet(skb
, src
, fsum
, tsum
, csum_offset
))
579 if (priv
->csum_flags
&&
580 nft_payload_l4csum_update(pkt
, skb
, fsum
, tsum
) < 0)
584 if (skb_ensure_writable(skb
, max(offset
+ priv
->len
, 0)) ||
585 skb_store_bits(skb
, offset
, src
, priv
->len
) < 0)
590 regs
->verdict
.code
= NFT_BREAK
;
593 static int nft_payload_set_init(const struct nft_ctx
*ctx
,
594 const struct nft_expr
*expr
,
595 const struct nlattr
* const tb
[])
597 struct nft_payload_set
*priv
= nft_expr_priv(expr
);
599 priv
->base
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_BASE
]));
600 priv
->offset
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_OFFSET
]));
601 priv
->len
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_LEN
]));
602 priv
->sreg
= nft_parse_register(tb
[NFTA_PAYLOAD_SREG
]);
604 if (tb
[NFTA_PAYLOAD_CSUM_TYPE
])
606 ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_CSUM_TYPE
]));
607 if (tb
[NFTA_PAYLOAD_CSUM_OFFSET
])
609 ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_CSUM_OFFSET
]));
610 if (tb
[NFTA_PAYLOAD_CSUM_FLAGS
]) {
613 flags
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_CSUM_FLAGS
]));
614 if (flags
& ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR
)
617 priv
->csum_flags
= flags
;
620 switch (priv
->csum_type
) {
621 case NFT_PAYLOAD_CSUM_NONE
:
622 case NFT_PAYLOAD_CSUM_INET
:
628 return nft_validate_register_load(priv
->sreg
, priv
->len
);
631 static int nft_payload_set_dump(struct sk_buff
*skb
, const struct nft_expr
*expr
)
633 const struct nft_payload_set
*priv
= nft_expr_priv(expr
);
635 if (nft_dump_register(skb
, NFTA_PAYLOAD_SREG
, priv
->sreg
) ||
636 nla_put_be32(skb
, NFTA_PAYLOAD_BASE
, htonl(priv
->base
)) ||
637 nla_put_be32(skb
, NFTA_PAYLOAD_OFFSET
, htonl(priv
->offset
)) ||
638 nla_put_be32(skb
, NFTA_PAYLOAD_LEN
, htonl(priv
->len
)) ||
639 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_TYPE
, htonl(priv
->csum_type
)) ||
640 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_OFFSET
,
641 htonl(priv
->csum_offset
)) ||
642 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_FLAGS
, htonl(priv
->csum_flags
)))
643 goto nla_put_failure
;
650 static const struct nft_expr_ops nft_payload_set_ops
= {
651 .type
= &nft_payload_type
,
652 .size
= NFT_EXPR_SIZE(sizeof(struct nft_payload_set
)),
653 .eval
= nft_payload_set_eval
,
654 .init
= nft_payload_set_init
,
655 .dump
= nft_payload_set_dump
,
658 static const struct nft_expr_ops
*
659 nft_payload_select_ops(const struct nft_ctx
*ctx
,
660 const struct nlattr
* const tb
[])
662 enum nft_payload_bases base
;
663 unsigned int offset
, len
;
665 if (tb
[NFTA_PAYLOAD_BASE
] == NULL
||
666 tb
[NFTA_PAYLOAD_OFFSET
] == NULL
||
667 tb
[NFTA_PAYLOAD_LEN
] == NULL
)
668 return ERR_PTR(-EINVAL
);
670 base
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_BASE
]));
672 case NFT_PAYLOAD_LL_HEADER
:
673 case NFT_PAYLOAD_NETWORK_HEADER
:
674 case NFT_PAYLOAD_TRANSPORT_HEADER
:
677 return ERR_PTR(-EOPNOTSUPP
);
680 if (tb
[NFTA_PAYLOAD_SREG
] != NULL
) {
681 if (tb
[NFTA_PAYLOAD_DREG
] != NULL
)
682 return ERR_PTR(-EINVAL
);
683 return &nft_payload_set_ops
;
686 if (tb
[NFTA_PAYLOAD_DREG
] == NULL
)
687 return ERR_PTR(-EINVAL
);
689 offset
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_OFFSET
]));
690 len
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_LEN
]));
692 if (len
<= 4 && is_power_of_2(len
) && IS_ALIGNED(offset
, len
) &&
693 base
!= NFT_PAYLOAD_LL_HEADER
)
694 return &nft_payload_fast_ops
;
696 return &nft_payload_ops
;
699 struct nft_expr_type nft_payload_type __read_mostly
= {
701 .select_ops
= nft_payload_select_ops
,
702 .policy
= nft_payload_policy
,
703 .maxattr
= NFTA_PAYLOAD_MAX
,
704 .owner
= THIS_MODULE
,