/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
12 #include <linux/kernel.h>
13 #include <linux/if_vlan.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/netlink.h>
17 #include <linux/netfilter.h>
18 #include <linux/netfilter/nf_tables.h>
19 #include <net/netfilter/nf_tables_core.h>
20 #include <net/netfilter/nf_tables.h>
21 /* For layer 4 checksum field offset. */
22 #include <linux/tcp.h>
23 #include <linux/udp.h>
24 #include <linux/icmpv6.h>
26 /* add vlan header into the user buffer for if tag was removed by offloads */
28 nft_payload_copy_vlan(u32
*d
, const struct sk_buff
*skb
, u8 offset
, u8 len
)
30 int mac_off
= skb_mac_header(skb
) - skb
->data
;
31 u8 vlan_len
, *vlanh
, *dst_u8
= (u8
*) d
;
32 struct vlan_ethhdr veth
;
35 if (offset
< ETH_HLEN
) {
36 u8 ethlen
= min_t(u8
, len
, ETH_HLEN
- offset
);
38 if (skb_copy_bits(skb
, mac_off
, &veth
, ETH_HLEN
))
41 veth
.h_vlan_proto
= skb
->vlan_proto
;
43 memcpy(dst_u8
, vlanh
+ offset
, ethlen
);
51 } else if (offset
>= VLAN_ETH_HLEN
) {
56 veth
.h_vlan_TCI
= htons(skb_vlan_tag_get(skb
));
57 veth
.h_vlan_encapsulated_proto
= skb
->protocol
;
61 vlan_len
= min_t(u8
, len
, VLAN_ETH_HLEN
- offset
);
62 memcpy(dst_u8
, vlanh
, vlan_len
);
70 return skb_copy_bits(skb
, offset
+ mac_off
, dst_u8
, len
) == 0;
73 static void nft_payload_eval(const struct nft_expr
*expr
,
74 struct nft_regs
*regs
,
75 const struct nft_pktinfo
*pkt
)
77 const struct nft_payload
*priv
= nft_expr_priv(expr
);
78 const struct sk_buff
*skb
= pkt
->skb
;
79 u32
*dest
= ®s
->data
[priv
->dreg
];
82 dest
[priv
->len
/ NFT_REG32_SIZE
] = 0;
84 case NFT_PAYLOAD_LL_HEADER
:
85 if (!skb_mac_header_was_set(skb
))
88 if (skb_vlan_tag_present(skb
)) {
89 if (!nft_payload_copy_vlan(dest
, skb
,
90 priv
->offset
, priv
->len
))
94 offset
= skb_mac_header(skb
) - skb
->data
;
96 case NFT_PAYLOAD_NETWORK_HEADER
:
97 offset
= skb_network_offset(skb
);
99 case NFT_PAYLOAD_TRANSPORT_HEADER
:
102 offset
= pkt
->xt
.thoff
;
107 offset
+= priv
->offset
;
109 if (skb_copy_bits(skb
, offset
, dest
, priv
->len
) < 0)
113 regs
->verdict
.code
= NFT_BREAK
;
116 static const struct nla_policy nft_payload_policy
[NFTA_PAYLOAD_MAX
+ 1] = {
117 [NFTA_PAYLOAD_SREG
] = { .type
= NLA_U32
},
118 [NFTA_PAYLOAD_DREG
] = { .type
= NLA_U32
},
119 [NFTA_PAYLOAD_BASE
] = { .type
= NLA_U32
},
120 [NFTA_PAYLOAD_OFFSET
] = { .type
= NLA_U32
},
121 [NFTA_PAYLOAD_LEN
] = { .type
= NLA_U32
},
122 [NFTA_PAYLOAD_CSUM_TYPE
] = { .type
= NLA_U32
},
123 [NFTA_PAYLOAD_CSUM_OFFSET
] = { .type
= NLA_U32
},
126 static int nft_payload_init(const struct nft_ctx
*ctx
,
127 const struct nft_expr
*expr
,
128 const struct nlattr
* const tb
[])
130 struct nft_payload
*priv
= nft_expr_priv(expr
);
132 priv
->base
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_BASE
]));
133 priv
->offset
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_OFFSET
]));
134 priv
->len
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_LEN
]));
135 priv
->dreg
= nft_parse_register(tb
[NFTA_PAYLOAD_DREG
]);
137 return nft_validate_register_store(ctx
, priv
->dreg
, NULL
,
138 NFT_DATA_VALUE
, priv
->len
);
141 static int nft_payload_dump(struct sk_buff
*skb
, const struct nft_expr
*expr
)
143 const struct nft_payload
*priv
= nft_expr_priv(expr
);
145 if (nft_dump_register(skb
, NFTA_PAYLOAD_DREG
, priv
->dreg
) ||
146 nla_put_be32(skb
, NFTA_PAYLOAD_BASE
, htonl(priv
->base
)) ||
147 nla_put_be32(skb
, NFTA_PAYLOAD_OFFSET
, htonl(priv
->offset
)) ||
148 nla_put_be32(skb
, NFTA_PAYLOAD_LEN
, htonl(priv
->len
)))
149 goto nla_put_failure
;
156 static const struct nft_expr_ops nft_payload_ops
= {
157 .type
= &nft_payload_type
,
158 .size
= NFT_EXPR_SIZE(sizeof(struct nft_payload
)),
159 .eval
= nft_payload_eval
,
160 .init
= nft_payload_init
,
161 .dump
= nft_payload_dump
,
164 const struct nft_expr_ops nft_payload_fast_ops
= {
165 .type
= &nft_payload_type
,
166 .size
= NFT_EXPR_SIZE(sizeof(struct nft_payload
)),
167 .eval
= nft_payload_eval
,
168 .init
= nft_payload_init
,
169 .dump
= nft_payload_dump
,
172 static inline void nft_csum_replace(__sum16
*sum
, __wsum fsum
, __wsum tsum
)
174 *sum
= csum_fold(csum_add(csum_sub(~csum_unfold(*sum
), fsum
), tsum
));
176 *sum
= CSUM_MANGLED_0
;
179 static bool nft_payload_udp_checksum(struct sk_buff
*skb
, unsigned int thoff
)
181 struct udphdr
*uh
, _uh
;
183 uh
= skb_header_pointer(skb
, thoff
, sizeof(_uh
), &_uh
);
190 static int nft_payload_l4csum_offset(const struct nft_pktinfo
*pkt
,
192 unsigned int *l4csum_offset
)
194 switch (pkt
->tprot
) {
196 *l4csum_offset
= offsetof(struct tcphdr
, check
);
199 if (!nft_payload_udp_checksum(skb
, pkt
->xt
.thoff
))
202 case IPPROTO_UDPLITE
:
203 *l4csum_offset
= offsetof(struct udphdr
, check
);
206 *l4csum_offset
= offsetof(struct icmp6hdr
, icmp6_cksum
);
212 *l4csum_offset
+= pkt
->xt
.thoff
;
216 static int nft_payload_l4csum_update(const struct nft_pktinfo
*pkt
,
218 __wsum fsum
, __wsum tsum
)
223 /* If we cannot determine layer 4 checksum offset or this packet doesn't
224 * require layer 4 checksum recalculation, skip this packet.
226 if (nft_payload_l4csum_offset(pkt
, skb
, &l4csum_offset
) < 0)
229 if (skb_copy_bits(skb
, l4csum_offset
, &sum
, sizeof(sum
)) < 0)
232 /* Checksum mangling for an arbitrary amount of bytes, based on
233 * inet_proto_csum_replace*() functions.
235 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
) {
236 nft_csum_replace(&sum
, fsum
, tsum
);
237 if (skb
->ip_summed
== CHECKSUM_COMPLETE
) {
238 skb
->csum
= ~csum_add(csum_sub(~(skb
->csum
), fsum
),
242 sum
= ~csum_fold(csum_add(csum_sub(csum_unfold(sum
), fsum
),
246 if (!skb_make_writable(skb
, l4csum_offset
+ sizeof(sum
)) ||
247 skb_store_bits(skb
, l4csum_offset
, &sum
, sizeof(sum
)) < 0)
253 static void nft_payload_set_eval(const struct nft_expr
*expr
,
254 struct nft_regs
*regs
,
255 const struct nft_pktinfo
*pkt
)
257 const struct nft_payload_set
*priv
= nft_expr_priv(expr
);
258 struct sk_buff
*skb
= pkt
->skb
;
259 const u32
*src
= ®s
->data
[priv
->sreg
];
260 int offset
, csum_offset
;
264 switch (priv
->base
) {
265 case NFT_PAYLOAD_LL_HEADER
:
266 if (!skb_mac_header_was_set(skb
))
268 offset
= skb_mac_header(skb
) - skb
->data
;
270 case NFT_PAYLOAD_NETWORK_HEADER
:
271 offset
= skb_network_offset(skb
);
273 case NFT_PAYLOAD_TRANSPORT_HEADER
:
276 offset
= pkt
->xt
.thoff
;
282 csum_offset
= offset
+ priv
->csum_offset
;
283 offset
+= priv
->offset
;
285 if (priv
->csum_type
== NFT_PAYLOAD_CSUM_INET
&&
286 (priv
->base
!= NFT_PAYLOAD_TRANSPORT_HEADER
||
287 skb
->ip_summed
!= CHECKSUM_PARTIAL
)) {
288 if (skb_copy_bits(skb
, csum_offset
, &sum
, sizeof(sum
)) < 0)
291 fsum
= skb_checksum(skb
, offset
, priv
->len
, 0);
292 tsum
= csum_partial(src
, priv
->len
, 0);
293 nft_csum_replace(&sum
, fsum
, tsum
);
295 if (!skb_make_writable(skb
, csum_offset
+ sizeof(sum
)) ||
296 skb_store_bits(skb
, csum_offset
, &sum
, sizeof(sum
)) < 0)
299 if (priv
->csum_flags
&&
300 nft_payload_l4csum_update(pkt
, skb
, fsum
, tsum
) < 0)
304 if (!skb_make_writable(skb
, max(offset
+ priv
->len
, 0)) ||
305 skb_store_bits(skb
, offset
, src
, priv
->len
) < 0)
310 regs
->verdict
.code
= NFT_BREAK
;
313 static int nft_payload_set_init(const struct nft_ctx
*ctx
,
314 const struct nft_expr
*expr
,
315 const struct nlattr
* const tb
[])
317 struct nft_payload_set
*priv
= nft_expr_priv(expr
);
319 priv
->base
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_BASE
]));
320 priv
->offset
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_OFFSET
]));
321 priv
->len
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_LEN
]));
322 priv
->sreg
= nft_parse_register(tb
[NFTA_PAYLOAD_SREG
]);
324 if (tb
[NFTA_PAYLOAD_CSUM_TYPE
])
326 ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_CSUM_TYPE
]));
327 if (tb
[NFTA_PAYLOAD_CSUM_OFFSET
])
329 ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_CSUM_OFFSET
]));
330 if (tb
[NFTA_PAYLOAD_CSUM_FLAGS
]) {
333 flags
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_CSUM_FLAGS
]));
334 if (flags
& ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR
)
337 priv
->csum_flags
= flags
;
340 switch (priv
->csum_type
) {
341 case NFT_PAYLOAD_CSUM_NONE
:
342 case NFT_PAYLOAD_CSUM_INET
:
348 return nft_validate_register_load(priv
->sreg
, priv
->len
);
351 static int nft_payload_set_dump(struct sk_buff
*skb
, const struct nft_expr
*expr
)
353 const struct nft_payload_set
*priv
= nft_expr_priv(expr
);
355 if (nft_dump_register(skb
, NFTA_PAYLOAD_SREG
, priv
->sreg
) ||
356 nla_put_be32(skb
, NFTA_PAYLOAD_BASE
, htonl(priv
->base
)) ||
357 nla_put_be32(skb
, NFTA_PAYLOAD_OFFSET
, htonl(priv
->offset
)) ||
358 nla_put_be32(skb
, NFTA_PAYLOAD_LEN
, htonl(priv
->len
)) ||
359 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_TYPE
, htonl(priv
->csum_type
)) ||
360 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_OFFSET
,
361 htonl(priv
->csum_offset
)) ||
362 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_FLAGS
, htonl(priv
->csum_flags
)))
363 goto nla_put_failure
;
370 static const struct nft_expr_ops nft_payload_set_ops
= {
371 .type
= &nft_payload_type
,
372 .size
= NFT_EXPR_SIZE(sizeof(struct nft_payload_set
)),
373 .eval
= nft_payload_set_eval
,
374 .init
= nft_payload_set_init
,
375 .dump
= nft_payload_set_dump
,
378 static const struct nft_expr_ops
*
379 nft_payload_select_ops(const struct nft_ctx
*ctx
,
380 const struct nlattr
* const tb
[])
382 enum nft_payload_bases base
;
383 unsigned int offset
, len
;
385 if (tb
[NFTA_PAYLOAD_BASE
] == NULL
||
386 tb
[NFTA_PAYLOAD_OFFSET
] == NULL
||
387 tb
[NFTA_PAYLOAD_LEN
] == NULL
)
388 return ERR_PTR(-EINVAL
);
390 base
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_BASE
]));
392 case NFT_PAYLOAD_LL_HEADER
:
393 case NFT_PAYLOAD_NETWORK_HEADER
:
394 case NFT_PAYLOAD_TRANSPORT_HEADER
:
397 return ERR_PTR(-EOPNOTSUPP
);
400 if (tb
[NFTA_PAYLOAD_SREG
] != NULL
) {
401 if (tb
[NFTA_PAYLOAD_DREG
] != NULL
)
402 return ERR_PTR(-EINVAL
);
403 return &nft_payload_set_ops
;
406 if (tb
[NFTA_PAYLOAD_DREG
] == NULL
)
407 return ERR_PTR(-EINVAL
);
409 offset
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_OFFSET
]));
410 len
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_LEN
]));
412 if (len
<= 4 && is_power_of_2(len
) && IS_ALIGNED(offset
, len
) &&
413 base
!= NFT_PAYLOAD_LL_HEADER
)
414 return &nft_payload_fast_ops
;
416 return &nft_payload_ops
;
419 struct nft_expr_type nft_payload_type __read_mostly
= {
421 .select_ops
= nft_payload_select_ops
,
422 .policy
= nft_payload_policy
,
423 .maxattr
= NFTA_PAYLOAD_MAX
,
424 .owner
= THIS_MODULE
,