/*
 * net/netfilter/nft_payload.c
 * (mirror_ubuntu-artful-kernel.git, blob 98fb5d7b8087f60f5b97109b2570169631286590)
 */
1 /*
2 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Development of this code funded by Astaro AG (http://www.astaro.com/)
9 */
10
11 #include <linux/kernel.h>
12 #include <linux/if_vlan.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/netlink.h>
16 #include <linux/netfilter.h>
17 #include <linux/netfilter/nf_tables.h>
18 #include <net/netfilter/nf_tables_core.h>
19 #include <net/netfilter/nf_tables.h>
20
/* Copy @len bytes starting at @offset of the on-the-wire link-layer header
 * into the user buffer @d, re-inserting the VLAN header that hardware
 * offload stripped from the skb (tag lives in skb metadata, not in the
 * packet data).  Offsets are expressed in terms of the *tagged* frame:
 * bytes [0, ETH_HLEN) are the MAC addresses + where the TPID sits,
 * [ETH_HLEN, VLAN_ETH_HLEN) is the 4-byte tag region, and everything at
 * or past VLAN_ETH_HLEN maps back to skb data at (offset - VLAN_HLEN).
 * Returns true on success, false if the packet is too short.
 */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	/* mac_off can be negative-free arithmetic: distance from current
	 * skb->data back to the MAC header. */
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		/* Rebuild the first ETH_HLEN bytes in a stack copy, then
		 * patch in the TPID from skb metadata before copying out. */
		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;	/* continue from the tag region */
	} else if (offset >= VLAN_ETH_HLEN) {
		/* Entirely past the tag: the skb data has no tag, so the
		 * wire offset maps back by VLAN_HLEN. */
		offset -= VLAN_HLEN;
		goto skip;
	}

	/* Fill in the remaining tag fields from skb metadata: the TCI and
	 * the encapsulated ethertype (skb->protocol after tag stripping). */
	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	/* Copy whatever portion of [offset, VLAN_ETH_HLEN) was requested. */
	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
 skip:
	/* Remainder comes straight from skb data.  NOTE(review): on the
	 * fall-through path offset is not advanced past vlan_len; this is
	 * only correct when offset ended up at ETH_HLEN — TODO confirm
	 * against later upstream fixes to this function. */
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
67
68 static void nft_payload_eval(const struct nft_expr *expr,
69 struct nft_regs *regs,
70 const struct nft_pktinfo *pkt)
71 {
72 const struct nft_payload *priv = nft_expr_priv(expr);
73 const struct sk_buff *skb = pkt->skb;
74 u32 *dest = &regs->data[priv->dreg];
75 int offset;
76
77 dest[priv->len / NFT_REG32_SIZE] = 0;
78 switch (priv->base) {
79 case NFT_PAYLOAD_LL_HEADER:
80 if (!skb_mac_header_was_set(skb))
81 goto err;
82
83 if (skb_vlan_tag_present(skb)) {
84 if (!nft_payload_copy_vlan(dest, skb,
85 priv->offset, priv->len))
86 goto err;
87 return;
88 }
89 offset = skb_mac_header(skb) - skb->data;
90 break;
91 case NFT_PAYLOAD_NETWORK_HEADER:
92 offset = skb_network_offset(skb);
93 break;
94 case NFT_PAYLOAD_TRANSPORT_HEADER:
95 if (!pkt->tprot_set)
96 goto err;
97 offset = pkt->xt.thoff;
98 break;
99 default:
100 BUG();
101 }
102 offset += priv->offset;
103
104 if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
105 goto err;
106 return;
107 err:
108 regs->verdict.code = NFT_BREAK;
109 }
110
/* Netlink attribute policy shared by the payload load and set expressions:
 * every attribute is a 32-bit value (big-endian on the wire, converted with
 * ntohl at init time). */
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
};
120
121 static int nft_payload_init(const struct nft_ctx *ctx,
122 const struct nft_expr *expr,
123 const struct nlattr * const tb[])
124 {
125 struct nft_payload *priv = nft_expr_priv(expr);
126
127 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
128 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
129 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
130 priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
131
132 return nft_validate_register_store(ctx, priv->dreg, NULL,
133 NFT_DATA_VALUE, priv->len);
134 }
135
136 static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
137 {
138 const struct nft_payload *priv = nft_expr_priv(expr);
139
140 if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
141 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
142 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
143 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
144 goto nla_put_failure;
145 return 0;
146
147 nla_put_failure:
148 return -1;
149 }
150
/* Generic payload-load expression ops, used when the fast-path criteria in
 * nft_payload_select_ops() are not met. */
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};
158
/* Fast-path payload-load ops, selected for small power-of-two aligned loads
 * from network/transport headers.  The callbacks are deliberately the same
 * as nft_payload_ops; non-static so the core can compare against this ops
 * pointer — presumably to inline the load on its fast path (declared via
 * nf_tables_core.h) — TODO confirm against nf_tables_core.c. */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};
166
/* Write priv->len bytes from source register priv->sreg into the packet at
 * the selected header base + priv->offset, optionally fixing up an Internet
 * checksum at base + priv->csum_offset.  Any failure sets NFT_BREAK. */
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;
	__sum16 sum;

	/* Resolve the base header offset, exactly as in nft_payload_eval(). */
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	/* csum_offset is relative to the header base, NOT to priv->offset,
	 * so compute it before offset is advanced. */
	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	/* Incrementally update an inet checksum unless the checksum is still
	 * pending hardware completion (CHECKSUM_PARTIAL on the transport
	 * header — presumably the device recomputes it later; confirm). */
	if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
			goto err;

		/* fsum: checksum of the bytes being replaced;
		 * tsum: checksum of the replacement bytes.
		 * new = fold(~old - fsum + tsum) — incremental update. */
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);
		sum = csum_fold(csum_add(csum_sub(~csum_unfold(sum), fsum),
					 tsum));
		/* A computed checksum of 0 is transmitted as all-ones. */
		if (sum == 0)
			sum = CSUM_MANGLED_0;

		if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
		    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
			goto err;
	}

	/* max(..., 0) guards skb_make_writable against a negative length
	 * from int overflow/underflow in the offset arithmetic. */
	if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
225
226 static int nft_payload_set_init(const struct nft_ctx *ctx,
227 const struct nft_expr *expr,
228 const struct nlattr * const tb[])
229 {
230 struct nft_payload_set *priv = nft_expr_priv(expr);
231
232 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
233 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
234 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
235 priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
236
237 if (tb[NFTA_PAYLOAD_CSUM_TYPE])
238 priv->csum_type =
239 ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
240 if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
241 priv->csum_offset =
242 ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
243
244 switch (priv->csum_type) {
245 case NFT_PAYLOAD_CSUM_NONE:
246 case NFT_PAYLOAD_CSUM_INET:
247 break;
248 default:
249 return -EOPNOTSUPP;
250 }
251
252 return nft_validate_register_load(priv->sreg, priv->len);
253 }
254
255 static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
256 {
257 const struct nft_payload_set *priv = nft_expr_priv(expr);
258
259 if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
260 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
261 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
262 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
263 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
264 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
265 htonl(priv->csum_offset)))
266 goto nla_put_failure;
267 return 0;
268
269 nla_put_failure:
270 return -1;
271 }
272
/* Payload-mangle expression ops, selected when NFTA_PAYLOAD_SREG is given. */
static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};
280
281 static const struct nft_expr_ops *
282 nft_payload_select_ops(const struct nft_ctx *ctx,
283 const struct nlattr * const tb[])
284 {
285 enum nft_payload_bases base;
286 unsigned int offset, len;
287
288 if (tb[NFTA_PAYLOAD_BASE] == NULL ||
289 tb[NFTA_PAYLOAD_OFFSET] == NULL ||
290 tb[NFTA_PAYLOAD_LEN] == NULL)
291 return ERR_PTR(-EINVAL);
292
293 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
294 switch (base) {
295 case NFT_PAYLOAD_LL_HEADER:
296 case NFT_PAYLOAD_NETWORK_HEADER:
297 case NFT_PAYLOAD_TRANSPORT_HEADER:
298 break;
299 default:
300 return ERR_PTR(-EOPNOTSUPP);
301 }
302
303 if (tb[NFTA_PAYLOAD_SREG] != NULL) {
304 if (tb[NFTA_PAYLOAD_DREG] != NULL)
305 return ERR_PTR(-EINVAL);
306 return &nft_payload_set_ops;
307 }
308
309 if (tb[NFTA_PAYLOAD_DREG] == NULL)
310 return ERR_PTR(-EINVAL);
311
312 offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
313 len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
314
315 if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
316 base != NFT_PAYLOAD_LL_HEADER)
317 return &nft_payload_fast_ops;
318 else
319 return &nft_payload_ops;
320 }
321
/* Expression type descriptor for "payload"; non-static so the nf_tables
 * core can register it (declared via the nf_tables headers). */
struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};