]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - net/sched/act_tunnel_key.c
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[mirror_ubuntu-jammy-kernel.git] / net / sched / act_tunnel_key.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
4 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
5 */
6
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/skbuff.h>
11 #include <linux/rtnetlink.h>
12 #include <net/geneve.h>
13 #include <net/vxlan.h>
14 #include <net/erspan.h>
15 #include <net/netlink.h>
16 #include <net/pkt_sched.h>
17 #include <net/dst.h>
18 #include <net/pkt_cls.h>
19
20 #include <linux/tc_act/tc_tunnel_key.h>
21 #include <net/tc_act/tc_tunnel_key.h>
22
23 static unsigned int tunnel_key_net_id;
24 static struct tc_action_ops act_tunnel_key_ops;
25
26 static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
27 struct tcf_result *res)
28 {
29 struct tcf_tunnel_key *t = to_tunnel_key(a);
30 struct tcf_tunnel_key_params *params;
31 int action;
32
33 params = rcu_dereference_bh(t->params);
34
35 tcf_lastuse_update(&t->tcf_tm);
36 tcf_action_update_bstats(&t->common, skb);
37 action = READ_ONCE(t->tcf_action);
38
39 switch (params->tcft_action) {
40 case TCA_TUNNEL_KEY_ACT_RELEASE:
41 skb_dst_drop(skb);
42 break;
43 case TCA_TUNNEL_KEY_ACT_SET:
44 skb_dst_drop(skb);
45 skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
46 break;
47 default:
48 WARN_ONCE(1, "Bad tunnel_key action %d.\n",
49 params->tcft_action);
50 break;
51 }
52
53 return action;
54 }
55
/* Policy for the TCA_TUNNEL_KEY_ENC_OPTS container.  The UNSPEC entry's
 * strict_start_type keeps parsing lenient for GENEVE (pre-dates strict
 * validation) while attributes from VXLAN onward are validated strictly.
 */
static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC] = {
		.strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
};
64
/* One geneve option: 16-bit class, 8-bit type, up to 128 bytes of data. */
static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
						 .len = 128 },
};
72
/* VXLAN option: 32-bit Group Based Policy (GBP) value. */
static const struct nla_policy
vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
};
77
/* ERSPAN option: version selects which of index (v1) or dir/hwid (v2)
 * are required; see tunnel_key_copy_erspan_opt().
 */
static const struct nla_policy
erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
};
85
86 static int
87 tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
88 struct netlink_ext_ack *extack)
89 {
90 struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
91 int err, data_len, opt_len;
92 u8 *data;
93
94 err = nla_parse_nested_deprecated(tb,
95 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
96 nla, geneve_opt_policy, extack);
97 if (err < 0)
98 return err;
99
100 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
101 !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
102 !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
103 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
104 return -EINVAL;
105 }
106
107 data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
108 data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
109 if (data_len < 4) {
110 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
111 return -ERANGE;
112 }
113 if (data_len % 4) {
114 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
115 return -ERANGE;
116 }
117
118 opt_len = sizeof(struct geneve_opt) + data_len;
119 if (dst) {
120 struct geneve_opt *opt = dst;
121
122 WARN_ON(dst_len < opt_len);
123
124 opt->opt_class =
125 nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
126 opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
127 opt->length = data_len / 4; /* length is in units of 4 bytes */
128 opt->r1 = 0;
129 opt->r2 = 0;
130 opt->r3 = 0;
131
132 memcpy(opt + 1, data, data_len);
133 }
134
135 return opt_len;
136 }
137
138 static int
139 tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
140 struct netlink_ext_ack *extack)
141 {
142 struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
143 int err;
144
145 err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
146 vxlan_opt_policy, extack);
147 if (err < 0)
148 return err;
149
150 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
151 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
152 return -EINVAL;
153 }
154
155 if (dst) {
156 struct vxlan_metadata *md = dst;
157
158 md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
159 }
160
161 return sizeof(struct vxlan_metadata);
162 }
163
164 static int
165 tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
166 struct netlink_ext_ack *extack)
167 {
168 struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
169 int err;
170 u8 ver;
171
172 err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
173 erspan_opt_policy, extack);
174 if (err < 0)
175 return err;
176
177 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
178 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
179 return -EINVAL;
180 }
181
182 ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
183 if (ver == 1) {
184 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
185 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
186 return -EINVAL;
187 }
188 } else if (ver == 2) {
189 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
190 !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
191 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
192 return -EINVAL;
193 }
194 } else {
195 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
196 return -EINVAL;
197 }
198
199 if (dst) {
200 struct erspan_metadata *md = dst;
201
202 md->version = ver;
203 if (ver == 1) {
204 nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
205 md->u.index = nla_get_be32(nla);
206 } else {
207 nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
208 md->u.md2.dir = nla_get_u8(nla);
209 nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
210 set_hwid(&md->u.md2, nla_get_u8(nla));
211 }
212 }
213
214 return sizeof(struct erspan_metadata);
215 }
216
/* Walk the TCA_TUNNEL_KEY_ENC_OPTS container and copy each option into
 * @dst (when non-NULL), advancing through the buffer as options are
 * encoded.  With @dst == NULL only validation and length accounting are
 * performed.  All options within one action must share a single tunnel
 * type.  Returns the total encoded length or a negative errno.
 */
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
				      enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			/* Multiple geneve options may be concatenated, but
			 * mixing with other tunnel option types is invalid.
			 */
			if (type && type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			if (opts_len > IP_TUNNEL_OPTS_MAX) {
				NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
				return -EINVAL;
			}
			/* Advance the destination cursor past this option. */
			if (dst) {
				dst_len -= opt_len;
				dst += opt_len;
			}
			type = TUNNEL_GENEVE_OPT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
			/* Exactly one vxlan option is allowed. */
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
							    dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_VXLAN_OPT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
			/* Exactly one erspan option is allowed. */
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_erspan_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_ERSPAN_OPT;
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	/* rem != 0 means nla_for_each_attr() stopped on a partial attr. */
	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}
289
/* Dry run of tunnel_key_copy_opts(): validate the options attribute and
 * return the buffer size needed for the encoded options (or -errno).
 */
static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}
295
/* Copy the parsed encap options into the tunnel metadata and set the
 * matching TUNNEL_*_OPT flag.  @opts_len must be the length previously
 * returned by tunnel_key_get_opts_len() for the same attribute, and
 * @info must have been allocated with that much option room.
 */
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	/* The container holds exactly one option type (enforced by
	 * tunnel_key_copy_opts()); dispatch on the first nested attr.
	 */
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}
330
/* Policy for the top-level TCA_TUNNEL_KEY_* attributes. */
static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS]	    = { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID]   = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
	[TCA_TUNNEL_KEY_NO_CSUM]      = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS]     = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS]      = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL]      = { .type = NLA_U8 },
};
344
345 static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
346 {
347 if (!p)
348 return;
349 if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
350 dst_release(&p->tcft_enc_metadata->dst);
351
352 kfree_rcu(p, rcu);
353 }
354
355 static int tunnel_key_init(struct net *net, struct nlattr *nla,
356 struct nlattr *est, struct tc_action **a,
357 int ovr, int bind, bool rtnl_held,
358 struct tcf_proto *tp, u32 act_flags,
359 struct netlink_ext_ack *extack)
360 {
361 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
362 struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
363 struct tcf_tunnel_key_params *params_new;
364 struct metadata_dst *metadata = NULL;
365 struct tcf_chain *goto_ch = NULL;
366 struct tc_tunnel_key *parm;
367 struct tcf_tunnel_key *t;
368 bool exists = false;
369 __be16 dst_port = 0;
370 __be64 key_id = 0;
371 int opts_len = 0;
372 __be16 flags = 0;
373 u8 tos, ttl;
374 int ret = 0;
375 u32 index;
376 int err;
377
378 if (!nla) {
379 NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
380 return -EINVAL;
381 }
382
383 err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
384 tunnel_key_policy, extack);
385 if (err < 0) {
386 NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
387 return err;
388 }
389
390 if (!tb[TCA_TUNNEL_KEY_PARMS]) {
391 NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
392 return -EINVAL;
393 }
394
395 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
396 index = parm->index;
397 err = tcf_idr_check_alloc(tn, &index, a, bind);
398 if (err < 0)
399 return err;
400 exists = err;
401 if (exists && bind)
402 return 0;
403
404 switch (parm->t_action) {
405 case TCA_TUNNEL_KEY_ACT_RELEASE:
406 break;
407 case TCA_TUNNEL_KEY_ACT_SET:
408 if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
409 __be32 key32;
410
411 key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
412 key_id = key32_to_tunnel_id(key32);
413 flags = TUNNEL_KEY;
414 }
415
416 flags |= TUNNEL_CSUM;
417 if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
418 nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
419 flags &= ~TUNNEL_CSUM;
420
421 if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
422 dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
423
424 if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
425 opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
426 extack);
427 if (opts_len < 0) {
428 ret = opts_len;
429 goto err_out;
430 }
431 }
432
433 tos = 0;
434 if (tb[TCA_TUNNEL_KEY_ENC_TOS])
435 tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
436 ttl = 0;
437 if (tb[TCA_TUNNEL_KEY_ENC_TTL])
438 ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
439
440 if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
441 tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
442 __be32 saddr;
443 __be32 daddr;
444
445 saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
446 daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
447
448 metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
449 dst_port, flags,
450 key_id, opts_len);
451 } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
452 tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
453 struct in6_addr saddr;
454 struct in6_addr daddr;
455
456 saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
457 daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
458
459 metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
460 0, flags,
461 key_id, 0);
462 } else {
463 NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
464 ret = -EINVAL;
465 goto err_out;
466 }
467
468 if (!metadata) {
469 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
470 ret = -ENOMEM;
471 goto err_out;
472 }
473
474 #ifdef CONFIG_DST_CACHE
475 ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
476 if (ret)
477 goto release_tun_meta;
478 #endif
479
480 if (opts_len) {
481 ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
482 &metadata->u.tun_info,
483 opts_len, extack);
484 if (ret < 0)
485 goto release_tun_meta;
486 }
487
488 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
489 break;
490 default:
491 NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
492 ret = -EINVAL;
493 goto err_out;
494 }
495
496 if (!exists) {
497 ret = tcf_idr_create_from_flags(tn, index, est, a,
498 &act_tunnel_key_ops, bind,
499 act_flags);
500 if (ret) {
501 NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
502 goto release_tun_meta;
503 }
504
505 ret = ACT_P_CREATED;
506 } else if (!ovr) {
507 NL_SET_ERR_MSG(extack, "TC IDR already exists");
508 ret = -EEXIST;
509 goto release_tun_meta;
510 }
511
512 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
513 if (err < 0) {
514 ret = err;
515 exists = true;
516 goto release_tun_meta;
517 }
518 t = to_tunnel_key(*a);
519
520 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
521 if (unlikely(!params_new)) {
522 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
523 ret = -ENOMEM;
524 exists = true;
525 goto put_chain;
526 }
527 params_new->tcft_action = parm->t_action;
528 params_new->tcft_enc_metadata = metadata;
529
530 spin_lock_bh(&t->tcf_lock);
531 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
532 params_new = rcu_replace_pointer(t->params, params_new,
533 lockdep_is_held(&t->tcf_lock));
534 spin_unlock_bh(&t->tcf_lock);
535 tunnel_key_release_params(params_new);
536 if (goto_ch)
537 tcf_chain_put_by_act(goto_ch);
538
539 if (ret == ACT_P_CREATED)
540 tcf_idr_insert(tn, *a);
541
542 return ret;
543
544 put_chain:
545 if (goto_ch)
546 tcf_chain_put_by_act(goto_ch);
547
548 release_tun_meta:
549 if (metadata)
550 dst_release(&metadata->dst);
551
552 err_out:
553 if (exists)
554 tcf_idr_release(*a, bind);
555 else
556 tcf_idr_cleanup(tn, index);
557 return ret;
558 }
559
560 static void tunnel_key_release(struct tc_action *a)
561 {
562 struct tcf_tunnel_key *t = to_tunnel_key(a);
563 struct tcf_tunnel_key_params *params;
564
565 params = rcu_dereference_protected(t->params, 1);
566 tunnel_key_release_params(params);
567 }
568
569 static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
570 const struct ip_tunnel_info *info)
571 {
572 int len = info->options_len;
573 u8 *src = (u8 *)(info + 1);
574 struct nlattr *start;
575
576 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
577 if (!start)
578 return -EMSGSIZE;
579
580 while (len > 0) {
581 struct geneve_opt *opt = (struct geneve_opt *)src;
582
583 if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
584 opt->opt_class) ||
585 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
586 opt->type) ||
587 nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
588 opt->length * 4, opt + 1)) {
589 nla_nest_cancel(skb, start);
590 return -EMSGSIZE;
591 }
592
593 len -= sizeof(struct geneve_opt) + opt->length * 4;
594 src += sizeof(struct geneve_opt) + opt->length * 4;
595 }
596
597 nla_nest_end(skb, start);
598 return 0;
599 }
600
601 static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
602 const struct ip_tunnel_info *info)
603 {
604 struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
605 struct nlattr *start;
606
607 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
608 if (!start)
609 return -EMSGSIZE;
610
611 if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
612 nla_nest_cancel(skb, start);
613 return -EMSGSIZE;
614 }
615
616 nla_nest_end(skb, start);
617 return 0;
618 }
619
620 static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
621 const struct ip_tunnel_info *info)
622 {
623 struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
624 struct nlattr *start;
625
626 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
627 if (!start)
628 return -EMSGSIZE;
629
630 if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
631 goto err;
632
633 if (md->version == 1 &&
634 nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
635 goto err;
636
637 if (md->version == 2 &&
638 (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
639 md->u.md2.dir) ||
640 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
641 get_hwid(&md->u.md2))))
642 goto err;
643
644 nla_nest_end(skb, start);
645 return 0;
646 err:
647 nla_nest_cancel(skb, start);
648 return -EMSGSIZE;
649 }
650
651 static int tunnel_key_opts_dump(struct sk_buff *skb,
652 const struct ip_tunnel_info *info)
653 {
654 struct nlattr *start;
655 int err = -EINVAL;
656
657 if (!info->options_len)
658 return 0;
659
660 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
661 if (!start)
662 return -EMSGSIZE;
663
664 if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
665 err = tunnel_key_geneve_opts_dump(skb, info);
666 if (err)
667 goto err_out;
668 } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
669 err = tunnel_key_vxlan_opts_dump(skb, info);
670 if (err)
671 goto err_out;
672 } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
673 err = tunnel_key_erspan_opts_dump(skb, info);
674 if (err)
675 goto err_out;
676 } else {
677 err_out:
678 nla_nest_cancel(skb, start);
679 return err;
680 }
681
682 nla_nest_end(skb, start);
683 return 0;
684 }
685
686 static int tunnel_key_dump_addresses(struct sk_buff *skb,
687 const struct ip_tunnel_info *info)
688 {
689 unsigned short family = ip_tunnel_info_af(info);
690
691 if (family == AF_INET) {
692 __be32 saddr = info->key.u.ipv4.src;
693 __be32 daddr = info->key.u.ipv4.dst;
694
695 if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
696 !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
697 return 0;
698 }
699
700 if (family == AF_INET6) {
701 const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
702 const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
703
704 if (!nla_put_in6_addr(skb,
705 TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
706 !nla_put_in6_addr(skb,
707 TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
708 return 0;
709 }
710
711 return -EINVAL;
712 }
713
/* Netlink ->dump callback: serialize the action's configuration into
 * @skb.  The action lock is held across the dump so params cannot be
 * replaced underneath us.  Returns the skb length on success or -1
 * after trimming any partially written attributes.
 */
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index    = t->tcf_index,
		.refcnt   = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action   = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	/* Only SET actions carry tunnel metadata worth dumping. */
	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		if (((key->tun_flags & TUNNEL_KEY) &&
		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    (key->tp_dst &&
		      nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
				   key->tp_dst)) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !(key->tun_flags & TUNNEL_CSUM)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	/* Roll back any attributes written before the failure. */
	nlmsg_trim(skb, b);
	return -1;
}
774
775 static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
776 struct netlink_callback *cb, int type,
777 const struct tc_action_ops *ops,
778 struct netlink_ext_ack *extack)
779 {
780 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
781
782 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
783 }
784
785 static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
786 {
787 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
788
789 return tcf_idr_search(tn, a, index);
790 }
791
/* tc action ops table for "tunnel_key"; registered with the act core
 * in tunnel_key_init_module().
 */
static struct tc_action_ops act_tunnel_key_ops = {
	.kind		=	"tunnel_key",
	.id		=	TCA_ID_TUNNEL_KEY,
	.owner		=	THIS_MODULE,
	.act		=	tunnel_key_act,
	.dump		=	tunnel_key_dump,
	.init		=	tunnel_key_init,
	.cleanup	=	tunnel_key_release,
	.walk		=	tunnel_key_walker,
	.lookup		=	tunnel_key_search,
	.size		=	sizeof(struct tcf_tunnel_key),
};
804
805 static __net_init int tunnel_key_init_net(struct net *net)
806 {
807 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
808
809 return tc_action_net_init(net, tn, &act_tunnel_key_ops);
810 }
811
/* Per-netns teardown: release all tunnel_key actions in the exiting
 * namespaces (batched).
 */
static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, tunnel_key_net_id);
}
816
/* Pernet registration: one struct tc_action_net per namespace, with
 * its id stored in tunnel_key_net_id.
 */
static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id   = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};
823
/* Module entry: register the action ops and pernet hooks with the core. */
static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}
828
/* Module exit: unregister the action ops and pernet hooks. */
static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}
833
834 module_init(tunnel_key_init_module);
835 module_exit(tunnel_key_cleanup_module);
836
837 MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
838 MODULE_DESCRIPTION("ip tunnel manipulation actions");
839 MODULE_LICENSE("GPL v2");