/*
 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>
25 static unsigned int tunnel_key_net_id
;
26 static struct tc_action_ops act_tunnel_key_ops
;
28 static int tunnel_key_act(struct sk_buff
*skb
, const struct tc_action
*a
,
29 struct tcf_result
*res
)
31 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
32 struct tcf_tunnel_key_params
*params
;
35 params
= rcu_dereference_bh(t
->params
);
37 tcf_lastuse_update(&t
->tcf_tm
);
38 bstats_cpu_update(this_cpu_ptr(t
->common
.cpu_bstats
), skb
);
39 action
= READ_ONCE(t
->tcf_action
);
41 switch (params
->tcft_action
) {
42 case TCA_TUNNEL_KEY_ACT_RELEASE
:
45 case TCA_TUNNEL_KEY_ACT_SET
:
47 skb_dst_set(skb
, dst_clone(¶ms
->tcft_enc_metadata
->dst
));
50 WARN_ONCE(1, "Bad tunnel_key action %d.\n",
58 static const struct nla_policy
59 enc_opts_policy
[TCA_TUNNEL_KEY_ENC_OPTS_MAX
+ 1] = {
60 [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
] = { .type
= NLA_NESTED
},
63 static const struct nla_policy
64 geneve_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
+ 1] = {
65 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
] = { .type
= NLA_U16
},
66 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
] = { .type
= NLA_U8
},
67 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
] = { .type
= NLA_BINARY
,
72 tunnel_key_copy_geneve_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
73 struct netlink_ext_ack
*extack
)
75 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
+ 1];
76 int err
, data_len
, opt_len
;
79 err
= nla_parse_nested_deprecated(tb
,
80 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
,
81 nla
, geneve_opt_policy
, extack
);
85 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
] ||
86 !tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
] ||
87 !tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]) {
88 NL_SET_ERR_MSG(extack
, "Missing tunnel key geneve option class, type or data");
92 data
= nla_data(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]);
93 data_len
= nla_len(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]);
95 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is less than 4 bytes long");
99 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is not a multiple of 4 bytes long");
103 opt_len
= sizeof(struct geneve_opt
) + data_len
;
105 struct geneve_opt
*opt
= dst
;
107 WARN_ON(dst_len
< opt_len
);
110 nla_get_be16(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
]);
111 opt
->type
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
]);
112 opt
->length
= data_len
/ 4; /* length is in units of 4 bytes */
117 memcpy(opt
+ 1, data
, data_len
);
123 static int tunnel_key_copy_opts(const struct nlattr
*nla
, u8
*dst
,
124 int dst_len
, struct netlink_ext_ack
*extack
)
126 int err
, rem
, opt_len
, len
= nla_len(nla
), opts_len
= 0;
127 const struct nlattr
*attr
, *head
= nla_data(nla
);
129 err
= nla_validate_deprecated(head
, len
, TCA_TUNNEL_KEY_ENC_OPTS_MAX
,
130 enc_opts_policy
, extack
);
134 nla_for_each_attr(attr
, head
, len
, rem
) {
135 switch (nla_type(attr
)) {
136 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
:
137 opt_len
= tunnel_key_copy_geneve_opt(attr
, dst
,
151 NL_SET_ERR_MSG(extack
, "Empty list of tunnel options");
156 NL_SET_ERR_MSG(extack
, "Trailing data after parsing tunnel key options attributes");
163 static int tunnel_key_get_opts_len(struct nlattr
*nla
,
164 struct netlink_ext_ack
*extack
)
166 return tunnel_key_copy_opts(nla
, NULL
, 0, extack
);
169 static int tunnel_key_opts_set(struct nlattr
*nla
, struct ip_tunnel_info
*info
,
170 int opts_len
, struct netlink_ext_ack
*extack
)
172 info
->options_len
= opts_len
;
173 switch (nla_type(nla_data(nla
))) {
174 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
:
175 #if IS_ENABLED(CONFIG_INET)
176 info
->key
.tun_flags
|= TUNNEL_GENEVE_OPT
;
177 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
180 return -EAFNOSUPPORT
;
183 NL_SET_ERR_MSG(extack
, "Cannot set tunnel options for unknown tunnel type");
188 static const struct nla_policy tunnel_key_policy
[TCA_TUNNEL_KEY_MAX
+ 1] = {
189 [TCA_TUNNEL_KEY_PARMS
] = { .len
= sizeof(struct tc_tunnel_key
) },
190 [TCA_TUNNEL_KEY_ENC_IPV4_SRC
] = { .type
= NLA_U32
},
191 [TCA_TUNNEL_KEY_ENC_IPV4_DST
] = { .type
= NLA_U32
},
192 [TCA_TUNNEL_KEY_ENC_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
193 [TCA_TUNNEL_KEY_ENC_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
194 [TCA_TUNNEL_KEY_ENC_KEY_ID
] = { .type
= NLA_U32
},
195 [TCA_TUNNEL_KEY_ENC_DST_PORT
] = {.type
= NLA_U16
},
196 [TCA_TUNNEL_KEY_NO_CSUM
] = { .type
= NLA_U8
},
197 [TCA_TUNNEL_KEY_ENC_OPTS
] = { .type
= NLA_NESTED
},
198 [TCA_TUNNEL_KEY_ENC_TOS
] = { .type
= NLA_U8
},
199 [TCA_TUNNEL_KEY_ENC_TTL
] = { .type
= NLA_U8
},
202 static void tunnel_key_release_params(struct tcf_tunnel_key_params
*p
)
206 if (p
->tcft_action
== TCA_TUNNEL_KEY_ACT_SET
)
207 dst_release(&p
->tcft_enc_metadata
->dst
);
212 static int tunnel_key_init(struct net
*net
, struct nlattr
*nla
,
213 struct nlattr
*est
, struct tc_action
**a
,
214 int ovr
, int bind
, bool rtnl_held
,
215 struct tcf_proto
*tp
,
216 struct netlink_ext_ack
*extack
)
218 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
219 struct nlattr
*tb
[TCA_TUNNEL_KEY_MAX
+ 1];
220 struct tcf_tunnel_key_params
*params_new
;
221 struct metadata_dst
*metadata
= NULL
;
222 struct tcf_chain
*goto_ch
= NULL
;
223 struct tc_tunnel_key
*parm
;
224 struct tcf_tunnel_key
*t
;
235 NL_SET_ERR_MSG(extack
, "Tunnel requires attributes to be passed");
239 err
= nla_parse_nested_deprecated(tb
, TCA_TUNNEL_KEY_MAX
, nla
,
240 tunnel_key_policy
, extack
);
242 NL_SET_ERR_MSG(extack
, "Failed to parse nested tunnel key attributes");
246 if (!tb
[TCA_TUNNEL_KEY_PARMS
]) {
247 NL_SET_ERR_MSG(extack
, "Missing tunnel key parameters");
251 parm
= nla_data(tb
[TCA_TUNNEL_KEY_PARMS
]);
252 err
= tcf_idr_check_alloc(tn
, &parm
->index
, a
, bind
);
259 switch (parm
->t_action
) {
260 case TCA_TUNNEL_KEY_ACT_RELEASE
:
262 case TCA_TUNNEL_KEY_ACT_SET
:
263 if (tb
[TCA_TUNNEL_KEY_ENC_KEY_ID
]) {
266 key32
= nla_get_be32(tb
[TCA_TUNNEL_KEY_ENC_KEY_ID
]);
267 key_id
= key32_to_tunnel_id(key32
);
271 flags
|= TUNNEL_CSUM
;
272 if (tb
[TCA_TUNNEL_KEY_NO_CSUM
] &&
273 nla_get_u8(tb
[TCA_TUNNEL_KEY_NO_CSUM
]))
274 flags
&= ~TUNNEL_CSUM
;
276 if (tb
[TCA_TUNNEL_KEY_ENC_DST_PORT
])
277 dst_port
= nla_get_be16(tb
[TCA_TUNNEL_KEY_ENC_DST_PORT
]);
279 if (tb
[TCA_TUNNEL_KEY_ENC_OPTS
]) {
280 opts_len
= tunnel_key_get_opts_len(tb
[TCA_TUNNEL_KEY_ENC_OPTS
],
289 if (tb
[TCA_TUNNEL_KEY_ENC_TOS
])
290 tos
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_TOS
]);
292 if (tb
[TCA_TUNNEL_KEY_ENC_TTL
])
293 ttl
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_TTL
]);
295 if (tb
[TCA_TUNNEL_KEY_ENC_IPV4_SRC
] &&
296 tb
[TCA_TUNNEL_KEY_ENC_IPV4_DST
]) {
300 saddr
= nla_get_in_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV4_SRC
]);
301 daddr
= nla_get_in_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV4_DST
]);
303 metadata
= __ip_tun_set_dst(saddr
, daddr
, tos
, ttl
,
306 } else if (tb
[TCA_TUNNEL_KEY_ENC_IPV6_SRC
] &&
307 tb
[TCA_TUNNEL_KEY_ENC_IPV6_DST
]) {
308 struct in6_addr saddr
;
309 struct in6_addr daddr
;
311 saddr
= nla_get_in6_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV6_SRC
]);
312 daddr
= nla_get_in6_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV6_DST
]);
314 metadata
= __ipv6_tun_set_dst(&saddr
, &daddr
, tos
, ttl
, dst_port
,
318 NL_SET_ERR_MSG(extack
, "Missing either ipv4 or ipv6 src and dst");
324 NL_SET_ERR_MSG(extack
, "Cannot allocate tunnel metadata dst");
329 #ifdef CONFIG_DST_CACHE
330 ret
= dst_cache_init(&metadata
->u
.tun_info
.dst_cache
, GFP_KERNEL
);
332 goto release_tun_meta
;
336 ret
= tunnel_key_opts_set(tb
[TCA_TUNNEL_KEY_ENC_OPTS
],
337 &metadata
->u
.tun_info
,
340 goto release_tun_meta
;
343 metadata
->u
.tun_info
.mode
|= IP_TUNNEL_INFO_TX
;
346 NL_SET_ERR_MSG(extack
, "Unknown tunnel key action");
352 ret
= tcf_idr_create(tn
, parm
->index
, est
, a
,
353 &act_tunnel_key_ops
, bind
, true);
355 NL_SET_ERR_MSG(extack
, "Cannot create TC IDR");
356 goto release_tun_meta
;
361 NL_SET_ERR_MSG(extack
, "TC IDR already exists");
363 goto release_tun_meta
;
366 err
= tcf_action_check_ctrlact(parm
->action
, tp
, &goto_ch
, extack
);
370 goto release_tun_meta
;
372 t
= to_tunnel_key(*a
);
374 params_new
= kzalloc(sizeof(*params_new
), GFP_KERNEL
);
375 if (unlikely(!params_new
)) {
376 NL_SET_ERR_MSG(extack
, "Cannot allocate tunnel key parameters");
381 params_new
->tcft_action
= parm
->t_action
;
382 params_new
->tcft_enc_metadata
= metadata
;
384 spin_lock_bh(&t
->tcf_lock
);
385 goto_ch
= tcf_action_set_ctrlact(*a
, parm
->action
, goto_ch
);
386 rcu_swap_protected(t
->params
, params_new
,
387 lockdep_is_held(&t
->tcf_lock
));
388 spin_unlock_bh(&t
->tcf_lock
);
389 tunnel_key_release_params(params_new
);
391 tcf_chain_put_by_act(goto_ch
);
393 if (ret
== ACT_P_CREATED
)
394 tcf_idr_insert(tn
, *a
);
400 tcf_chain_put_by_act(goto_ch
);
404 dst_release(&metadata
->dst
);
408 tcf_idr_release(*a
, bind
);
410 tcf_idr_cleanup(tn
, parm
->index
);
414 static void tunnel_key_release(struct tc_action
*a
)
416 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
417 struct tcf_tunnel_key_params
*params
;
419 params
= rcu_dereference_protected(t
->params
, 1);
420 tunnel_key_release_params(params
);
423 static int tunnel_key_geneve_opts_dump(struct sk_buff
*skb
,
424 const struct ip_tunnel_info
*info
)
426 int len
= info
->options_len
;
427 u8
*src
= (u8
*)(info
+ 1);
428 struct nlattr
*start
;
430 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
);
435 struct geneve_opt
*opt
= (struct geneve_opt
*)src
;
437 if (nla_put_be16(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
,
439 nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
,
441 nla_put(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
,
442 opt
->length
* 4, opt
+ 1)) {
443 nla_nest_cancel(skb
, start
);
447 len
-= sizeof(struct geneve_opt
) + opt
->length
* 4;
448 src
+= sizeof(struct geneve_opt
) + opt
->length
* 4;
451 nla_nest_end(skb
, start
);
455 static int tunnel_key_opts_dump(struct sk_buff
*skb
,
456 const struct ip_tunnel_info
*info
)
458 struct nlattr
*start
;
461 if (!info
->options_len
)
464 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS
);
468 if (info
->key
.tun_flags
& TUNNEL_GENEVE_OPT
) {
469 err
= tunnel_key_geneve_opts_dump(skb
, info
);
474 nla_nest_cancel(skb
, start
);
478 nla_nest_end(skb
, start
);
482 static int tunnel_key_dump_addresses(struct sk_buff
*skb
,
483 const struct ip_tunnel_info
*info
)
485 unsigned short family
= ip_tunnel_info_af(info
);
487 if (family
== AF_INET
) {
488 __be32 saddr
= info
->key
.u
.ipv4
.src
;
489 __be32 daddr
= info
->key
.u
.ipv4
.dst
;
491 if (!nla_put_in_addr(skb
, TCA_TUNNEL_KEY_ENC_IPV4_SRC
, saddr
) &&
492 !nla_put_in_addr(skb
, TCA_TUNNEL_KEY_ENC_IPV4_DST
, daddr
))
496 if (family
== AF_INET6
) {
497 const struct in6_addr
*saddr6
= &info
->key
.u
.ipv6
.src
;
498 const struct in6_addr
*daddr6
= &info
->key
.u
.ipv6
.dst
;
500 if (!nla_put_in6_addr(skb
,
501 TCA_TUNNEL_KEY_ENC_IPV6_SRC
, saddr6
) &&
502 !nla_put_in6_addr(skb
,
503 TCA_TUNNEL_KEY_ENC_IPV6_DST
, daddr6
))
510 static int tunnel_key_dump(struct sk_buff
*skb
, struct tc_action
*a
,
513 unsigned char *b
= skb_tail_pointer(skb
);
514 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
515 struct tcf_tunnel_key_params
*params
;
516 struct tc_tunnel_key opt
= {
517 .index
= t
->tcf_index
,
518 .refcnt
= refcount_read(&t
->tcf_refcnt
) - ref
,
519 .bindcnt
= atomic_read(&t
->tcf_bindcnt
) - bind
,
523 spin_lock_bh(&t
->tcf_lock
);
524 params
= rcu_dereference_protected(t
->params
,
525 lockdep_is_held(&t
->tcf_lock
));
526 opt
.action
= t
->tcf_action
;
527 opt
.t_action
= params
->tcft_action
;
529 if (nla_put(skb
, TCA_TUNNEL_KEY_PARMS
, sizeof(opt
), &opt
))
530 goto nla_put_failure
;
532 if (params
->tcft_action
== TCA_TUNNEL_KEY_ACT_SET
) {
533 struct ip_tunnel_info
*info
=
534 ¶ms
->tcft_enc_metadata
->u
.tun_info
;
535 struct ip_tunnel_key
*key
= &info
->key
;
536 __be32 key_id
= tunnel_id_to_key32(key
->tun_id
);
538 if (((key
->tun_flags
& TUNNEL_KEY
) &&
539 nla_put_be32(skb
, TCA_TUNNEL_KEY_ENC_KEY_ID
, key_id
)) ||
540 tunnel_key_dump_addresses(skb
,
541 ¶ms
->tcft_enc_metadata
->u
.tun_info
) ||
543 nla_put_be16(skb
, TCA_TUNNEL_KEY_ENC_DST_PORT
,
545 nla_put_u8(skb
, TCA_TUNNEL_KEY_NO_CSUM
,
546 !(key
->tun_flags
& TUNNEL_CSUM
)) ||
547 tunnel_key_opts_dump(skb
, info
))
548 goto nla_put_failure
;
550 if (key
->tos
&& nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_TOS
, key
->tos
))
551 goto nla_put_failure
;
553 if (key
->ttl
&& nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_TTL
, key
->ttl
))
554 goto nla_put_failure
;
557 tcf_tm_dump(&tm
, &t
->tcf_tm
);
558 if (nla_put_64bit(skb
, TCA_TUNNEL_KEY_TM
, sizeof(tm
),
559 &tm
, TCA_TUNNEL_KEY_PAD
))
560 goto nla_put_failure
;
561 spin_unlock_bh(&t
->tcf_lock
);
566 spin_unlock_bh(&t
->tcf_lock
);
571 static int tunnel_key_walker(struct net
*net
, struct sk_buff
*skb
,
572 struct netlink_callback
*cb
, int type
,
573 const struct tc_action_ops
*ops
,
574 struct netlink_ext_ack
*extack
)
576 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
578 return tcf_generic_walker(tn
, skb
, cb
, type
, ops
, extack
);
581 static int tunnel_key_search(struct net
*net
, struct tc_action
**a
, u32 index
)
583 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
585 return tcf_idr_search(tn
, a
, index
);
588 static struct tc_action_ops act_tunnel_key_ops
= {
589 .kind
= "tunnel_key",
590 .id
= TCA_ID_TUNNEL_KEY
,
591 .owner
= THIS_MODULE
,
592 .act
= tunnel_key_act
,
593 .dump
= tunnel_key_dump
,
594 .init
= tunnel_key_init
,
595 .cleanup
= tunnel_key_release
,
596 .walk
= tunnel_key_walker
,
597 .lookup
= tunnel_key_search
,
598 .size
= sizeof(struct tcf_tunnel_key
),
601 static __net_init
int tunnel_key_init_net(struct net
*net
)
603 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
605 return tc_action_net_init(tn
, &act_tunnel_key_ops
);
608 static void __net_exit
tunnel_key_exit_net(struct list_head
*net_list
)
610 tc_action_net_exit(net_list
, tunnel_key_net_id
);
613 static struct pernet_operations tunnel_key_net_ops
= {
614 .init
= tunnel_key_init_net
,
615 .exit_batch
= tunnel_key_exit_net
,
616 .id
= &tunnel_key_net_id
,
617 .size
= sizeof(struct tc_action_net
),
620 static int __init
tunnel_key_init_module(void)
622 return tcf_register_action(&act_tunnel_key_ops
, &tunnel_key_net_ops
);
625 static void __exit
tunnel_key_cleanup_module(void)
627 tcf_unregister_action(&act_tunnel_key_ops
, &tunnel_key_net_ops
);
630 module_init(tunnel_key_init_module
);
631 module_exit(tunnel_key_cleanup_module
);
633 MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
634 MODULE_DESCRIPTION("ip tunnel manipulation actions");
635 MODULE_LICENSE("GPL v2");