// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>

static unsigned int tunnel_key_net_id;
static struct tc_action_ops act_tunnel_key_ops;

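/* Per-packet action handler, run in the data path under RCU (BH
 * disabled). TCA_TUNNEL_KEY_ACT_RELEASE drops any tunnel metadata
 * attached to the skb; TCA_TUNNEL_KEY_ACT_SET replaces it with a clone
 * of the preallocated metadata dst so a later tunnel device operating
 * in collect_md (external) mode can encapsulate the packet.
 */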
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	int action;

	params = rcu_dereference_bh(t->params);

	tcf_lastuse_update(&t->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
	action = READ_ONCE(t->tcf_action);

	switch (params->tcft_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		skb_dst_drop(skb);
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		skb_dst_drop(skb);
		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
		break;
	default:
		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
			  params->tcft_action);
		break;
	}

	return action;
}

static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

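/* Parse one GENEVE option nest and copy it as a struct geneve_opt plus
 * option data into @dst. When @dst is NULL only the required length is
 * computed, so callers can do a sizing pass before the real copy.
 * Returns the option length in bytes or a negative errno.
 */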
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested_deprecated(tb,
					  TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}

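/* Walk all TCA_TUNNEL_KEY_ENC_OPTS_* attributes and copy the options
 * into @dst. Used twice: once with @dst == NULL to learn the total
 * options length (tunnel_key_get_opts_len below) and once to fill the
 * metadata buffer. Returns the total length or a negative errno.
 */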
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
				      enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			if (dst) {
				dst_len -= opt_len;
				dst += opt_len;
			}
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}

static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}

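/* Copy the validated options into the ip_tunnel_info embedded in the
 * metadata_dst and mark the tunnel option type; only GENEVE options are
 * handled here.
 */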
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}

static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS]		= { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT]	= { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_NO_CSUM]	= { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS]	= { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL]	= { .type = NLA_U8 },
};

static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
{
	if (!p)
		return;
	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
		dst_release(&p->tcft_enc_metadata->dst);

	kfree_rcu(p, rcu);
}

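/* .init handler: parse the netlink attributes for a new or updated
 * tunnel_key action. For TCA_TUNNEL_KEY_ACT_SET a metadata_dst holding
 * the requested encapsulation key is allocated up front; the new
 * parameter block is then swapped in under tcf_lock and the old one
 * released via RCU.
 */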
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tcf_chain *goto_ch = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	__be64 key_id = 0;
	int opts_len = 0;
	__be16 flags = 0;
	u8 tos, ttl;
	int ret = 0;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
					  tunnel_key_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
		return err;
	}

	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
		return -EINVAL;
	}

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			__be32 key32;

			key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
			key_id = key32_to_tunnel_id(key32);
			flags = TUNNEL_KEY;
		}

		flags |= TUNNEL_CSUM;
		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
			flags &= ~TUNNEL_CSUM;

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
							   extack);
			if (opts_len < 0) {
				ret = opts_len;
				goto err_out;
			}
		}

		tos = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
		ttl = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
						    dst_port, flags,
						    key_id, opts_len);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
						      0, flags,
						      key_id, 0);
		} else {
			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
			ret = -EINVAL;
			goto err_out;
		}

		if (!metadata) {
			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
			ret = -ENOMEM;
			goto err_out;
		}

#ifdef CONFIG_DST_CACHE
		ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
		if (ret)
			goto release_tun_meta;
#endif

		if (opts_len) {
			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
						  &metadata->u.tun_info,
						  opts_len, extack);
			if (ret < 0)
				goto release_tun_meta;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_tunnel_key_ops, bind, true);
		if (ret) {
			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
			goto release_tun_meta;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		NL_SET_ERR_MSG(extack, "TC IDR already exists");
		ret = -EEXIST;
		goto release_tun_meta;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0) {
		ret = err;
		exists = true;
		goto release_tun_meta;
	}
	t = to_tunnel_key(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
		ret = -ENOMEM;
		exists = true;
		goto put_chain;
	}
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	spin_lock_bh(&t->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(t->params, params_new,
			   lockdep_is_held(&t->tcf_lock));
	spin_unlock_bh(&t->tcf_lock);
	tunnel_key_release_params(params_new);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_tun_meta:
	if (metadata)
		dst_release(&metadata->dst);

err_out:
	if (exists)
		tcf_idr_release(*a, bind);
	else
		tcf_idr_cleanup(tn, parm->index);
	return ret;
}

static void tunnel_key_release(struct tc_action *a)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;

	params = rcu_dereference_protected(t->params, 1);
	tunnel_key_release_params(params);
}

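/* Dump helpers: re-encode the stored GENEVE options (and, below, the
 * rest of the tunnel key) back into netlink attributes for userspace
 * (e.g. 'tc actions show').
 */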
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	int len = info->options_len;
	u8 *src = (u8 *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
	if (!start)
		return -EMSGSIZE;

	while (len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type) ||
		    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt + 1)) {
			nla_nest_cancel(skb, start);
			return -EMSGSIZE;
		}

		len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	nla_nest_end(skb, start);
	return 0;
}

static int tunnel_key_opts_dump(struct sk_buff *skb,
				const struct ip_tunnel_info *info)
{
	struct nlattr *start;
	int err = -EINVAL;

	if (!info->options_len)
		return 0;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
	if (!start)
		return -EMSGSIZE;

	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
		err = tunnel_key_geneve_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else {
err_out:
		nla_nest_cancel(skb, start);
		return err;
	}

	nla_nest_end(skb, start);
	return 0;
}

static int tunnel_key_dump_addresses(struct sk_buff *skb,
				     const struct ip_tunnel_info *info)
{
	unsigned short family = ip_tunnel_info_af(info);

	if (family == AF_INET) {
		__be32 saddr = info->key.u.ipv4.src;
		__be32 daddr = info->key.u.ipv4.dst;

		if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
		    !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
			return 0;
	}

	if (family == AF_INET6) {
		const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
		const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;

		if (!nla_put_in6_addr(skb,
				      TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
		    !nla_put_in6_addr(skb,
				      TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
			return 0;
	}

	return -EINVAL;
}

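/* .dump handler: serialize the current action state while holding
 * tcf_lock so userspace sees a consistent snapshot of the parameters.
 */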
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index    = t->tcf_index,
		.refcnt   = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action   = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		if (((key->tun_flags & TUNNEL_KEY) &&
		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    (key->tp_dst &&
		     nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
				  key->tp_dst)) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !(key->tun_flags & TUNNEL_CSUM)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_tunnel_key_ops = {
	.kind		=	"tunnel_key",
	.id		=	TCA_ID_TUNNEL_KEY,
	.owner		=	THIS_MODULE,
	.act		=	tunnel_key_act,
	.dump		=	tunnel_key_dump,
	.init		=	tunnel_key_init,
	.cleanup	=	tunnel_key_release,
	.walk		=	tunnel_key_walker,
	.lookup		=	tunnel_key_search,
	.size		=	sizeof(struct tcf_tunnel_key),
};

static __net_init int tunnel_key_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tc_action_net_init(tn, &act_tunnel_key_ops);
}

static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, tunnel_key_net_id);
}

static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id   = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");