]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - net/sched/act_tunnel_key.c
Merge tag 's390-5.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
[mirror_ubuntu-hirsute-kernel.git] / net / sched / act_tunnel_key.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
4 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
5 */
6
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/skbuff.h>
11 #include <linux/rtnetlink.h>
12 #include <net/geneve.h>
13 #include <net/netlink.h>
14 #include <net/pkt_sched.h>
15 #include <net/dst.h>
16 #include <net/pkt_cls.h>
17
18 #include <linux/tc_act/tc_tunnel_key.h>
19 #include <net/tc_act/tc_tunnel_key.h>
20
/* Slot index for this module's per-netns state (set by register_pernet). */
static unsigned int tunnel_key_net_id;
/* Forward declaration; defined at the bottom of this file. */
static struct tc_action_ops act_tunnel_key_ops;
23
/* Datapath callback, runs in BH context under RCU.  Drops the skb's
 * current dst and, for the SET action, attaches the preallocated tunnel
 * encapsulation metadata dst so a downstream tunnel device can build
 * the outer headers.  Returns the configured TC action verdict.
 */
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	int action;

	/* params may be swapped by a concurrent replace in tunnel_key_init();
	 * the old block is freed via kfree_rcu, so this BH-RCU read is safe.
	 */
	params = rcu_dereference_bh(t->params);

	tcf_lastuse_update(&t->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
	action = READ_ONCE(t->tcf_action);

	switch (params->tcft_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		skb_dst_drop(skb);
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		skb_dst_drop(skb);
		/* dst_clone() takes a reference for this skb. */
		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
		break;
	default:
		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
			  params->tcft_action);
		break;
	}

	return action;
}
53
/* Policy for the TCA_TUNNEL_KEY_ENC_OPTS nest: currently only geneve
 * option sub-nests are accepted.
 */
static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
};

/* Policy for one geneve option: class (be16), type (u8) and up to 128
 * bytes of opaque option data.
 */
static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
						 .len = 128 },
};
66
/* Validate one geneve option nest and, when @dst is non-NULL, serialize
 * it as a struct geneve_opt header immediately followed by its data.
 *
 * Returns the number of bytes the encoded option occupies (header plus
 * data) or a negative errno.  Callers first invoke this with dst == NULL
 * as a sizing pass, then again with a real buffer.
 */
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested_deprecated(tb,
					  TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* All three sub-attributes are mandatory. */
	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	/* geneve_opt->length counts 4-byte words, so the data must be a
	 * non-zero multiple of 4 bytes.
	 */
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		/* The sizing pass determined dst_len, so a shortfall here
		 * is a programming error, not bad input.
		 */
		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		/* Option payload lives directly after the header. */
		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}
118
/* Walk every option in a TCA_TUNNEL_KEY_ENC_OPTS nest.  With dst == NULL
 * this only totals the encoded length; with a buffer it also copies each
 * option in place.  Returns the total length in bytes or a negative errno.
 */
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
				      enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			if (dst) {
				/* Advance past the option just written. */
				dst_len -= opt_len;
				dst += opt_len;
			}
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	/* rem > 0 means nla_for_each_attr() stopped before consuming the
	 * whole nest, i.e. a truncated/garbage trailing attribute.
	 */
	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}
158
/* Sizing pass: total byte length the encoded tunnel options will occupy,
 * without writing anything.  Negative errno on invalid options.
 */
static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}
164
/* Copy validated tunnel options into the storage that follows @info and
 * set the matching tun_flags bit.  @opts_len must come from
 * tunnel_key_get_opts_len() and match the space reserved when the
 * metadata_dst was allocated.
 */
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}
183
/* Netlink policy for the top-level TCA_TUNNEL_KEY_* attributes used by
 * tunnel_key_init() when creating or replacing an action.
 */
static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
	[TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};
197
198 static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
199 {
200 if (!p)
201 return;
202 if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
203 dst_release(&p->tcft_enc_metadata->dst);
204
205 kfree_rcu(p, rcu);
206 }
207
208 static int tunnel_key_init(struct net *net, struct nlattr *nla,
209 struct nlattr *est, struct tc_action **a,
210 int ovr, int bind, bool rtnl_held,
211 struct tcf_proto *tp,
212 struct netlink_ext_ack *extack)
213 {
214 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
215 struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
216 struct tcf_tunnel_key_params *params_new;
217 struct metadata_dst *metadata = NULL;
218 struct tcf_chain *goto_ch = NULL;
219 struct tc_tunnel_key *parm;
220 struct tcf_tunnel_key *t;
221 bool exists = false;
222 __be16 dst_port = 0;
223 __be64 key_id = 0;
224 int opts_len = 0;
225 __be16 flags = 0;
226 u8 tos, ttl;
227 int ret = 0;
228 u32 index;
229 int err;
230
231 if (!nla) {
232 NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
233 return -EINVAL;
234 }
235
236 err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
237 tunnel_key_policy, extack);
238 if (err < 0) {
239 NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
240 return err;
241 }
242
243 if (!tb[TCA_TUNNEL_KEY_PARMS]) {
244 NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
245 return -EINVAL;
246 }
247
248 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
249 index = parm->index;
250 err = tcf_idr_check_alloc(tn, &index, a, bind);
251 if (err < 0)
252 return err;
253 exists = err;
254 if (exists && bind)
255 return 0;
256
257 switch (parm->t_action) {
258 case TCA_TUNNEL_KEY_ACT_RELEASE:
259 break;
260 case TCA_TUNNEL_KEY_ACT_SET:
261 if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
262 __be32 key32;
263
264 key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
265 key_id = key32_to_tunnel_id(key32);
266 flags = TUNNEL_KEY;
267 }
268
269 flags |= TUNNEL_CSUM;
270 if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
271 nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
272 flags &= ~TUNNEL_CSUM;
273
274 if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
275 dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
276
277 if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
278 opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
279 extack);
280 if (opts_len < 0) {
281 ret = opts_len;
282 goto err_out;
283 }
284 }
285
286 tos = 0;
287 if (tb[TCA_TUNNEL_KEY_ENC_TOS])
288 tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
289 ttl = 0;
290 if (tb[TCA_TUNNEL_KEY_ENC_TTL])
291 ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
292
293 if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
294 tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
295 __be32 saddr;
296 __be32 daddr;
297
298 saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
299 daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
300
301 metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
302 dst_port, flags,
303 key_id, opts_len);
304 } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
305 tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
306 struct in6_addr saddr;
307 struct in6_addr daddr;
308
309 saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
310 daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
311
312 metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
313 0, flags,
314 key_id, 0);
315 } else {
316 NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
317 ret = -EINVAL;
318 goto err_out;
319 }
320
321 if (!metadata) {
322 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
323 ret = -ENOMEM;
324 goto err_out;
325 }
326
327 #ifdef CONFIG_DST_CACHE
328 ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
329 if (ret)
330 goto release_tun_meta;
331 #endif
332
333 if (opts_len) {
334 ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
335 &metadata->u.tun_info,
336 opts_len, extack);
337 if (ret < 0)
338 goto release_tun_meta;
339 }
340
341 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
342 break;
343 default:
344 NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
345 ret = -EINVAL;
346 goto err_out;
347 }
348
349 if (!exists) {
350 ret = tcf_idr_create(tn, index, est, a,
351 &act_tunnel_key_ops, bind, true);
352 if (ret) {
353 NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
354 goto release_tun_meta;
355 }
356
357 ret = ACT_P_CREATED;
358 } else if (!ovr) {
359 NL_SET_ERR_MSG(extack, "TC IDR already exists");
360 ret = -EEXIST;
361 goto release_tun_meta;
362 }
363
364 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
365 if (err < 0) {
366 ret = err;
367 exists = true;
368 goto release_tun_meta;
369 }
370 t = to_tunnel_key(*a);
371
372 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
373 if (unlikely(!params_new)) {
374 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
375 ret = -ENOMEM;
376 exists = true;
377 goto put_chain;
378 }
379 params_new->tcft_action = parm->t_action;
380 params_new->tcft_enc_metadata = metadata;
381
382 spin_lock_bh(&t->tcf_lock);
383 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
384 rcu_swap_protected(t->params, params_new,
385 lockdep_is_held(&t->tcf_lock));
386 spin_unlock_bh(&t->tcf_lock);
387 tunnel_key_release_params(params_new);
388 if (goto_ch)
389 tcf_chain_put_by_act(goto_ch);
390
391 if (ret == ACT_P_CREATED)
392 tcf_idr_insert(tn, *a);
393
394 return ret;
395
396 put_chain:
397 if (goto_ch)
398 tcf_chain_put_by_act(goto_ch);
399
400 release_tun_meta:
401 if (metadata)
402 dst_release(&metadata->dst);
403
404 err_out:
405 if (exists)
406 tcf_idr_release(*a, bind);
407 else
408 tcf_idr_cleanup(tn, index);
409 return ret;
410 }
411
412 static void tunnel_key_release(struct tc_action *a)
413 {
414 struct tcf_tunnel_key *t = to_tunnel_key(a);
415 struct tcf_tunnel_key_params *params;
416
417 params = rcu_dereference_protected(t->params, 1);
418 tunnel_key_release_params(params);
419 }
420
421 static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
422 const struct ip_tunnel_info *info)
423 {
424 int len = info->options_len;
425 u8 *src = (u8 *)(info + 1);
426 struct nlattr *start;
427
428 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
429 if (!start)
430 return -EMSGSIZE;
431
432 while (len > 0) {
433 struct geneve_opt *opt = (struct geneve_opt *)src;
434
435 if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
436 opt->opt_class) ||
437 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
438 opt->type) ||
439 nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
440 opt->length * 4, opt + 1)) {
441 nla_nest_cancel(skb, start);
442 return -EMSGSIZE;
443 }
444
445 len -= sizeof(struct geneve_opt) + opt->length * 4;
446 src += sizeof(struct geneve_opt) + opt->length * 4;
447 }
448
449 nla_nest_end(skb, start);
450 return 0;
451 }
452
453 static int tunnel_key_opts_dump(struct sk_buff *skb,
454 const struct ip_tunnel_info *info)
455 {
456 struct nlattr *start;
457 int err = -EINVAL;
458
459 if (!info->options_len)
460 return 0;
461
462 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
463 if (!start)
464 return -EMSGSIZE;
465
466 if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
467 err = tunnel_key_geneve_opts_dump(skb, info);
468 if (err)
469 goto err_out;
470 } else {
471 err_out:
472 nla_nest_cancel(skb, start);
473 return err;
474 }
475
476 nla_nest_end(skb, start);
477 return 0;
478 }
479
480 static int tunnel_key_dump_addresses(struct sk_buff *skb,
481 const struct ip_tunnel_info *info)
482 {
483 unsigned short family = ip_tunnel_info_af(info);
484
485 if (family == AF_INET) {
486 __be32 saddr = info->key.u.ipv4.src;
487 __be32 daddr = info->key.u.ipv4.dst;
488
489 if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
490 !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
491 return 0;
492 }
493
494 if (family == AF_INET6) {
495 const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
496 const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
497
498 if (!nla_put_in6_addr(skb,
499 TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
500 !nla_put_in6_addr(skb,
501 TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
502 return 0;
503 }
504
505 return -EINVAL;
506 }
507
/* ->dump callback: serialize the action's configuration for userspace.
 * Runs under tcf_lock so the params pointer is stable against a
 * concurrent replace.  Returns the message length or -1 on overflow.
 */
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	/* Remember the tail so a partial dump can be rolled back. */
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index = t->tcf_index,
		.refcnt = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		/* Optional attributes are emitted only when configured;
		 * NO_CSUM is always emitted as the inverse of TUNNEL_CSUM.
		 */
		if (((key->tun_flags & TUNNEL_KEY) &&
		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    (key->tp_dst &&
		     nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
				  key->tp_dst)) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !(key->tun_flags & TUNNEL_CSUM)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	/* Discard everything added since entry. */
	nlmsg_trim(skb, b);
	return -1;
}
568
569 static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
570 struct netlink_callback *cb, int type,
571 const struct tc_action_ops *ops,
572 struct netlink_ext_ack *extack)
573 {
574 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
575
576 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
577 }
578
579 static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
580 {
581 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
582
583 return tcf_idr_search(tn, a, index);
584 }
585
/* TC action ops table for "tunnel_key"; registered from
 * tunnel_key_init_module().
 */
static struct tc_action_ops act_tunnel_key_ops = {
	.kind = "tunnel_key",
	.id = TCA_ID_TUNNEL_KEY,
	.owner = THIS_MODULE,
	.act = tunnel_key_act,
	.dump = tunnel_key_dump,
	.init = tunnel_key_init,
	.cleanup = tunnel_key_release,
	.walk = tunnel_key_walker,
	.lookup = tunnel_key_search,
	.size = sizeof(struct tcf_tunnel_key),
};
598
599 static __net_init int tunnel_key_init_net(struct net *net)
600 {
601 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
602
603 return tc_action_net_init(net, tn, &act_tunnel_key_ops);
604 }
605
/* Batched pernet exit: tear down the action table for each dying netns. */
static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, tunnel_key_net_id);
}
610
/* Per-netns registration; .id receives the generic-netns slot index used
 * by net_generic() throughout this file.
 */
static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};
617
/* Module entry: register the action ops together with its pernet state. */
static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

/* Module exit: unregister; pernet teardown runs via tunnel_key_exit_net. */
static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");