net/ipv6/ioam6.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * IPv6 IOAM implementation
 *
 * Author:
 * Justin Iurman <justin.iurman@uliege.be>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/ioam6.h>
#include <linux/ioam6_genl.h>
#include <linux/rhashtable.h>

#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/ioam6.h>

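/* Namespaces and schemas can be looked up locklessly (RCU) from the data
 * path, so their memory is only reclaimed with kfree_rcu(), after a grace
 * period has elapsed following removal from the rhashtables below.
 */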
static void ioam6_ns_release(struct ioam6_namespace *ns)
{
	kfree_rcu(ns, rcu);
}

static void ioam6_sc_release(struct ioam6_schema *sc)
{
	kfree_rcu(sc, rcu);
}

static void ioam6_free_ns(void *ptr, void *arg)
{
	struct ioam6_namespace *ns = (struct ioam6_namespace *)ptr;

	if (ns)
		ioam6_ns_release(ns);
}

static void ioam6_free_sc(void *ptr, void *arg)
{
	struct ioam6_schema *sc = (struct ioam6_schema *)ptr;

	if (sc)
		ioam6_sc_release(sc);
}

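/* Both rhashtables are keyed directly on the embedded "id" field: namespaces
 * by their big-endian 16-bit id, schemas by their host-order 32-bit id. The
 * compare callbacks below only have to check that single field.
 */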
static int ioam6_ns_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct ioam6_namespace *ns = obj;

	return (ns->id != *(__be16 *)arg->key);
}

static int ioam6_sc_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct ioam6_schema *sc = obj;

	return (sc->id != *(u32 *)arg->key);
}

static const struct rhashtable_params rht_ns_params = {
	.key_len = sizeof(__be16),
	.key_offset = offsetof(struct ioam6_namespace, id),
	.head_offset = offsetof(struct ioam6_namespace, head),
	.automatic_shrinking = true,
	.obj_cmpfn = ioam6_ns_cmpfn,
};

static const struct rhashtable_params rht_sc_params = {
	.key_len = sizeof(u32),
	.key_offset = offsetof(struct ioam6_schema, id),
	.head_offset = offsetof(struct ioam6_schema, head),
	.automatic_shrinking = true,
	.obj_cmpfn = ioam6_sc_cmpfn,
};

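/* Configuration is done over the "IOAM6" generic netlink family: userspace
 * (typically iproute2's "ip ioam" subcommand) adds, deletes and dumps IOAM
 * namespaces and schemas, and attaches a schema to a namespace, through the
 * ops registered further down in ioam6_genl_ops.
 */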
static struct genl_family ioam6_genl_family;

static const struct nla_policy ioam6_genl_policy_addns[] = {
	[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
	[IOAM6_ATTR_NS_DATA] = { .type = NLA_U32 },
	[IOAM6_ATTR_NS_DATA_WIDE] = { .type = NLA_U64 },
};

static const struct nla_policy ioam6_genl_policy_delns[] = {
	[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
};

static const struct nla_policy ioam6_genl_policy_addsc[] = {
	[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
	[IOAM6_ATTR_SC_DATA] = { .type = NLA_BINARY,
				 .len = IOAM6_MAX_SCHEMA_DATA_LEN },
};

static const struct nla_policy ioam6_genl_policy_delsc[] = {
	[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
};

static const struct nla_policy ioam6_genl_policy_ns_sc[] = {
	[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
	[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
	[IOAM6_ATTR_SC_NONE] = { .type = NLA_FLAG },
};

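/* IOAM6_CMD_ADD_NAMESPACE: create a namespace identified by a 16-bit id, with
 * optional 32-bit and 64-bit namespace-specific data. Missing data attributes
 * default to the "unavailable" sentinel values. Returns -EEXIST if the id is
 * already registered in this netns.
 */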
static int ioam6_genl_addns(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	u64 data64;
	u32 data32;
	__be16 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID])
		return -EINVAL;

	id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
	if (ns) {
		err = -EEXIST;
		goto out_unlock;
	}

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns) {
		err = -ENOMEM;
		goto out_unlock;
	}

	ns->id = id;

	if (!info->attrs[IOAM6_ATTR_NS_DATA])
		data32 = IOAM6_U32_UNAVAILABLE;
	else
		data32 = nla_get_u32(info->attrs[IOAM6_ATTR_NS_DATA]);

	if (!info->attrs[IOAM6_ATTR_NS_DATA_WIDE])
		data64 = IOAM6_U64_UNAVAILABLE;
	else
		data64 = nla_get_u64(info->attrs[IOAM6_ATTR_NS_DATA_WIDE]);

	ns->data = cpu_to_be32(data32);
	ns->data_wide = cpu_to_be64(data64);

	err = rhashtable_lookup_insert_fast(&nsdata->namespaces, &ns->head,
					    rht_ns_params);
	if (err)
		kfree(ns);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

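/* IOAM6_CMD_DEL_NAMESPACE: remove a namespace by id. If a schema is attached,
 * only the schema's back-pointer is cleared (the schema itself survives); the
 * namespace is then freed after an RCU grace period.
 */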
static int ioam6_genl_delns(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	struct ioam6_schema *sc;
	__be16 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID])
		return -EINVAL;

	id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
	if (!ns) {
		err = -ENOENT;
		goto out_unlock;
	}

	sc = rcu_dereference_protected(ns->schema,
				       lockdep_is_held(&nsdata->lock));

	err = rhashtable_remove_fast(&nsdata->namespaces, &ns->head,
				     rht_ns_params);
	if (err)
		goto out_unlock;

	if (sc)
		rcu_assign_pointer(sc->ns, NULL);

	ioam6_ns_release(ns);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

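/* Emit one namespace as a netlink message: its id, its data/data_wide values
 * (only when they differ from the "unavailable" sentinels) and, under RCU,
 * the id of the schema attached to it, if any.
 */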
static int __ioam6_genl_dumpns_element(struct ioam6_namespace *ns,
				       u32 portid,
				       u32 seq,
				       u32 flags,
				       struct sk_buff *skb,
				       u8 cmd)
{
	struct ioam6_schema *sc;
	u64 data64;
	u32 data32;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	data32 = be32_to_cpu(ns->data);
	data64 = be64_to_cpu(ns->data_wide);

	if (nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id)) ||
	    (data32 != IOAM6_U32_UNAVAILABLE &&
	     nla_put_u32(skb, IOAM6_ATTR_NS_DATA, data32)) ||
	    (data64 != IOAM6_U64_UNAVAILABLE &&
	     nla_put_u64_64bit(skb, IOAM6_ATTR_NS_DATA_WIDE,
			       data64, IOAM6_ATTR_PAD)))
		goto nla_put_failure;

	rcu_read_lock();

	sc = rcu_dereference(ns->schema);
	if (sc && nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id)) {
		rcu_read_unlock();
		goto nla_put_failure;
	}

	rcu_read_unlock();

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

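/* Dumping namespaces walks the rhashtable with a resumable iterator: the
 * iterator is allocated in ->start(), its pointer is stashed in cb->args[0]
 * across dump invocations, and it is torn down and freed in ->done().
 */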
static int ioam6_genl_dumpns_start(struct netlink_callback *cb)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[0] = (long)iter;
	}

	rhashtable_walk_enter(&nsdata->namespaces, iter);

	return 0;
}

static int ioam6_genl_dumpns_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);

	return 0;
}

static int ioam6_genl_dumpns(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *iter;
	struct ioam6_namespace *ns;
	int err;

	iter = (struct rhashtable_iter *)cb->args[0];
	rhashtable_walk_start(iter);

	for (;;) {
		ns = rhashtable_walk_next(iter);

		if (IS_ERR(ns)) {
			if (PTR_ERR(ns) == -EAGAIN)
				continue;
			err = PTR_ERR(ns);
			goto done;
		} else if (!ns) {
			break;
		}

		err = __ioam6_genl_dumpns_element(ns,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NLM_F_MULTI,
						  skb,
						  IOAM6_CMD_DUMP_NAMESPACES);
		if (err)
			goto done;
	}

	err = skb->len;

done:
	rhashtable_walk_stop(iter);
	return err;
}

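/* IOAM6_CMD_ADD_SCHEMA: register an opaque-state schema. The data is padded
 * to a multiple of 4 bytes, and the precomputed header word carries the data
 * length (in 4-octet units) in its top byte and the schema id in the lower
 * 24 bits, i.e. the Opaque State Snapshot header written into the trace.
 */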
static int ioam6_genl_addsc(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	int len, len_aligned, err;
	struct ioam6_schema *sc;
	u32 id;

	if (!info->attrs[IOAM6_ATTR_SC_ID] || !info->attrs[IOAM6_ATTR_SC_DATA])
		return -EINVAL;

	id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
	if (sc) {
		err = -EEXIST;
		goto out_unlock;
	}

	len = nla_len(info->attrs[IOAM6_ATTR_SC_DATA]);
	len_aligned = ALIGN(len, 4);

	sc = kzalloc(sizeof(*sc) + len_aligned, GFP_KERNEL);
	if (!sc) {
		err = -ENOMEM;
		goto out_unlock;
	}

	sc->id = id;
	sc->len = len_aligned;
	sc->hdr = cpu_to_be32(sc->id | ((u8)(sc->len / 4) << 24));
	nla_memcpy(sc->data, info->attrs[IOAM6_ATTR_SC_DATA], len);

	err = rhashtable_lookup_insert_fast(&nsdata->schemas, &sc->head,
					    rht_sc_params);
	if (err)
		goto free_sc;

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
free_sc:
	kfree(sc);
	goto out_unlock;
}

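/* IOAM6_CMD_DEL_SCHEMA: remove a schema by id, detaching it from a namespace
 * if one currently points at it, and free it after an RCU grace period.
 */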
static int ioam6_genl_delsc(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	struct ioam6_schema *sc;
	int err;
	u32 id;

	if (!info->attrs[IOAM6_ATTR_SC_ID])
		return -EINVAL;

	id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
	if (!sc) {
		err = -ENOENT;
		goto out_unlock;
	}

	ns = rcu_dereference_protected(sc->ns, lockdep_is_held(&nsdata->lock));

	err = rhashtable_remove_fast(&nsdata->schemas, &sc->head,
				     rht_sc_params);
	if (err)
		goto out_unlock;

	if (ns)
		rcu_assign_pointer(ns->schema, NULL);

	ioam6_sc_release(sc);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

static int __ioam6_genl_dumpsc_element(struct ioam6_schema *sc,
				       u32 portid, u32 seq, u32 flags,
				       struct sk_buff *skb, u8 cmd)
{
	struct ioam6_namespace *ns;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id) ||
	    nla_put(skb, IOAM6_ATTR_SC_DATA, sc->len, sc->data))
		goto nla_put_failure;

	rcu_read_lock();

	ns = rcu_dereference(sc->ns);
	if (ns && nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id))) {
		rcu_read_unlock();
		goto nla_put_failure;
	}

	rcu_read_unlock();

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ioam6_genl_dumpsc_start(struct netlink_callback *cb)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[0] = (long)iter;
	}

	rhashtable_walk_enter(&nsdata->schemas, iter);

	return 0;
}

static int ioam6_genl_dumpsc_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);

	return 0;
}

static int ioam6_genl_dumpsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *iter;
	struct ioam6_schema *sc;
	int err;

	iter = (struct rhashtable_iter *)cb->args[0];
	rhashtable_walk_start(iter);

	for (;;) {
		sc = rhashtable_walk_next(iter);

		if (IS_ERR(sc)) {
			if (PTR_ERR(sc) == -EAGAIN)
				continue;
			err = PTR_ERR(sc);
			goto done;
		} else if (!sc) {
			break;
		}

		err = __ioam6_genl_dumpsc_element(sc,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NLM_F_MULTI,
						  skb,
						  IOAM6_CMD_DUMP_SCHEMAS);
		if (err)
			goto done;
	}

	err = skb->len;

done:
	rhashtable_walk_stop(iter);
	return err;
}

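/* IOAM6_CMD_NS_SET_SCHEMA: attach a schema to a namespace, or detach any
 * schema when IOAM6_ATTR_SC_NONE is set. Namespaces and schemas reference
 * each other through a pair of RCU pointers, so any previous link on either
 * side is broken before the new one is published.
 */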
static int ioam6_genl_ns_set_schema(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_namespace *ns, *ns_ref;
	struct ioam6_schema *sc, *sc_ref;
	struct ioam6_pernet_data *nsdata;
	__be16 ns_id;
	u32 sc_id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID] ||
	    (!info->attrs[IOAM6_ATTR_SC_ID] &&
	     !info->attrs[IOAM6_ATTR_SC_NONE]))
		return -EINVAL;

	ns_id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &ns_id, rht_ns_params);
	if (!ns) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (info->attrs[IOAM6_ATTR_SC_NONE]) {
		sc = NULL;
	} else {
		sc_id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
		sc = rhashtable_lookup_fast(&nsdata->schemas, &sc_id,
					    rht_sc_params);
		if (!sc) {
			err = -ENOENT;
			goto out_unlock;
		}
	}

	sc_ref = rcu_dereference_protected(ns->schema,
					   lockdep_is_held(&nsdata->lock));
	if (sc_ref)
		rcu_assign_pointer(sc_ref->ns, NULL);
	rcu_assign_pointer(ns->schema, sc);

	if (sc) {
		ns_ref = rcu_dereference_protected(sc->ns,
						   lockdep_is_held(&nsdata->lock));
		if (ns_ref)
			rcu_assign_pointer(ns_ref->schema, NULL);
		rcu_assign_pointer(sc->ns, ns);
	}

	err = 0;

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

static const struct genl_ops ioam6_genl_ops[] = {
	{
		.cmd = IOAM6_CMD_ADD_NAMESPACE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_addns,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_addns,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_addns) - 1,
	},
	{
		.cmd = IOAM6_CMD_DEL_NAMESPACE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_delns,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_delns,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_delns) - 1,
	},
	{
		.cmd = IOAM6_CMD_DUMP_NAMESPACES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.start = ioam6_genl_dumpns_start,
		.dumpit = ioam6_genl_dumpns,
		.done = ioam6_genl_dumpns_done,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = IOAM6_CMD_ADD_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_addsc,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_addsc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_addsc) - 1,
	},
	{
		.cmd = IOAM6_CMD_DEL_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_delsc,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_delsc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_delsc) - 1,
	},
	{
		.cmd = IOAM6_CMD_DUMP_SCHEMAS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.start = ioam6_genl_dumpsc_start,
		.dumpit = ioam6_genl_dumpsc,
		.done = ioam6_genl_dumpsc_done,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = IOAM6_CMD_NS_SET_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_ns_set_schema,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_ns_sc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_ns_sc) - 1,
	},
};

static struct genl_family ioam6_genl_family __ro_after_init = {
	.name = IOAM6_GENL_NAME,
	.version = IOAM6_GENL_VERSION,
	.netnsok = true,
	.parallel_ops = true,
	.ops = ioam6_genl_ops,
	.n_ops = ARRAY_SIZE(ioam6_genl_ops),
	.module = THIS_MODULE,
};

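/* Data path helper: map an IOAM namespace id carried in a packet to its state
 * in the given netns. The returned pointer is RCU-protected and only safe to
 * use under rcu_read_lock().
 */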
struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	return rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
}

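/* Write this node's data fields into the pre-allocated trace area. The write
 * position is the end of the remaining free space (remlen, in 4-octet units)
 * minus the room this node consumes (nodelen plus the opaque-state length),
 * and fields are appended in trace-type bit order. Fields this implementation
 * cannot provide are filled with the "unavailable" sentinel so that the
 * per-node layout stays fixed.
 */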
static void __ioam6_fill_trace_data(struct sk_buff *skb,
				    struct ioam6_namespace *ns,
				    struct ioam6_trace_hdr *trace,
				    struct ioam6_schema *sc,
				    u8 sclen)
{
	struct __kernel_sock_timeval ts;
	u64 raw64;
	u32 raw32;
	u16 raw16;
	u8 *data;
	u8 byte;

	data = trace->data + trace->remlen * 4 - trace->nodelen * 4 - sclen * 4;

	/* hop_lim and node_id */
	if (trace->type.bit0) {
		byte = ipv6_hdr(skb)->hop_limit;
		if (skb->dev)
			byte--;

		raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id;

		*(__be32 *)data = cpu_to_be32((byte << 24) | raw32);
		data += sizeof(__be32);
	}

	/* ingress_if_id and egress_if_id */
	if (trace->type.bit1) {
		if (!skb->dev)
			raw16 = IOAM6_U16_UNAVAILABLE;
		else
			raw16 = (__force u16)__in6_dev_get(skb->dev)->cnf.ioam6_id;

		*(__be16 *)data = cpu_to_be16(raw16);
		data += sizeof(__be16);

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
			raw16 = IOAM6_U16_UNAVAILABLE;
		else
			raw16 = (__force u16)__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id;

		*(__be16 *)data = cpu_to_be16(raw16);
		data += sizeof(__be16);
	}

	/* timestamp seconds */
	if (trace->type.bit2) {
		if (!skb->dev) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			if (!skb->tstamp)
				__net_timestamp(skb);

			skb_get_new_timestamp(skb, &ts);
			*(__be32 *)data = cpu_to_be32((u32)ts.tv_sec);
		}
		data += sizeof(__be32);
	}

	/* timestamp subseconds */
	if (trace->type.bit3) {
		if (!skb->dev) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			if (!skb->tstamp)
				__net_timestamp(skb);

			if (!trace->type.bit2)
				skb_get_new_timestamp(skb, &ts);

			*(__be32 *)data = cpu_to_be32((u32)ts.tv_usec);
		}
		data += sizeof(__be32);
	}

	/* transit delay */
	if (trace->type.bit4) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* namespace data */
	if (trace->type.bit5) {
		*(__be32 *)data = ns->data;
		data += sizeof(__be32);
	}

	/* queue depth */
	if (trace->type.bit6) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* checksum complement */
	if (trace->type.bit7) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* hop_lim and node_id (wide) */
	if (trace->type.bit8) {
		byte = ipv6_hdr(skb)->hop_limit;
		if (skb->dev)
			byte--;

		raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide;

		*(__be64 *)data = cpu_to_be64(((u64)byte << 56) | raw64);
		data += sizeof(__be64);
	}

	/* ingress_if_id and egress_if_id (wide) */
	if (trace->type.bit9) {
		if (!skb->dev)
			raw32 = IOAM6_U32_UNAVAILABLE;
		else
			raw32 = __in6_dev_get(skb->dev)->cnf.ioam6_id_wide;

		*(__be32 *)data = cpu_to_be32(raw32);
		data += sizeof(__be32);

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
			raw32 = IOAM6_U32_UNAVAILABLE;
		else
			raw32 = __in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide;

		*(__be32 *)data = cpu_to_be32(raw32);
		data += sizeof(__be32);
	}

	/* namespace data (wide) */
	if (trace->type.bit10) {
		*(__be64 *)data = ns->data_wide;
		data += sizeof(__be64);
	}

	/* buffer occupancy */
	if (trace->type.bit11) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit12 undefined: filled with empty value */
	if (trace->type.bit12) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit13 undefined: filled with empty value */
	if (trace->type.bit13) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit14 undefined: filled with empty value */
	if (trace->type.bit14) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit15 undefined: filled with empty value */
	if (trace->type.bit15) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit16 undefined: filled with empty value */
	if (trace->type.bit16) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit17 undefined: filled with empty value */
	if (trace->type.bit17) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit18 undefined: filled with empty value */
	if (trace->type.bit18) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit19 undefined: filled with empty value */
	if (trace->type.bit19) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit20 undefined: filled with empty value */
	if (trace->type.bit20) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit21 undefined: filled with empty value */
	if (trace->type.bit21) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* opaque state snapshot */
	if (trace->type.bit22) {
		if (!sc) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE >> 8);
		} else {
			*(__be32 *)data = sc->hdr;
			data += sizeof(__be32);

			memcpy(data, sc->data, sc->len);
		}
	}
}

/* called with rcu_read_lock() */
void ioam6_fill_trace_data(struct sk_buff *skb,
			   struct ioam6_namespace *ns,
			   struct ioam6_trace_hdr *trace)
{
	struct ioam6_schema *sc;
	u8 sclen = 0;

	/* Skip if Overflow flag is set */
	if (trace->overflow)
		return;

	/* NodeLen does not include Opaque State Snapshot length. We need to
	 * take it into account if the corresponding bit is set (bit 22) and
	 * if the current IOAM namespace has an active schema attached to it
	 */
	sc = rcu_dereference(ns->schema);
	if (trace->type.bit22) {
		sclen = sizeof_field(struct ioam6_schema, hdr) / 4;

		if (sc)
			sclen += sc->len / 4;
	}

	/* If there is no space remaining, we set the Overflow flag and we
	 * skip without filling the trace
	 */
	if (!trace->remlen || trace->remlen < trace->nodelen + sclen) {
		trace->overflow = 1;
		return;
	}

	__ioam6_fill_trace_data(skb, ns, trace, sc, sclen);
	trace->remlen -= trace->nodelen + sclen;
}

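/* Per-netns state: each network namespace owns its own pair of rhashtables
 * (IOAM namespaces and schemas) plus the mutex serializing the generic
 * netlink configuration ops above.
 */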
static int __net_init ioam6_net_init(struct net *net)
{
	struct ioam6_pernet_data *nsdata;
	int err = -ENOMEM;

	nsdata = kzalloc(sizeof(*nsdata), GFP_KERNEL);
	if (!nsdata)
		goto out;

	mutex_init(&nsdata->lock);
	net->ipv6.ioam6_data = nsdata;

	err = rhashtable_init(&nsdata->namespaces, &rht_ns_params);
	if (err)
		goto free_nsdata;

	err = rhashtable_init(&nsdata->schemas, &rht_sc_params);
	if (err)
		goto free_rht_ns;

out:
	return err;
free_rht_ns:
	rhashtable_destroy(&nsdata->namespaces);
free_nsdata:
	kfree(nsdata);
	net->ipv6.ioam6_data = NULL;
	goto out;
}

static void __net_exit ioam6_net_exit(struct net *net)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	rhashtable_free_and_destroy(&nsdata->namespaces, ioam6_free_ns, NULL);
	rhashtable_free_and_destroy(&nsdata->schemas, ioam6_free_sc, NULL);

	kfree(nsdata);
}

static struct pernet_operations ioam6_net_ops = {
	.init = ioam6_net_init,
	.exit = ioam6_net_exit,
};

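/* Module init/exit: register the per-netns state and the generic netlink
 * family, and, when CONFIG_IPV6_IOAM6_LWTUNNEL is set, the lightweight tunnel
 * encapsulation used to insert IOAM data into outgoing packets.
 */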
int __init ioam6_init(void)
{
	int err = register_pernet_subsys(&ioam6_net_ops);
	if (err)
		goto out;

	err = genl_register_family(&ioam6_genl_family);
	if (err)
		goto out_unregister_pernet_subsys;

#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
	err = ioam6_iptunnel_init();
	if (err)
		goto out_unregister_genl;
#endif

	pr_info("In-situ OAM (IOAM) with IPv6\n");

out:
	return err;
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
out_unregister_genl:
	genl_unregister_family(&ioam6_genl_family);
#endif
out_unregister_pernet_subsys:
	unregister_pernet_subsys(&ioam6_net_ops);
	goto out;
}

void ioam6_exit(void)
{
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
	ioam6_iptunnel_exit();
#endif
	genl_unregister_family(&ioam6_genl_family);
	unregister_pernet_subsys(&ioam6_net_ops);
}