/* Source provenance: net/dsa/dsa2.c, from the mirror_ubuntu-artful-kernel.git
 * tree (git.proxmox.com mirror).  The web-viewer banner and an unrelated
 * commit subject ("tcp/dccp: block BH for SYN processing") appeared here.
 */
1 /*
2 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
3 * Copyright (c) 2008-2009 Marvell Semiconductor
4 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
5 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/list.h>
16 #include <linux/slab.h>
17 #include <linux/rtnetlink.h>
18 #include <net/dsa.h>
19 #include <linux/of.h>
20 #include <linux/of_net.h>
21 #include "dsa_priv.h"
22
23 static LIST_HEAD(dsa_switch_trees);
24 static DEFINE_MUTEX(dsa2_mutex);
25
26 static struct dsa_switch_tree *dsa_get_dst(u32 tree)
27 {
28 struct dsa_switch_tree *dst;
29
30 list_for_each_entry(dst, &dsa_switch_trees, list)
31 if (dst->tree == tree) {
32 kref_get(&dst->refcount);
33 return dst;
34 }
35 return NULL;
36 }
37
/* kref release callback: the last reference on the tree is gone, so
 * unlink it from the global dsa_switch_trees list and free it.  Called
 * via kref_put() from dsa_put_dst()/dsa_dst_del_ds().
 */
static void dsa_free_dst(struct kref *ref)
{
	struct dsa_switch_tree *dst = container_of(ref, struct dsa_switch_tree,
						   refcount);

	list_del(&dst->list);
	kfree(dst);
}
46
/* Drop one reference on the tree; frees it via dsa_free_dst() when the
 * count reaches zero.
 */
static void dsa_put_dst(struct dsa_switch_tree *dst)
{
	kref_put(&dst->refcount, dsa_free_dst);
}
51
52 static struct dsa_switch_tree *dsa_add_dst(u32 tree)
53 {
54 struct dsa_switch_tree *dst;
55
56 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
57 if (!dst)
58 return NULL;
59 dst->tree = tree;
60 INIT_LIST_HEAD(&dst->list);
61 list_add_tail(&dsa_switch_trees, &dst->list);
62 kref_init(&dst->refcount);
63
64 return dst;
65 }
66
/* Attach switch @ds at position @index in the tree and take a tree
 * reference on its behalf (dropped again by dsa_dst_del_ds()).
 */
static void dsa_dst_add_ds(struct dsa_switch_tree *dst,
			   struct dsa_switch *ds, u32 index)
{
	kref_get(&dst->refcount);
	dst->ds[index] = ds;
}
73
/* Detach the switch at position @index from the tree and drop the tree
 * reference taken by dsa_dst_add_ds().  May free the tree.
 */
static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
			   struct dsa_switch *ds, u32 index)
{
	dst->ds[index] = NULL;
	kref_put(&dst->refcount, dsa_free_dst);
}
80
81 /* For platform data configurations, we need to have a valid name argument to
82 * differentiate a disabled port from an enabled one
83 */
84 static bool dsa_port_is_valid(struct dsa_port *port)
85 {
86 return !!(port->dn || port->name);
87 }
88
89 static bool dsa_port_is_dsa(struct dsa_port *port)
90 {
91 if (port->name && !strcmp(port->name, "dsa"))
92 return true;
93 else
94 return !!of_parse_phandle(port->dn, "link", 0);
95 }
96
97 static bool dsa_port_is_cpu(struct dsa_port *port)
98 {
99 if (port->name && !strcmp(port->name, "cpu"))
100 return true;
101 else
102 return !!of_parse_phandle(port->dn, "ethernet", 0);
103 }
104
105 static bool dsa_ds_find_port_dn(struct dsa_switch *ds,
106 struct device_node *port)
107 {
108 u32 index;
109
110 for (index = 0; index < ds->num_ports; index++)
111 if (ds->ports[index].dn == port)
112 return true;
113 return false;
114 }
115
116 static struct dsa_switch *dsa_dst_find_port_dn(struct dsa_switch_tree *dst,
117 struct device_node *port)
118 {
119 struct dsa_switch *ds;
120 u32 index;
121
122 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
123 ds = dst->ds[index];
124 if (!ds)
125 continue;
126
127 if (dsa_ds_find_port_dn(ds, port))
128 return ds;
129 }
130
131 return NULL;
132 }
133
134 static int dsa_port_complete(struct dsa_switch_tree *dst,
135 struct dsa_switch *src_ds,
136 struct dsa_port *port,
137 u32 src_port)
138 {
139 struct device_node *link;
140 int index;
141 struct dsa_switch *dst_ds;
142
143 for (index = 0;; index++) {
144 link = of_parse_phandle(port->dn, "link", index);
145 if (!link)
146 break;
147
148 dst_ds = dsa_dst_find_port_dn(dst, link);
149 of_node_put(link);
150
151 if (!dst_ds)
152 return 1;
153
154 src_ds->rtable[dst_ds->index] = src_port;
155 }
156
157 return 0;
158 }
159
160 /* A switch is complete if all the DSA ports phandles point to ports
161 * known in the tree. A return value of 1 means the tree is not
162 * complete. This is not an error condition. A value of 0 is
163 * success.
164 */
165 static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
166 {
167 struct dsa_port *port;
168 u32 index;
169 int err;
170
171 for (index = 0; index < ds->num_ports; index++) {
172 port = &ds->ports[index];
173 if (!dsa_port_is_valid(port))
174 continue;
175
176 if (!dsa_port_is_dsa(port))
177 continue;
178
179 err = dsa_port_complete(dst, ds, port, index);
180 if (err != 0)
181 return err;
182
183 ds->dsa_port_mask |= BIT(index);
184 }
185
186 return 0;
187 }
188
189 /* A tree is complete if all the DSA ports phandles point to ports
190 * known in the tree. A return value of 1 means the tree is not
191 * complete. This is not an error condition. A value of 0 is
192 * success.
193 */
194 static int dsa_dst_complete(struct dsa_switch_tree *dst)
195 {
196 struct dsa_switch *ds;
197 u32 index;
198 int err;
199
200 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
201 ds = dst->ds[index];
202 if (!ds)
203 continue;
204
205 err = dsa_ds_complete(dst, ds);
206 if (err != 0)
207 return err;
208 }
209
210 return 0;
211 }
212
213 static int dsa_dsa_port_apply(struct dsa_port *port, u32 index,
214 struct dsa_switch *ds)
215 {
216 int err;
217
218 err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
219 if (err) {
220 dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n",
221 index, err);
222 return err;
223 }
224
225 return 0;
226 }
227
/* Tear down a switch-to-switch (DSA) port set up by dsa_dsa_port_apply(). */
static void dsa_dsa_port_unapply(struct dsa_port *port, u32 index,
				 struct dsa_switch *ds)
{
	dsa_cpu_dsa_destroy(port);
}
233
234 static int dsa_cpu_port_apply(struct dsa_port *port, u32 index,
235 struct dsa_switch *ds)
236 {
237 int err;
238
239 err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
240 if (err) {
241 dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n",
242 index, err);
243 return err;
244 }
245
246 ds->cpu_port_mask |= BIT(index);
247
248 return 0;
249 }
250
/* Tear down a CPU port set up by dsa_cpu_port_apply() and clear its bit
 * in ds->cpu_port_mask.
 */
static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index,
				 struct dsa_switch *ds)
{
	dsa_cpu_dsa_destroy(port);
	ds->cpu_port_mask &= ~BIT(index);

}
258
259 static int dsa_user_port_apply(struct dsa_port *port, u32 index,
260 struct dsa_switch *ds)
261 {
262 const char *name = port->name;
263 int err;
264
265 if (port->dn)
266 name = of_get_property(port->dn, "label", NULL);
267 if (!name)
268 name = "eth%d";
269
270 err = dsa_slave_create(ds, ds->dev, index, name);
271 if (err) {
272 dev_warn(ds->dev, "Failed to create slave %d: %d\n",
273 index, err);
274 ds->ports[index].netdev = NULL;
275 return err;
276 }
277
278 return 0;
279 }
280
281 static void dsa_user_port_unapply(struct dsa_port *port, u32 index,
282 struct dsa_switch *ds)
283 {
284 if (ds->ports[index].netdev) {
285 dsa_slave_destroy(ds->ports[index].netdev);
286 ds->ports[index].netdev = NULL;
287 ds->enabled_port_mask &= ~(1 << index);
288 }
289 }
290
/* Bring up one switch of the tree: run the driver's setup, register the
 * switch notifier, optionally program the MAC address and register a
 * slave MDIO bus, then apply every configured port.
 *
 * NOTE(review): early error returns here leave the notifier (and
 * possibly the MDIO bus) registered; presumably the caller relies on
 * dsa_dst_unapply() -> dsa_ds_unapply() to unwind — confirm against
 * _dsa_register_switch()'s failure path.
 */
static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
	struct dsa_port *port;
	u32 index;
	int err;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask = ds->enabled_port_mask;

	err = ds->ops->setup(ds);
	if (err < 0)
		return err;

	err = dsa_switch_register_notifier(ds);
	if (err)
		return err;

	/* set_addr is optional; program the master's MAC when provided */
	if (ds->ops->set_addr) {
		err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
		if (err < 0)
			return err;
	}

	/* Drivers exposing phy_read get a slave MII bus unless they
	 * already allocated one themselves.
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
		if (!ds->slave_mii_bus)
			return -ENOMEM;

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			return err;
	}

	for (index = 0; index < ds->num_ports; index++) {
		port = &ds->ports[index];
		if (!dsa_port_is_valid(port))
			continue;

		if (dsa_port_is_dsa(port)) {
			err = dsa_dsa_port_apply(port, index, ds);
			if (err)
				return err;
			continue;
		}

		if (dsa_port_is_cpu(port)) {
			err = dsa_cpu_port_apply(port, index, ds);
			if (err)
				return err;
			continue;
		}

		/* User port failures are deliberately non-fatal: the port's
		 * netdev was cleared by dsa_user_port_apply(), the rest of
		 * the switch keeps working.
		 */
		err = dsa_user_port_apply(port, index, ds);
		if (err)
			continue;
	}

	return 0;
}
356
357 static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
358 {
359 struct dsa_port *port;
360 u32 index;
361
362 for (index = 0; index < ds->num_ports; index++) {
363 port = &ds->ports[index];
364 if (!dsa_port_is_valid(port))
365 continue;
366
367 if (dsa_port_is_dsa(port)) {
368 dsa_dsa_port_unapply(port, index, ds);
369 continue;
370 }
371
372 if (dsa_port_is_cpu(port)) {
373 dsa_cpu_port_unapply(port, index, ds);
374 continue;
375 }
376
377 dsa_user_port_unapply(port, index, ds);
378 }
379
380 if (ds->slave_mii_bus && ds->ops->phy_read)
381 mdiobus_unregister(ds->slave_mii_bus);
382
383 dsa_switch_unregister_notifier(ds);
384 }
385
/* Apply the whole tree: bring up each registered switch, set up ethtool
 * ops on the CPU port, and finally publish the tree on the master
 * netdev's dsa_ptr so the receive path starts demuxing tagged frames.
 */
static int dsa_dst_apply(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	u32 index;
	int err;

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		err = dsa_ds_apply(dst, ds);
		if (err)
			return err;
	}

	if (dst->cpu_switch) {
		err = dsa_cpu_port_ethtool_setup(dst->cpu_switch);
		if (err)
			return err;
	}

	/* If we use a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point on get
	 * sent to the tag format's receive function.
	 */
	wmb();
	/* Publishing dsa_ptr must happen after the barrier above so the
	 * receive path never observes a partially set up tree.
	 */
	dst->master_netdev->dsa_ptr = (void *)dst;
	dst->applied = true;

	return 0;
}
418
/* Unapply the whole tree: first unpublish dsa_ptr so the receive path
 * stops demuxing, then tear down every switch and the CPU port ethtool
 * ops.  Safe to call on a tree that was never applied.
 */
static void dsa_dst_unapply(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	u32 index;

	if (!dst->applied)
		return;

	dst->master_netdev->dsa_ptr = NULL;

	/* If we used a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point get sent
	 * without the tag and go through the regular receive path.
	 */
	wmb();

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		dsa_ds_unapply(dst, ds);
	}

	if (dst->cpu_switch)
		dsa_cpu_port_ethtool_restore(dst->cpu_switch);

	pr_info("DSA: tree %d unapplied\n", dst->tree);
	dst->applied = false;
}
449
450 static int dsa_cpu_parse(struct dsa_port *port, u32 index,
451 struct dsa_switch_tree *dst,
452 struct dsa_switch *ds)
453 {
454 enum dsa_tag_protocol tag_protocol;
455 struct net_device *ethernet_dev;
456 struct device_node *ethernet;
457
458 if (port->dn) {
459 ethernet = of_parse_phandle(port->dn, "ethernet", 0);
460 if (!ethernet)
461 return -EINVAL;
462 ethernet_dev = of_find_net_device_by_node(ethernet);
463 } else {
464 ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
465 dev_put(ethernet_dev);
466 }
467
468 if (!ethernet_dev)
469 return -EPROBE_DEFER;
470
471 if (!ds->master_netdev)
472 ds->master_netdev = ethernet_dev;
473
474 if (!dst->master_netdev)
475 dst->master_netdev = ethernet_dev;
476
477 if (!dst->cpu_switch) {
478 dst->cpu_switch = ds;
479 dst->cpu_port = index;
480 }
481
482 tag_protocol = ds->ops->get_tag_protocol(ds);
483 dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
484 if (IS_ERR(dst->tag_ops)) {
485 dev_warn(ds->dev, "No tagger for this switch\n");
486 return PTR_ERR(dst->tag_ops);
487 }
488
489 dst->rcv = dst->tag_ops->rcv;
490
491 return 0;
492 }
493
494 static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
495 {
496 struct dsa_port *port;
497 u32 index;
498 int err;
499
500 for (index = 0; index < ds->num_ports; index++) {
501 port = &ds->ports[index];
502 if (!dsa_port_is_valid(port))
503 continue;
504
505 if (dsa_port_is_cpu(port)) {
506 err = dsa_cpu_parse(port, index, dst, ds);
507 if (err)
508 return err;
509 }
510 }
511
512 pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index);
513
514 return 0;
515 }
516
517 static int dsa_dst_parse(struct dsa_switch_tree *dst)
518 {
519 struct dsa_switch *ds;
520 u32 index;
521 int err;
522
523 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
524 ds = dst->ds[index];
525 if (!ds)
526 continue;
527
528 err = dsa_ds_parse(dst, ds);
529 if (err)
530 return err;
531 }
532
533 if (!dst->master_netdev) {
534 pr_warn("Tree has no master device\n");
535 return -EINVAL;
536 }
537
538 pr_info("DSA: tree %d parsed\n", dst->tree);
539
540 return 0;
541 }
542
543 static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
544 {
545 struct device_node *port;
546 int err;
547 u32 reg;
548
549 for_each_available_child_of_node(ports, port) {
550 err = of_property_read_u32(port, "reg", &reg);
551 if (err)
552 return err;
553
554 if (reg >= ds->num_ports)
555 return -EINVAL;
556
557 ds->ports[reg].dn = port;
558
559 /* Initialize enabled_port_mask now for ops->setup()
560 * to have access to a correct value, just like what
561 * net/dsa/dsa.c::dsa_switch_setup_one does.
562 */
563 if (!dsa_port_is_cpu(&ds->ports[reg]))
564 ds->enabled_port_mask |= 1 << reg;
565 }
566
567 return 0;
568 }
569
570 static int dsa_parse_ports(struct dsa_chip_data *cd, struct dsa_switch *ds)
571 {
572 bool valid_name_found = false;
573 unsigned int i;
574
575 for (i = 0; i < DSA_MAX_PORTS; i++) {
576 if (!cd->port_names[i])
577 continue;
578
579 ds->ports[i].name = cd->port_names[i];
580
581 /* Initialize enabled_port_mask now for drv->setup()
582 * to have access to a correct value, just like what
583 * net/dsa/dsa.c::dsa_switch_setup_one does.
584 */
585 if (!dsa_port_is_cpu(&ds->ports[i]))
586 ds->enabled_port_mask |= 1 << i;
587
588 valid_name_found = true;
589 }
590
591 if (!valid_name_found && i == DSA_MAX_PORTS)
592 return -EINVAL;
593
594 return 0;
595 }
596
597 static int dsa_parse_member_dn(struct device_node *np, u32 *tree, u32 *index)
598 {
599 int err;
600
601 *tree = *index = 0;
602
603 err = of_property_read_u32_index(np, "dsa,member", 0, tree);
604 if (err) {
605 /* Does not exist, but it is optional */
606 if (err == -EINVAL)
607 return 0;
608 return err;
609 }
610
611 err = of_property_read_u32_index(np, "dsa,member", 1, index);
612 if (err)
613 return err;
614
615 if (*index >= DSA_MAX_SWITCHES)
616 return -EINVAL;
617
618 return 0;
619 }
620
/* Platform-data counterpart of dsa_parse_member_dn(): tree/index are
 * always 0 because complex trees are not supported with dsa_chip_data.
 */
static int dsa_parse_member(struct dsa_chip_data *pd, u32 *tree, u32 *index)
{
	if (!pd)
		return -ENODEV;

	/* We do not support complex trees with dsa_chip_data */
	*tree = 0;
	*index = 0;

	return 0;
}
632
633 static struct device_node *dsa_get_ports(struct dsa_switch *ds,
634 struct device_node *np)
635 {
636 struct device_node *ports;
637
638 ports = of_get_child_by_name(np, "ports");
639 if (!ports) {
640 dev_err(ds->dev, "no ports child node found\n");
641 return ERR_PTR(-EINVAL);
642 }
643
644 return ports;
645 }
646
647 static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev)
648 {
649 struct dsa_chip_data *pdata = dev->platform_data;
650 struct device_node *np = dev->of_node;
651 struct dsa_switch_tree *dst;
652 struct device_node *ports;
653 u32 tree, index;
654 int i, err;
655
656 if (np) {
657 err = dsa_parse_member_dn(np, &tree, &index);
658 if (err)
659 return err;
660
661 ports = dsa_get_ports(ds, np);
662 if (IS_ERR(ports))
663 return PTR_ERR(ports);
664
665 err = dsa_parse_ports_dn(ports, ds);
666 if (err)
667 return err;
668 } else {
669 err = dsa_parse_member(pdata, &tree, &index);
670 if (err)
671 return err;
672
673 err = dsa_parse_ports(pdata, ds);
674 if (err)
675 return err;
676 }
677
678 dst = dsa_get_dst(tree);
679 if (!dst) {
680 dst = dsa_add_dst(tree);
681 if (!dst)
682 return -ENOMEM;
683 }
684
685 if (dst->ds[index]) {
686 err = -EBUSY;
687 goto out;
688 }
689
690 ds->dst = dst;
691 ds->index = index;
692 ds->cd = pdata;
693
694 /* Initialize the routing table */
695 for (i = 0; i < DSA_MAX_SWITCHES; ++i)
696 ds->rtable[i] = DSA_RTABLE_NONE;
697
698 dsa_dst_add_ds(dst, ds, index);
699
700 err = dsa_dst_complete(dst);
701 if (err < 0)
702 goto out_del_dst;
703
704 if (err == 1) {
705 /* Not all switches registered yet */
706 err = 0;
707 goto out;
708 }
709
710 if (dst->applied) {
711 pr_info("DSA: Disjoint trees?\n");
712 return -EINVAL;
713 }
714
715 err = dsa_dst_parse(dst);
716 if (err) {
717 if (err == -EPROBE_DEFER) {
718 dsa_dst_del_ds(dst, ds, ds->index);
719 return err;
720 }
721
722 goto out_del_dst;
723 }
724
725 err = dsa_dst_apply(dst);
726 if (err) {
727 dsa_dst_unapply(dst);
728 goto out_del_dst;
729 }
730
731 dsa_put_dst(dst);
732 return 0;
733
734 out_del_dst:
735 dsa_dst_del_ds(dst, ds, ds->index);
736 out:
737 dsa_put_dst(dst);
738
739 return err;
740 }
741
742 struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
743 {
744 size_t size = sizeof(struct dsa_switch) + n * sizeof(struct dsa_port);
745 struct dsa_switch *ds;
746 int i;
747
748 ds = devm_kzalloc(dev, size, GFP_KERNEL);
749 if (!ds)
750 return NULL;
751
752 ds->dev = dev;
753 ds->num_ports = n;
754
755 for (i = 0; i < ds->num_ports; ++i) {
756 ds->ports[i].index = i;
757 ds->ports[i].ds = ds;
758 }
759
760 return ds;
761 }
762 EXPORT_SYMBOL_GPL(dsa_switch_alloc);
763
/**
 * dsa_register_switch - register a switch with the DSA v2 binding
 * @ds: switch allocated by dsa_switch_alloc()
 * @dev: device providing the configuration (DT node or platform data)
 *
 * Serializes registration against other switches via dsa2_mutex.
 * Returns 0 on success, -EPROBE_DEFER when the master device or sibling
 * switches are not yet available, or another negative error.
 */
int dsa_register_switch(struct dsa_switch *ds, struct device *dev)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = _dsa_register_switch(ds, dev);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
775
/* Core of switch removal (dsa2_mutex held by the caller): unapply the
 * whole tree, then detach this switch from it (dropping the reference
 * taken at registration, which may free the tree).
 */
static void _dsa_unregister_switch(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_dst_unapply(dst);

	dsa_dst_del_ds(dst, ds, ds->index);
}
784
/**
 * dsa_unregister_switch - remove a switch registered with dsa_register_switch()
 * @ds: the switch to remove
 *
 * Serialized against registration via dsa2_mutex.
 */
void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	_dsa_unregister_switch(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);