/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"

static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds,
							 int port)
{
	return DSA_TAG_PROTO_BRCM;
}

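/* Bring up the IMP (CPU) port: power up its queue memories, enable switch
 * forwarding, set up Broadcom tags and the TC-to-queue mapping, and force
 * the link state when the IMP port is port 8.
 */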
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	if (port == 8) {
		if (priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_IMP;
		else
			offset = CORE_STS_OVERRIDE_IMP2;

		/* Force link status for IMP port */
		reg = core_readl(priv, offset);
		reg |= (MII_SW_OR | LINK_STS);
		core_writel(priv, reg, offset);

		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
		reg = core_readl(priv, CORE_IMP_CTL);
		reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_IMP_CTL);
	} else {
		reg = core_readl(priv, CORE_G_PCTL_PORT(port));
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
	}
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}

static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY
			 * will be in the halted state and phy_start()
			 * will call resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard reset the PHY
			 * manually here, we need to reset its state
			 * machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	b53_disable_port(ds, port, phy);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}

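/* Indirect access to the integrated pseudo-PHY through the switch's own MDIO
 * master: op != 0 performs a read and returns the 16-bit register value,
 * op == 0 writes val to the selected register.
 */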
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}

static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from the Broadcom pseudo-PHY address, otherwise
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address, otherwise
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);

	return 0;
}

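/* Interrupt handlers for the two INTRL2 banks: bank 0 is simply acknowledged,
 * bank 1 additionally tracks link up/down events for port 7 (MoCA).
 */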
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}

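/* Assert a software reset through the watchdog control register and poll for
 * the SOFTWARE_RESET bit to clear, sleeping 1-2ms between reads; return
 * -ETIMEDOUT if it does not clear in time.
 */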
static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	return -ETIMEDOUT;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

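/* Walk the DT port sub-nodes to record which ports use an internal PHY, which
 * port (if any) is the MoCA port, and which ports want Broadcom tags.
 */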
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	int mode;
	unsigned int port_num;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}

static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo-PHY as needing special treatment; all other
	 * PHY reads/writes go to the master MDIO bus controller that comes
	 * with this switch, backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	if (err)
		of_node_put(dn);

	return err;
}

static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}

static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fall through */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);

	if (!phydev->is_pseudo_fixed_link)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}

static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 duplex, pause, offset;
	u32 reg;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* The MoCA port is special as we do not get its link status from
	 * CORE_LNKSTS, which means that we need to force the link at the
	 * port override level to get the data to flow. We use what the
	 * interrupt handler determined earlier.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some versions of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and makes it go into the PHY_FORCING state
		 * instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].slave);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, offset);
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, offset);

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}

static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all physically present ports, including the IMP port;
	 * the others have already been disabled during bcm_sf2_sw_setup()
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	ds->ops->setup(ds);

	return 0;
}

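/* WoL configuration is delegated to the master network device attached to
 * the CPU port; the switch itself only tracks which user ports have WoL
 * enabled via wol_ports_mask.
 */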
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol = { };

	/* Get the parent device WoL settings */
	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	struct ethtool_wolinfo pwol = { };

	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return 0;
}

/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)

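/* b53 register accessors: all accesses go through 32-bit (or 64-bit) MMIO
 * operations on the remapped page/offset address; the 8-bit and 16-bit
 * variants simply truncate the 32-bit value, and 48-bit accesses are handled
 * by the 64-bit helpers (see the ops table below).
 */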
static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};

static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= bcm_sf2_sw_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};

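/* Per-compatible configuration: device type, SWITCH_REG register offsets,
 * SWITCH_CORE register alignment and number of CFP rules.
 */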
struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
};

/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};

static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};

static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};

static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};

static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);

static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);
	mutex_init(&priv->cfp.lock);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	bcm_sf2_gphy_enable_set(priv->dev->ds, true);

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	bcm_sf2_gphy_enable_set(priv->dev->ds, false);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}

static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	priv->wol_ports_mask = 0;
	dsa_unregister_switch(priv->dev->ds);
	/* Disable all ports and interrupts */
	bcm_sf2_sw_suspend(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}

static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on so
	 * that the MDIO bus scan can succeed. If we turned off the GPHY
	 * before (e.g. in port_disable), this also powers it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);

static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");