]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/net/dsa/bcm_sf2.c
Merge remote-tracking branches 'asoc/topic/cs35l32', 'asoc/topic/cs35l34', 'asoc...
[mirror_ubuntu-jammy-kernel.git] / drivers / net / dsa / bcm_sf2.c
1 /*
2 * Broadcom Starfighter 2 DSA switch driver
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/platform_device.h>
17 #include <linux/phy.h>
18 #include <linux/phy_fixed.h>
19 #include <linux/mii.h>
20 #include <linux/of.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_address.h>
23 #include <linux/of_net.h>
24 #include <linux/of_mdio.h>
25 #include <net/dsa.h>
26 #include <linux/ethtool.h>
27 #include <linux/if_bridge.h>
28 #include <linux/brcmphy.h>
29 #include <linux/etherdevice.h>
30 #include <linux/platform_data/b53.h>
31
32 #include "bcm_sf2.h"
33 #include "bcm_sf2_regs.h"
34 #include "b53/b53_priv.h"
35 #include "b53/b53_regs.h"
36
/* DSA callback: this switch always uses the 4-byte Broadcom tag,
 * regardless of which port is asked about.
 */
static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds,
							 int port)
{
	return DSA_TAG_PROTO_BRCM;
}
42
/* One-time setup of the IMP (CPU-facing) port: power up its queue
 * memories, allow all traffic types to reach it, enable forwarding and
 * Broadcom header insertion, then force its link state up.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	/* The IMP status override register sits at a different offset on
	 * BCM7445 than on the other supported device type.
	 */
	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number (identity mapping: TC i -> queue i)
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}
88
/* Power the integrated GPHY up or down.  The enable path pulses PHY_RESET
 * around clearing the power-down/IDDQ bits; the disable path powers the
 * PHY down and then gates its 25MHz clock.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		/* NOTE(review): the 21us hold time looks hardware-mandated —
		 * confirm against the datasheet before changing.
		 */
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	/* Final write releases reset (enable) or gates the clock (disable) */
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
117
118 static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
119 int port)
120 {
121 unsigned int off;
122
123 switch (port) {
124 case 7:
125 off = P7_IRQ_OFF;
126 break;
127 case 0:
128 /* Port 0 interrupts are located on the first bank */
129 intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
130 return;
131 default:
132 off = P_IRQ_OFF(port);
133 break;
134 }
135
136 intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
137 }
138
139 static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
140 int port)
141 {
142 unsigned int off;
143
144 switch (port) {
145 case 7:
146 off = P7_IRQ_OFF;
147 break;
148 case 0:
149 /* Port 0 interrupts are located on the first bank */
150 intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
151 intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
152 return;
153 default:
154 off = P_IRQ_OFF(port);
155 break;
156 }
157
158 intrl2_1_mask_set(priv, P_IRQ_MASK(off));
159 intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
160 }
161
/* DSA port_enable callback: power up a user port's queue memories,
 * program its QoS mapping and flow-control thresholds, re-enable the
 * internal GPHY where applicable, then hand off to the b53 core.
 * Returns whatever b53_enable_port() returns.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number (identity mapping: TC i -> queue i)
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 *
			 * NOTE(review): poking phy->state directly
			 * bypasses phylib's state machine locking —
			 * confirm this is still the sanctioned way on
			 * this kernel version.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}
223
224 static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
225 struct phy_device *phy)
226 {
227 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
228 u32 off, reg;
229
230 if (priv->wol_ports_mask & (1 << port))
231 return;
232
233 if (port == priv->moca_port)
234 bcm_sf2_port_intr_disable(priv, port);
235
236 if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
237 bcm_sf2_gphy_enable_set(ds, false);
238
239 if (dsa_is_cpu_port(ds, port))
240 off = CORE_IMP_CTL;
241 else
242 off = CORE_G_PCTL_PORT(port);
243
244 b53_disable_port(ds, port, phy);
245
246 /* Power down the port memory */
247 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
248 reg |= P_TXQ_PSM_VDD(port);
249 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
250 }
251
252
/* Indirect access to the switch pseudo-PHY through the SWITCH_CORE
 * register window.  @op != 0 performs a read and the 16-bit value is
 * returned; @op == 0 writes @val (the return value is then meaningless).
 * The MDIO master is switched to the internal controller for the
 * duration of the access and restored afterwards.
 */
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	/* Route MDIO transactions through the internal master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;	/* byte offset -> 32-bit word address */
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Hand the MDIO master back to the external controller */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
283
284 static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
285 {
286 struct bcm_sf2_priv *priv = bus->priv;
287
288 /* Intercept reads from Broadcom pseudo-PHY address, else, send
289 * them to our master MDIO bus controller
290 */
291 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
292 return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
293 else
294 return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
295 }
296
297 static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
298 u16 val)
299 {
300 struct bcm_sf2_priv *priv = bus->priv;
301
302 /* Intercept writes to the Broadcom pseudo-PHY address, else,
303 * send them to our master MDIO bus controller
304 */
305 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
306 bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
307 else
308 mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
309
310 return 0;
311 }
312
313 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
314 {
315 struct bcm_sf2_priv *priv = dev_id;
316
317 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
318 ~priv->irq0_mask;
319 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
320
321 return IRQ_HANDLED;
322 }
323
324 static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
325 {
326 struct bcm_sf2_priv *priv = dev_id;
327
328 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
329 ~priv->irq1_mask;
330 intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
331
332 if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
333 priv->port_sts[7].link = 1;
334 if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
335 priv->port_sts[7].link = 0;
336
337 return IRQ_HANDLED;
338 }
339
340 static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
341 {
342 unsigned int timeout = 1000;
343 u32 reg;
344
345 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
346 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
347 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
348
349 do {
350 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
351 if (!(reg & SOFTWARE_RESET))
352 break;
353
354 usleep_range(1000, 2000);
355 } while (timeout-- > 0);
356
357 if (timeout == 0)
358 return -ETIMEDOUT;
359
360 return 0;
361 }
362
363 static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
364 {
365 intrl2_0_mask_set(priv, 0xffffffff);
366 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
367 intrl2_1_mask_set(priv, 0xffffffff);
368 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
369 }
370
371 static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
372 struct device_node *dn)
373 {
374 struct device_node *port;
375 int mode;
376 unsigned int port_num;
377
378 priv->moca_port = -1;
379
380 for_each_available_child_of_node(dn, port) {
381 if (of_property_read_u32(port, "reg", &port_num))
382 continue;
383
384 /* Internal PHYs get assigned a specific 'phy-mode' property
385 * value: "internal" to help flag them before MDIO probing
386 * has completed, since they might be turned off at that
387 * time
388 */
389 mode = of_get_phy_mode(port);
390 if (mode < 0)
391 continue;
392
393 if (mode == PHY_INTERFACE_MODE_INTERNAL)
394 priv->int_phy_mask |= 1 << port_num;
395
396 if (mode == PHY_INTERFACE_MODE_MOCA)
397 priv->moca_port = port_num;
398
399 if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
400 priv->brcm_tag_mask |= 1 << port_num;
401 }
402 }
403
404 static int bcm_sf2_mdio_register(struct dsa_switch *ds)
405 {
406 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
407 struct device_node *dn;
408 static int index;
409 int err;
410
411 /* Find our integrated MDIO bus node */
412 dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
413 priv->master_mii_bus = of_mdio_find_bus(dn);
414 if (!priv->master_mii_bus)
415 return -EPROBE_DEFER;
416
417 get_device(&priv->master_mii_bus->dev);
418 priv->master_mii_dn = dn;
419
420 priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
421 if (!priv->slave_mii_bus)
422 return -ENOMEM;
423
424 priv->slave_mii_bus->priv = priv;
425 priv->slave_mii_bus->name = "sf2 slave mii";
426 priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
427 priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
428 snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
429 index++);
430 priv->slave_mii_bus->dev.of_node = dn;
431
432 /* Include the pseudo-PHY address to divert reads towards our
433 * workaround. This is only required for 7445D0, since 7445E0
434 * disconnects the internal switch pseudo-PHY such that we can use the
435 * regular SWITCH_MDIO master controller instead.
436 *
437 * Here we flag the pseudo PHY as needing special treatment and would
438 * otherwise make all other PHY read/writes go to the master MDIO bus
439 * controller that comes with this switch backed by the "mdio-unimac"
440 * driver.
441 */
442 if (of_machine_is_compatible("brcm,bcm7445d0"))
443 priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
444 else
445 priv->indir_phy_mask = 0;
446
447 ds->phys_mii_mask = priv->indir_phy_mask;
448 ds->slave_mii_bus = priv->slave_mii_bus;
449 priv->slave_mii_bus->parent = ds->dev->parent;
450 priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
451
452 if (dn)
453 err = of_mdiobus_register(priv->slave_mii_bus, dn);
454 else
455 err = mdiobus_register(priv->slave_mii_bus);
456
457 if (err)
458 of_node_put(dn);
459
460 return err;
461 }
462
463 static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
464 {
465 mdiobus_unregister(priv->slave_mii_bus);
466 if (priv->master_mii_dn)
467 of_node_put(priv->master_mii_dn);
468 }
469
/* DSA callback: expose the integrated GPHY revision to the PHY driver. */
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}
481
/* DSA adjust_link callback: program the RGMII block according to the
 * PHY's negotiated interface/speed/duplex/pause, then force the matching
 * link parameters at the port override register.
 */
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fallthrough - RGMII and RGMII_TXID share EXT_GPHY mode,
		 * they only differ in the internal delay (id_mode_dis)
		 */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);

	/* EEE is only meaningful for real PHYs, not fixed links */
	if (!phydev->is_pseudo_fixed_link)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}
570
/* DSA fixed_link_update callback: report link/duplex/pause status for
 * fixed-link ports and mirror the result into the port override register
 * so traffic actually flows.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 duplex, pause, offset;
	u32 reg;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	/* NOTE(review): this initial clear is overwritten in both branches
	 * below; it looks redundant but is kept as-is.
	 */
	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].slave);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	/* Propagate the (possibly forced) link state to the hardware */
	reg = core_readl(priv, offset);
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, offset);

	/* RX and TX pause both asserted means symmetric + asymmetric pause */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
628
/* Globally enable ACB: first pulse the flush bits to drain the queues,
 * then enable ACB with its algorithm bit set.
 */
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	/* Second write clears the flush bits and turns ACB on */
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}
642
643 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
644 {
645 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
646 unsigned int port;
647
648 bcm_sf2_intr_disable(priv);
649
650 /* Disable all ports physically present including the IMP
651 * port, the other ones have already been disabled during
652 * bcm_sf2_sw_setup
653 */
654 for (port = 0; port < DSA_MAX_PORTS; port++) {
655 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
656 bcm_sf2_port_disable(ds, port, NULL);
657 }
658
659 return 0;
660 }
661
662 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
663 {
664 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
665 unsigned int port;
666 int ret;
667
668 ret = bcm_sf2_sw_rst(priv);
669 if (ret) {
670 pr_err("%s: failed to software reset switch\n", __func__);
671 return ret;
672 }
673
674 if (priv->hw_params.num_gphy == 1)
675 bcm_sf2_gphy_enable_set(ds, true);
676
677 for (port = 0; port < DSA_MAX_PORTS; port++) {
678 if (dsa_is_user_port(ds, port))
679 bcm_sf2_port_setup(ds, port, NULL);
680 else if (dsa_is_cpu_port(ds, port))
681 bcm_sf2_imp_setup(ds, port);
682 }
683
684 bcm_sf2_enable_acb(ds);
685
686 return 0;
687 }
688
689 static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
690 struct ethtool_wolinfo *wol)
691 {
692 struct net_device *p = ds->ports[port].cpu_dp->master;
693 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
694 struct ethtool_wolinfo pwol;
695
696 /* Get the parent device WoL settings */
697 p->ethtool_ops->get_wol(p, &pwol);
698
699 /* Advertise the parent device supported settings */
700 wol->supported = pwol.supported;
701 memset(&wol->sopass, 0, sizeof(wol->sopass));
702
703 if (pwol.wolopts & WAKE_MAGICSECURE)
704 memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
705
706 if (priv->wol_ports_mask & (1 << port))
707 wol->wolopts = pwol.wolopts;
708 else
709 wol->wolopts = 0;
710 }
711
712 static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
713 struct ethtool_wolinfo *wol)
714 {
715 struct net_device *p = ds->ports[port].cpu_dp->master;
716 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
717 s8 cpu_port = ds->ports[port].cpu_dp->index;
718 struct ethtool_wolinfo pwol;
719
720 p->ethtool_ops->get_wol(p, &pwol);
721 if (wol->wolopts & ~pwol.supported)
722 return -EINVAL;
723
724 if (wol->wolopts)
725 priv->wol_ports_mask |= (1 << port);
726 else
727 priv->wol_ports_mask &= ~(1 << port);
728
729 /* If we have at least one port enabled, make sure the CPU port
730 * is also enabled. If the CPU port is the last one enabled, we disable
731 * it since this configuration does not make sense.
732 */
733 if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
734 priv->wol_ports_mask |= (1 << cpu_port);
735 else
736 priv->wol_ports_mask &= ~(1 << cpu_port);
737
738 return p->ethtool_ops->set_wol(p, wol);
739 }
740
741 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
742 {
743 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
744 unsigned int port;
745
746 /* Enable all valid ports and disable those unused */
747 for (port = 0; port < priv->hw_params.num_ports; port++) {
748 /* IMP port receives special treatment */
749 if (dsa_is_user_port(ds, port))
750 bcm_sf2_port_setup(ds, port, NULL);
751 else if (dsa_is_cpu_port(ds, port))
752 bcm_sf2_imp_setup(ds, port);
753 else
754 bcm_sf2_port_disable(ds, port, NULL);
755 }
756
757 b53_configure_vlan(ds);
758 bcm_sf2_enable_acb(ds);
759
760 return 0;
761 }
762
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.  The page selects the upper address bits and the
 * 8-bit register index is scaled to a 32-bit word offset.
 */
#define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2)
768
769 static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
770 u8 *val)
771 {
772 struct bcm_sf2_priv *priv = dev->priv;
773
774 *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
775
776 return 0;
777 }
778
779 static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
780 u16 *val)
781 {
782 struct bcm_sf2_priv *priv = dev->priv;
783
784 *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
785
786 return 0;
787 }
788
789 static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
790 u32 *val)
791 {
792 struct bcm_sf2_priv *priv = dev->priv;
793
794 *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
795
796 return 0;
797 }
798
799 static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
800 u64 *val)
801 {
802 struct bcm_sf2_priv *priv = dev->priv;
803
804 *val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));
805
806 return 0;
807 }
808
809 static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
810 u8 value)
811 {
812 struct bcm_sf2_priv *priv = dev->priv;
813
814 core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
815
816 return 0;
817 }
818
819 static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
820 u16 value)
821 {
822 struct bcm_sf2_priv *priv = dev->priv;
823
824 core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
825
826 return 0;
827 }
828
829 static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
830 u32 value)
831 {
832 struct bcm_sf2_priv *priv = dev->priv;
833
834 core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
835
836 return 0;
837 }
838
839 static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
840 u64 value)
841 {
842 struct bcm_sf2_priv *priv = dev->priv;
843
844 core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
845
846 return 0;
847 }
848
/* Register accessors handed to the b53 core.  The 48-bit operations
 * deliberately reuse the 64-bit handlers.
 */
static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};
861
/* DSA operations: SF2-specific handlers where the hardware differs from
 * a plain b53 switch; everything else is delegated to the b53 core.
 */
static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= bcm_sf2_sw_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};
895
/* Per-compatible device description selected through the OF match table. */
struct bcm_sf2_of_data {
	u32 type;			/* BCM74xx/72xx device identifier */
	const u16 *reg_offsets;		/* SWITCH_REG_* register layout */
	unsigned int core_reg_align;	/* SWITCH_CORE address alignment */
	unsigned int num_cfp_rules;	/* CFP rule table size */
};
902
/* Register offsets for the SWITCH_REG_* block (BCM7445 layout) */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};
919
/* BCM7445: unaligned SWITCH_CORE accesses, 256 CFP rules */
static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};
926
/* Register offsets for the SWITCH_REG_* block (BCM7278 layout) */
static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};
942
/* BCM7278: aligned SWITCH_CORE accesses, 128 CFP rules */
static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};
949
/* Device tree match table; .data selects the per-chip description */
static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
960
/* Platform probe: allocate the b53/DSA structures, map register
 * resources, reset the switch and CFP, hook up interrupts and register
 * the switch with the DSA core.
 */
static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* NOTE(review): 'ops' is allocated but never used in this function —
	 * looks like a leftover; confirm before removing.
	 */
	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);
	mutex_init(&priv->cfp.lock);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

	bcm_sf2_identify_ports(priv, dn->child);

	/* NOTE(review): these IRQ mappings are never disposed of on the
	 * error paths below (irq_dispose_mapping) — potential leak on
	 * probe failure; confirm against current upstream practice.
	 */
	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	/* Map every named register resource into the priv pointer array
	 * starting at priv->core.
	 */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}
1123
/* Platform remove: power everything down and unregister from DSA. */
static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* Disable all ports and interrupts.  Clearing the WoL mask first
	 * ensures bcm_sf2_port_disable() does not skip any port.
	 *
	 * NOTE(review): ports are disabled while the switch is still
	 * registered with the DSA core — confirm this ordering cannot race
	 * with in-flight DSA operations.
	 */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(priv->dev->ds);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}
1136
/* Platform shutdown hook: keep the GPHY powered for a following kexec. */
static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}
1150
1151 #ifdef CONFIG_PM_SLEEP
/* PM suspend: delegate to the DSA core, which invokes our .suspend op. */
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}
1159
/* PM resume: delegate to the DSA core, which invokes our .resume op. */
static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
1167 #endif /* CONFIG_PM_SLEEP */
1168
/* System sleep handlers; no-ops when CONFIG_PM_SLEEP is disabled */
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);
1171
1172
/* Platform driver glue; matched via the OF table above */
static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);
1184
1185 MODULE_AUTHOR("Broadcom Corporation");
1186 MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
1187 MODULE_LICENSE("GPL");
1188 MODULE_ALIAS("platform:brcm-sf2");