/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/seq_file.h>
#include <net/dsa.h>
#include "mv88e6xxx.h"

/* MDIO bus access can be nested in the case of PHYs connected to the
 * internal MDIO bus of the switch, which is accessed via MDIO bus of
 * the Ethernet interface. Avoid lockdep false positives by using
 * mutex_lock_nested().
 */
static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
	ret = bus->read(bus, addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
				   u16 val)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
	ret = bus->write(bus, addr, regnum, val);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
{
	int ret;
	int i;

	for (i = 0; i < 16; i++) {
		ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
		if (ret < 0)
			return ret;

		if ((ret & SMI_CMD_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
{
	int ret;

	if (sw_addr == 0)
		return mv88e6xxx_mdiobus_read(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
				      SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}
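
/* For illustration only (hedged, not taken from the datasheet): with an
 * indirect switch at SMI address sw_addr, reading register 0x00 of device
 * 0x10 encodes the command as SMI_CMD_OP_22_READ | (0x10 << 5) | 0x00,
 * i.e. the device address lives above the register bits of SMI_CMD, and
 * the 16-bit result is then fetched from SMI_DATA.
 */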

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
	int ret;

	if (bus == NULL)
		return -EINVAL;

	ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
	if (ret < 0)
		return ret;

	dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, ret);

	return ret;
}

int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_read(ds, addr, reg);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
			  int reg, u16 val)
{
	int ret;

	if (sw_addr == 0)
		return mv88e6xxx_mdiobus_write(bus, addr, reg, val);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the data to write. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
	if (ret < 0)
		return ret;

	/* Transmit the write command. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
				      SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the write command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
				u16 val)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);

	if (bus == NULL)
		return -EINVAL;

	dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, val);

	return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
}

int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);

	return 0;
}

int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
	int i;
	int ret;

	for (i = 0; i < 6; i++) {
		int j;

		/* Write the MAC address byte. */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
			  GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);

		/* Wait for the write to complete. */
		for (j = 0; j < 16; j++) {
			ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
				break;
		}
		if (j == 16)
			return -ETIMEDOUT;
	}

	return 0;
}
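
/* Example encoding (illustrative): writing byte 2 of the switch MAC
 * 00:11:22:33:44:55 issues GLOBAL2_SWITCH_MAC = BUSY | (2 << 8) | 0x22,
 * i.e. the byte index is placed in the upper byte and the MAC byte in
 * the lower byte, and the BUSY bit self-clears once the hardware has
 * latched that byte.
 */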

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ds, addr, regnum);
	return 0xffff;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
				u16 val)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_write(ds, addr, regnum, val);
	return 0;
}

#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
		  ret & ~GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
	struct mv88e6xxx_priv_state *ps;

	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
	if (mutex_trylock(&ps->ppu_mutex)) {
		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;

		if (mv88e6xxx_ppu_enable(ds) == 0)
			ps->ppu_disabled = 0;
		mutex_unlock(&ps->ppu_mutex);
	}
}

static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
{
	struct mv88e6xxx_priv_state *ps = (void *)_ps;

	schedule_work(&ps->ppu_work);
}

static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->ppu_mutex);

	/* If the PHY polling unit is enabled, disable it so that
	 * we can access the PHY registers.  If it was already
	 * disabled, cancel the timer that is going to re-enable
	 * it.
	 */
	if (!ps->ppu_disabled) {
		ret = mv88e6xxx_ppu_disable(ds);
		if (ret < 0) {
			mutex_unlock(&ps->ppu_mutex);
			return ret;
		}
		ps->ppu_disabled = 1;
	} else {
		del_timer(&ps->ppu_timer);
		ret = 0;
	}

	return ret;
}

static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	/* Schedule a timer to re-enable the PHY polling unit. */
	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
	mutex_unlock(&ps->ppu_mutex);
}

void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->ppu_mutex);
	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
	init_timer(&ps->ppu_timer);
	ps->ppu_timer.data = (unsigned long)ps;
	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}

int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}

int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
			    int regnum, u16 val)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
#endif
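
/* PPU access pattern in a nutshell (descriptive comment added for
 * clarity): callers bracket PHY register access with
 * mv88e6xxx_ppu_access_get()/..._put().  The get side pauses the PHY
 * polling unit so the PHY registers become reachable; the put side arms
 * a 10 ms timer, and only when no further access arrives does the
 * deferred work re-enable polling.
 */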

void mv88e6xxx_poll_link(struct dsa_switch *ds)
{
	int i;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		struct net_device *dev;
		int uninitialized_var(port_status);
		int pcs_ctrl;
		int link;
		int speed;
		int duplex;
		int fc;

		dev = ds->ports[i];
		if (dev == NULL)
			continue;

		pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL);
		if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK)
			continue;

		link = 0;
		if (dev->flags & IFF_UP) {
			port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
							 PORT_STATUS);
			if (port_status < 0)
				continue;

			link = !!(port_status & PORT_STATUS_LINK);
		}

		if (!link) {
			if (netif_carrier_ok(dev)) {
				netdev_info(dev, "link down\n");
				netif_carrier_off(dev);
			}
			continue;
		}

		switch (port_status & PORT_STATUS_SPEED_MASK) {
		case PORT_STATUS_SPEED_10:
			speed = 10;
			break;
		case PORT_STATUS_SPEED_100:
			speed = 100;
			break;
		case PORT_STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			speed = -1;
			break;
		}
		duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
		fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;

		if (!netif_carrier_ok(dev)) {
			netdev_info(dev,
				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
				    speed,
				    duplex ? "full" : "half",
				    fc ? "en" : "dis");
			netif_carrier_on(dev);
		}
	}
}

static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6031:
	case PORT_SWITCH_ID_6061:
	case PORT_SWITCH_ID_6035:
	case PORT_SWITCH_ID_6065:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6092:
	case PORT_SWITCH_ID_6095:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6046:
	case PORT_SWITCH_ID_6085:
	case PORT_SWITCH_ID_6096:
	case PORT_SWITCH_ID_6097:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6123:
	case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6121:
	case PORT_SWITCH_ID_6122:
	case PORT_SWITCH_ID_6152:
	case PORT_SWITCH_ID_6155:
	case PORT_SWITCH_ID_6182:
	case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6108:
	case PORT_SWITCH_ID_6131:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6320:
	case PORT_SWITCH_ID_6321:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6175:
	case PORT_SWITCH_ID_6350:
	case PORT_SWITCH_ID_6351:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6172:
	case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6240:
	case PORT_SWITCH_ID_6352:
		return true;
	}
	return false;
}

/* We expect the switch to perform auto negotiation if there is a real
 * phy. However, in the case of a fixed link phy, we force the port
 * settings from the fixed link settings.
 */
void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
			   struct phy_device *phydev)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u32 ret, reg;

	if (!phy_is_pseudo_fixed_link(phydev))
		return;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
	if (ret < 0)
		goto out;

	reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
		      PORT_PCS_CTRL_FORCE_LINK |
		      PORT_PCS_CTRL_DUPLEX_FULL |
		      PORT_PCS_CTRL_FORCE_DUPLEX |
		      PORT_PCS_CTRL_UNFORCED);

	reg |= PORT_PCS_CTRL_FORCE_LINK;
	if (phydev->link)
		reg |= PORT_PCS_CTRL_LINK_UP;

	if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
		goto out;

	switch (phydev->speed) {
	case SPEED_1000:
		reg |= PORT_PCS_CTRL_1000;
		break;
	case SPEED_100:
		reg |= PORT_PCS_CTRL_100;
		break;
	case SPEED_10:
		reg |= PORT_PCS_CTRL_10;
		break;
	default:
		pr_info("Unknown speed");
		goto out;
	}

	reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= PORT_PCS_CTRL_DUPLEX_FULL;

	if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
	    (port >= ps->num_ports - 2)) {
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
	}
	_mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);

out:
	mutex_unlock(&ps->smi_mutex);
}
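
/* Worked example (illustrative): a fixed-link port forced to 1000 Mb/s
 * full duplex with link up ends up writing PORT_PCS_CTRL_FORCE_LINK |
 * PORT_PCS_CTRL_LINK_UP | PORT_PCS_CTRL_1000 |
 * PORT_PCS_CTRL_FORCE_DUPLEX | PORT_PCS_CTRL_DUPLEX_FULL on top of the
 * untouched PCS bits read back above.
 */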

/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
	int ret;
	int i;

	for (i = 0; i < 10; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
	int ret;

	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		port = (port + 1) << 5;

	/* Snapshot the hardware statistics counters for this port. */
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_CAPTURE_PORT |
				   GLOBAL_STATS_OP_HIST_RX_TX | port);
	if (ret < 0)
		return ret;

	/* Wait for the snapshotting to complete. */
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
	u32 _val;
	int ret;

	*val = 0;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_READ_CAPTURED |
				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
	if (ret < 0)
		return;

	_val = ret << 16;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
	if (ret < 0)
		return;

	*val = _val | ret;
}
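
/* Counter assembly example (illustrative): the stats unit returns a
 * 32-bit counter as two 16-bit halves, so GLOBAL_STATS_COUNTER_32 =
 * 0x0001 and GLOBAL_STATS_COUNTER_01 = 0x86a0 combine to
 * (0x0001 << 16) | 0x86a0 = 0x000186a0, i.e. 100000.
 */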

static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, },
	{ "in_bad_octets", 4, 0x02, },
	{ "in_unicast", 4, 0x04, },
	{ "in_broadcasts", 4, 0x06, },
	{ "in_multicasts", 4, 0x07, },
	{ "in_pause", 4, 0x16, },
	{ "in_undersize", 4, 0x18, },
	{ "in_fragments", 4, 0x19, },
	{ "in_oversize", 4, 0x1a, },
	{ "in_jabber", 4, 0x1b, },
	{ "in_rx_error", 4, 0x1c, },
	{ "in_fcs_error", 4, 0x1d, },
	{ "out_octets", 8, 0x0e, },
	{ "out_unicast", 4, 0x10, },
	{ "out_broadcasts", 4, 0x13, },
	{ "out_multicasts", 4, 0x12, },
	{ "out_pause", 4, 0x15, },
	{ "excessive", 4, 0x11, },
	{ "collisions", 4, 0x1e, },
	{ "deferred", 4, 0x05, },
	{ "single", 4, 0x14, },
	{ "multiple", 4, 0x17, },
	{ "out_fcs_error", 4, 0x03, },
	{ "late", 4, 0x1f, },
	{ "hist_64bytes", 4, 0x08, },
	{ "hist_65_127bytes", 4, 0x09, },
	{ "hist_128_255bytes", 4, 0x0a, },
	{ "hist_256_511bytes", 4, 0x0b, },
	{ "hist_512_1023bytes", 4, 0x0c, },
	{ "hist_1024_max_bytes", 4, 0x0d, },
	/* Not all devices have the following counters */
	{ "sw_in_discards", 4, 0x110, },
	{ "sw_in_filtered", 2, 0x112, },
	{ "sw_out_filtered", 2, 0x113, },

};

static bool have_sw_in_discards(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6352:
		return true;
	default:
		return false;
	}
}

static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
				   int nr_stats,
				   struct mv88e6xxx_hw_stat *stats,
				   int port, uint8_t *data)
{
	int i;

	for (i = 0; i < nr_stats; i++) {
		memcpy(data + i * ETH_GSTRING_LEN,
		       stats[i].string, ETH_GSTRING_LEN);
	}
}

static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
					    int stat,
					    struct mv88e6xxx_hw_stat *stats,
					    int port)
{
	struct mv88e6xxx_hw_stat *s = stats + stat;
	u32 low;
	u32 high = 0;
	int ret;
	u64 value;

	if (s->reg >= 0x100) {
		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
					  s->reg - 0x100);
		if (ret < 0)
			return UINT64_MAX;

		low = ret;
		if (s->sizeof_stat == 4) {
			ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
						  s->reg - 0x100 + 1);
			if (ret < 0)
				return UINT64_MAX;
			high = ret;
		}
	} else {
		_mv88e6xxx_stats_read(ds, s->reg, &low);
		if (s->sizeof_stat == 8)
			_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
	}
	value = (((u64)high) << 16) | low;
	return value;
}
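
/* Stat addressing note (descriptive): table entries with reg >= 0x100
 * are not stats-unit counters but per-port registers; "sw_in_discards"
 * at 0x110, for instance, is read from port register 0x10 (low word)
 * and 0x11 (high word), while everything below 0x100 goes through the
 * captured stats unit above.
 */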

static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
					 int nr_stats,
					 struct mv88e6xxx_hw_stat *stats,
					 int port, uint64_t *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_stats_snapshot(ds, port);
	if (ret < 0) {
		mutex_unlock(&ps->smi_mutex);
		return;
	}

	/* Read each of the counters. */
	for (i = 0; i < nr_stats; i++)
		data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);

	mutex_unlock(&ps->smi_mutex);
}

/* All the statistics in the table */
void
mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
				       mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
				       mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
	if (have_sw_in_discards(ds))
		return ARRAY_SIZE(mv88e6xxx_hw_stats);
	return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
}

void
mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
			    int port, uint64_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
			mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
			mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
	return 32 * sizeof(u16);
}

void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
			struct ethtool_regs *regs, void *_p)
{
	u16 *p = _p;
	int i;

	regs->version = 0;

	memset(p, 0xff, 32 * sizeof(u16));

	for (i = 0; i < 32; i++) {
		int ret;

		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
		if (ret >= 0)
			p[i] = ret;
	}
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
			   u16 mask)
{
	unsigned long timeout = jiffies + HZ / 10;

	while (time_before(jiffies, timeout)) {
		int ret;

		ret = _mv88e6xxx_reg_read(ds, reg, offset);
		if (ret < 0)
			return ret;
		if (!(ret & mask))
			return 0;

		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}
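
/* Timing note (descriptive): _mv88e6xxx_wait() polls the given bit for
 * at most HZ / 10 jiffies (roughly 100 ms), sleeping 1-2 ms between
 * reads, so a stuck BUSY bit surfaces as -ETIMEDOUT rather than a hang.
 */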

static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_wait(ds, reg, offset, mask);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
			       GLOBAL2_SMI_OP_BUSY);
}

int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_LOAD);
}

int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_BUSY);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
			       GLOBAL_ATU_OP_BUSY);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
			       GLOBAL2_SCRATCH_BUSY);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
					int regnum)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
				   regnum);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_phy_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
					 int regnum, u16 val)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
				   regnum);

	return _mv88e6xxx_phy_wait(ds);
}

int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (reg < 0)
		goto out;

	e->eee_enabled = !!(reg & 0x0200);
	e->tx_lpi_enabled = !!(reg & 0x0100);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
	if (reg < 0)
		goto out;

	e->eee_active = !!(reg & PORT_STATUS_EEE);
	reg = 0;

out:
	mutex_unlock(&ps->smi_mutex);
	return reg;
}

int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
		      struct phy_device *phydev, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (ret < 0)
		goto out;

	reg = ret & ~0x0300;
	if (e->eee_enabled)
		reg |= 0x0200;
	if (e->tx_lpi_enabled)
		reg |= 0x0100;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_wait(ds);
}

static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
				     struct mv88e6xxx_atu_entry *entry)
{
	u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;

	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (entry->trunk) {
			data |= GLOBAL_ATU_DATA_TRUNK;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		data |= (entry->portv_trunkid << shift) & mask;
	}

	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
}
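
/* ATU data layout (descriptive): the entry state occupies the low bits
 * of GLOBAL_ATU_DATA, and for used entries the remaining field is
 * either a trunk ID (with GLOBAL_ATU_DATA_TRUNK set) or a port vector,
 * e.g. portv_trunkid = BIT(2) | BIT(5) forwards matching frames to
 * ports 2 and 5.
 */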

static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
				     struct mv88e6xxx_atu_entry *entry,
				     bool static_too)
{
	int op;
	int err;

	err = _mv88e6xxx_atu_wait(ds);
	if (err)
		return err;

	err = _mv88e6xxx_atu_data_write(ds, entry);
	if (err)
		return err;

	if (entry->fid) {
		err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
					   entry->fid);
		if (err)
			return err;

		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
	} else {
		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
	}

	return _mv88e6xxx_atu_cmd(ds, op);
}

static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
{
	struct mv88e6xxx_atu_entry entry = {
		.fid = fid,
		.state = 0, /* EntryState bits must be 0 */
	};

	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
}
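
/* Flush semantics (descriptive): a zero FID selects the "all databases"
 * flush/move opcodes, while a non-zero FID restricts the operation to
 * that single address database; static_too decides whether static
 * entries are removed as well or only the dynamically learned ones.
 */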

static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
{
	return _mv88e6xxx_atu_flush(ds, fid, false);
}

static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg, ret = 0;
	u8 oldstate;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
	if (reg < 0) {
		ret = reg;
		goto abort;
	}

	oldstate = reg & PORT_CONTROL_STATE_MASK;
	if (oldstate != state) {
		/* Flush forwarding database if we're moving a port
		 * from Learning or Forwarding state to Disabled or
		 * Blocking or Listening state.
		 */
		if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
		    state <= PORT_CONTROL_STATE_BLOCKING) {
			ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
			if (ret)
				goto abort;
		}
		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
					   reg);
	}

abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid = ps->fid[port];
	u16 reg = fid << 12;

	if (dsa_is_cpu_port(ds, port))
		reg |= ds->phys_port_mask;
	else
		reg |= (ps->bridge_mask[fid] |
			(1 << dsa_upstream_port(ds))) & ~(1 << port);

	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int port;
	u32 mask;
	int ret;

	mask = ds->phys_port_mask;
	while (mask) {
		port = __ffs(mask);
		mask &= ~(1 << port);
		if (ps->fid[port] != fid)
			continue;

		ret = _mv88e6xxx_update_port_config(ds, port);
		if (ret)
			return ret;
	}

	return _mv88e6xxx_flush_fid(ds, fid);
}

/* Bridge handling functions */

int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret = 0;
	u32 nmask;
	int fid;

	/* If the bridge group is not empty, join that group.
	 * Otherwise create a new group.
	 */
	fid = ps->fid[port];
	nmask = br_port_mask & ~(1 << port);
	if (nmask)
		fid = ps->fid[__ffs(nmask)];

	nmask = ps->bridge_mask[fid] | (1 << port);
	if (nmask != br_port_mask) {
		netdev_err(ds->ports[port],
			   "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, nmask);
		return -EINVAL;
	}

	mutex_lock(&ps->smi_mutex);

	ps->bridge_mask[fid] = br_port_mask;

	if (fid != ps->fid[port]) {
		clear_bit(ps->fid[port], ps->fid_bitmap);
		ps->fid[port] = fid;
		ret = _mv88e6xxx_update_bridge_config(ds, fid);
	}

	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid, newfid;
	int ret;

	fid = ps->fid[port];

	if (ps->bridge_mask[fid] != br_port_mask) {
		netdev_err(ds->ports[port],
			   "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, ps->bridge_mask[fid]);
		return -EINVAL;
	}

	/* If the port was the last port of a bridge, we are done.
	 * Otherwise assign a new fid to the port, and fix up
	 * the bridge configuration.
	 */
	if (br_port_mask == (1 << port))
		return 0;

	mutex_lock(&ps->smi_mutex);

	newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
	if (unlikely(newfid > ps->num_ports)) {
		netdev_err(ds->ports[port], "all first %d FIDs are used\n",
			   ps->num_ports);
		ret = -ENOSPC;
		goto unlock;
	}

	ps->fid[port] = newfid;
	set_bit(newfid, ps->fid_bitmap);
	ps->bridge_mask[fid] &= ~(1 << port);
	ps->bridge_mask[newfid] = 1 << port;

	ret = _mv88e6xxx_update_bridge_config(ds, fid);
	if (!ret)
		ret = _mv88e6xxx_update_bridge_config(ds, newfid);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = PORT_CONTROL_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = PORT_CONTROL_STATE_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		stp_state = PORT_CONTROL_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = PORT_CONTROL_STATE_FORWARDING;
		break;
	}

	netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);

	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
	 * so we can not update the port state directly but need to schedule it.
	 */
	ps->port_state[port] = stp_state;
	set_bit(port, &ps->port_state_update_mask);
	schedule_work(&ps->bridge_work);

	return 0;
}

int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
{
	int ret;

	ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
	if (ret < 0)
		return ret;

	*pvid = ret & PORT_DEFAULT_VLAN_MASK;

	return 0;
}

int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
{
	return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
				   pvid & PORT_DEFAULT_VLAN_MASK);
}

static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
			       GLOBAL_VTU_OP_BUSY);
}

static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_wait(ds);
}

static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
{
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
}

static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
					struct mv88e6xxx_vtu_stu_entry *entry,
					unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3];
	int i;
	int ret;

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_VTU_DATA_0_3 + i);
		if (ret < 0)
			return ret;

		regs[i] = ret;
	}

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u16 reg = regs[i / 4];

		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
	}

	return 0;
}

static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
					 struct mv88e6xxx_vtu_stu_entry *entry,
					 unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3] = { 0 };
	int i;
	int ret;

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u8 data = entry->data[i];

		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
	}

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
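
/* Packing example (illustrative, assuming GLOBAL_VTU_STU_DATA_MASK is
 * the usual 2-bit mask): each VTU/STU data register carries four ports,
 * one nibble per port.  Port 5 therefore lands in register
 * GLOBAL_VTU_DATA_0_3 + 1 at shift (5 % 4) * 4; its VTU member tag uses
 * nibble_offset 0 and its STU port state nibble_offset 2 within that
 * same nibble.
 */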

static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
				   vid & GLOBAL_VTU_VID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.vid = ret & GLOBAL_VTU_VID_MASK;
	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
		if (ret < 0)
			return ret;

		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_FID);
			if (ret < 0)
				return ret;

			next.fid = ret & GLOBAL_VTU_FID_MASK;

			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_SID);
			if (ret < 0)
				return ret;

			next.sid = ret & GLOBAL_VTU_SID_MASK;
		}
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port member tags */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
	if (ret < 0)
		return ret;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		reg = entry->sid & GLOBAL_VTU_SID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
		if (ret < 0)
			return ret;

		reg = entry->fid & GLOBAL_VTU_FID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
		if (ret < 0)
			return ret;
	}

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
}

static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
				   sid & GLOBAL_VTU_SID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
	if (ret < 0)
		return ret;

	next.sid = ret & GLOBAL_VTU_SID_MASK;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
		if (ret < 0)
			return ret;
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port states */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
	if (ret < 0)
		return ret;

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	reg = entry->sid & GLOBAL_VTU_SID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}

static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
				struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.valid = true,
		.vid = vid,
	};
	int i;

	/* exclude all ports except the CPU */
	for (i = 0; i < ps->num_ports; ++i)
		vlan.data[i] = dsa_is_cpu_port(ds, i) ?
			GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
			GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		struct mv88e6xxx_vtu_stu_entry vstp;
		int err;

		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
		 * implemented, only one STU entry is needed to cover all VTU
		 * entries. Thus, validate the SID 0.
		 */
		vlan.sid = 0;
		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
		if (err)
			return err;

		if (vstp.sid != vlan.sid || !vstp.valid) {
			memset(&vstp, 0, sizeof(vstp));
			vstp.valid = true;
			vstp.sid = vlan.sid;

			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
			if (err)
				return err;
		}

		/* Non-bridged ports and bridge groups use FIDs from 1 to
		 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
		 */
		vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
					      ps->num_ports + 1);
		if (unlikely(vlan.fid == VLAN_N_VID)) {
			pr_err("no more FID available for VLAN %d\n", vid);
			return -ENOSPC;
		}

		err = _mv88e6xxx_flush_fid(ds, vlan.fid);
		if (err)
			return err;

		set_bit(vlan.fid, ps->fid_bitmap);
	}

	*entry = vlan;
	return 0;
}

int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
			    bool untagged)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	mutex_lock(&ps->smi_mutex);
	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
	if (err)
		goto unlock;

	if (vlan.vid != vid || !vlan.valid) {
		err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
		if (err)
			goto unlock;
	}

	vlan.data[port] = untagged ?
		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;

	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	bool keep = false;
	int i, err;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
	if (err)
		goto unlock;

	if (vlan.vid != vid || !vlan.valid ||
	    vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
		err = -ENOENT;
		goto unlock;
	}

	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	/* keep the VLAN unless all ports are excluded */
	for (i = 0; i < ps->num_ports; ++i) {
		if (dsa_is_cpu_port(ds, i))
			continue;

		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
			keep = true;
			break;
		}
	}

	vlan.valid = keep;
	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
	if (err)
		goto unlock;

	if (!keep)
		clear_bit(vlan.fid, ps->fid_bitmap);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
				       struct mv88e6xxx_vtu_stu_entry *entry)
{
	int err;

	do {
		if (vid == 4095)
			return -ENOENT;

		err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
		if (err)
			return err;

		if (!entry->valid)
			return -ENOENT;

		vid = entry->vid;
	} while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
		 entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);

	return 0;
}

int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
			   unsigned long *ports, unsigned long *untagged)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry next;
	int port;
	int err;

	if (*vid == 4095)
		return -ENOENT;

	mutex_lock(&ps->smi_mutex);
	err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
	mutex_unlock(&ps->smi_mutex);

	if (err)
		return err;

	if (!next.valid)
		return -ENOENT;

	*vid = next.vid;

	for (port = 0; port < ps->num_ports; ++port) {
		clear_bit(port, ports);
		clear_bit(port, untagged);

		if (dsa_is_cpu_port(ds, port))
			continue;

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
		    next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			set_bit(port, ports);

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			set_bit(port, untagged);
	}

	return 0;
}

static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
				    const unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_write(
			ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
			(addr[i * 2] << 8) | addr[i * 2 + 1]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_ATU_MAC_01 + i);
		if (ret < 0)
			return ret;
		addr[i * 2] = ret >> 8;
		addr[i * 2 + 1] = ret & 0xff;
	}

	return 0;
}

static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
			       struct mv88e6xxx_atu_entry *entry)
{
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_data_write(ds, entry);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
}

static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	if (vid == 0)
		return ps->fid[port];

	err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
	if (err)
		return err;

	if (vlan.vid == vid)
		return vlan.fid;

	return -ENOENT;
}

static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
				    const unsigned char *addr, u16 vid,
				    u8 state)
{
	struct mv88e6xxx_atu_entry entry = { 0 };
	int ret;

	ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
	if (ret < 0)
		return ret;

	entry.fid = ret;
	entry.state = state;
	ether_addr_copy(entry.mac, addr);
	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		entry.trunk = false;
		entry.portv_trunkid = BIT(port);
	}

	return _mv88e6xxx_atu_load(ds, &entry);
}
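
/* State selection (descriptive): callers pass
 * GLOBAL_ATU_DATA_STATE_UC_STATIC or ..._MC_STATIC to pin an address to
 * this port, and GLOBAL_ATU_DATA_STATE_UNUSED to purge it, in which
 * case no port vector is written at all.
 */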

int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	int state = is_multicast_ether_addr(addr) ?
		GLOBAL_ATU_DATA_STATE_MC_STATIC :
		GLOBAL_ATU_DATA_STATE_UC_STATIC;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
				       GLOBAL_ATU_DATA_STATE_UNUSED);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
				  const unsigned char *addr,
				  struct mv88e6xxx_atu_entry *entry)
{
	struct mv88e6xxx_atu_entry next = { 0 };
	int ret;

	next.fid = fid;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ds, addr);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
	if (ret < 0)
		return ret;

	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (ret & GLOBAL_ATU_DATA_TRUNK) {
			next.trunk = true;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			next.trunk = false;
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		next.portv_trunkid = (ret & mask) >> shift;
	}

	*entry = next;
	return 0;
}
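
/* Iteration note (descriptive): GetNext walks a FID's address database
 * in MAC order starting from the address written to the ATU MAC
 * registers; the FDB dump below treats the broadcast address as its
 * start marker before stepping through the entries one at a time.
 */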
1963
1964/* get next entry for port */
1965int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
2a778e1b 1966 unsigned char *addr, u16 *vid, bool *is_static)
cdf09697
DM
1967{
1968 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1d194046
VD
1969 struct mv88e6xxx_atu_entry next;
1970 u16 fid;
cdf09697 1971 int ret;
6630e236 1972
cdf09697 1973 mutex_lock(&ps->smi_mutex);
1d194046
VD
1974
1975 ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
1976 if (ret < 0)
1977 goto unlock;
1978 fid = ret;
1979
1980 do {
1981 if (is_broadcast_ether_addr(addr)) {
02512b6f
VD
1982 struct mv88e6xxx_vtu_stu_entry vtu;
1983
1984 ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
1985 if (ret < 0)
1986 goto unlock;
1987
1988 *vid = vtu.vid;
1989 fid = vtu.fid;
1d194046
VD
1990 }
1991
1992 ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
1993 if (ret < 0)
1994 goto unlock;
1995
1996 ether_addr_copy(addr, next.mac);
1997
1998 if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
1999 continue;
2000 } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
2001
2002 *is_static = next.state == (is_multicast_ether_addr(addr) ?
2003 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2004 GLOBAL_ATU_DATA_STATE_UC_STATIC);
2005unlock:
defb05b9
GR
2006 mutex_unlock(&ps->smi_mutex);
2007
2008 return ret;
2009}
2010
facd95b2
GR
2011static void mv88e6xxx_bridge_work(struct work_struct *work)
2012{
2013 struct mv88e6xxx_priv_state *ps;
2014 struct dsa_switch *ds;
2015 int port;
2016
2017 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
2018 ds = ((struct dsa_switch *)ps) - 1;
2019
2020 while (ps->port_state_update_mask) {
2021 port = __ffs(ps->port_state_update_mask);
2022 clear_bit(port, &ps->port_state_update_mask);
2023 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
2024 }
2025}
2026
dbde9e66 2027static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
d827e88a
GR
2028{
2029 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
facd95b2 2030 int ret, fid;
54d792f2 2031 u16 reg;
d827e88a
GR
2032
2033 mutex_lock(&ps->smi_mutex);
2034
54d792f2
AL
2035 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2036 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2037 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
7c3d0d67 2038 mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
54d792f2
AL
2039 /* MAC Forcing register: don't force link, speed,
2040 * duplex or flow control state to any particular
2041 * values on physical ports, but force the CPU port
2042 * and all DSA ports to their maximum bandwidth and
2043 * full duplex.
2044 */
2045 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
60045cbf 2046 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
54d792f2
AL
2047 reg |= PORT_PCS_CTRL_FORCE_LINK |
2048 PORT_PCS_CTRL_LINK_UP |
2049 PORT_PCS_CTRL_DUPLEX_FULL |
2050 PORT_PCS_CTRL_FORCE_DUPLEX;
2051 if (mv88e6xxx_6065_family(ds))
2052 reg |= PORT_PCS_CTRL_100;
2053 else
2054 reg |= PORT_PCS_CTRL_1000;
2055 } else {
2056 reg |= PORT_PCS_CTRL_UNFORCED;
2057 }
2058
2059 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2060 PORT_PCS_CTRL, reg);
2061 if (ret)
2062 goto abort;
2063 }
2064
2065 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2066 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2067 * tunneling, determine priority by looking at 802.1p and IP
2068 * priority fields (IP prio has precedence), and set STP state
2069 * to Forwarding.
2070 *
2071 * If this is the CPU link, use DSA or EDSA tagging depending
2072 * on which tagging mode was configured.
2073 *
2074 * If this is a link to another switch, use DSA tagging mode.
2075 *
2076 * If this is the upstream port for this switch, enable
2077 * forwarding of unknown unicasts and multicasts.
2078 */
2079 reg = 0;
2080 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2081 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2082 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
7c3d0d67 2083 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
54d792f2
AL
2084 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2085 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2086 PORT_CONTROL_STATE_FORWARDING;
2087 if (dsa_is_cpu_port(ds, port)) {
2088 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2089 reg |= PORT_CONTROL_DSA_TAG;
2090 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
7c3d0d67
AK
2091 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2092 mv88e6xxx_6320_family(ds)) {
54d792f2
AL
2093 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2094 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2095 else
2096 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2097 }
2098
2099 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2100 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2101 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
7c3d0d67 2102 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
54d792f2
AL
2103 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2104 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2105 }
2106 }
6083ce71
AL
2107 if (dsa_is_dsa_port(ds, port)) {
2108 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2109 reg |= PORT_CONTROL_DSA_TAG;
2110 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2111 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2112 mv88e6xxx_6320_family(ds)) {
54d792f2 2113 reg |= PORT_CONTROL_FRAME_MODE_DSA;
6083ce71
AL
2114 }
2115
54d792f2
AL
2116 if (port == dsa_upstream_port(ds))
2117 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2118 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2119 }
2120 if (reg) {
2121 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2122 PORT_CONTROL, reg);
2123 if (ret)
2124 goto abort;
2125 }
2126
8efdda4a
VD
2127 /* Port Control 2: don't force a good FCS, set the maximum frame size to
 2128 * 10240 bytes, use 802.1Q Fallback mode, don't discard tagged or
2129 * untagged frames on this port, do a destination address lookup on all
2130 * received packets as usual, disable ARP mirroring and don't send a
2131 * copy of all transmitted/received frames on this port to the CPU.
54d792f2
AL
2132 */
2133 reg = 0;
2134 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2135 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
7c3d0d67 2136 mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
54d792f2
AL
2137 reg = PORT_CONTROL_2_MAP_DA;
2138
2139 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
7c3d0d67 2140 mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
54d792f2
AL
2141 reg |= PORT_CONTROL_2_JUMBO_10240;
2142
2143 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
2144 /* Set the upstream port this port should use */
2145 reg |= dsa_upstream_port(ds);
2146 /* enable forwarding of unknown multicast addresses to
2147 * the upstream port
2148 */
2149 if (port == dsa_upstream_port(ds))
2150 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2151 }
2152
f5117ce4 2153 reg |= PORT_CONTROL_2_8021Q_FALLBACK;
8efdda4a 2154
54d792f2
AL
2155 if (reg) {
2156 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2157 PORT_CONTROL_2, reg);
2158 if (ret)
2159 goto abort;
2160 }
2161
2162 /* Port Association Vector: when learning source addresses
2163 * of packets, add the address to the address database using
2164 * a port bitmap that has only the bit for this port set and
2165 * the other bits clear.
2166 */
2167 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
2168 1 << port);
2169 if (ret)
2170 goto abort;
2171
2172 /* Egress rate control 2: disable egress rate control. */
2173 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
2174 0x0000);
2175 if (ret)
2176 goto abort;
2177
2178 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
7c3d0d67
AK
2179 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2180 mv88e6xxx_6320_family(ds)) {
54d792f2
AL
2181 /* Do not limit the period of time that this port can
2182 * be paused for by the remote end or the period of
2183 * time that this port can pause the remote end.
2184 */
2185 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2186 PORT_PAUSE_CTRL, 0x0000);
2187 if (ret)
2188 goto abort;
2189
2190 /* Port ATU control: disable limiting the number of
2191 * address database entries that this port is allowed
2192 * to use.
2193 */
2194 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
 2195 PORT_ATU_CONTROL, 0x0000);
 if (ret)
 goto abort;

2196 /* Priority Override: disable DA, SA and VTU priority
2197 * override.
2198 */
2199 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2200 PORT_PRI_OVERRIDE, 0x0000);
2201 if (ret)
2202 goto abort;
2203
2204 /* Port Ethertype: use the Ethertype DSA Ethertype
2205 * value.
2206 */
2207 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2208 PORT_ETH_TYPE, ETH_P_EDSA);
2209 if (ret)
2210 goto abort;
2211 /* Tag Remap: use an identity 802.1p prio -> switch
2212 * prio mapping.
2213 */
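 /* (Each nibble holds the switch priority for one tag priority, low nibble
 * first: 0x3210 below covers tag priorities 0-3 and 0x7654 further down
 * covers 4-7, which is the identity map described above.)
 */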
2214 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2215 PORT_TAG_REGMAP_0123, 0x3210);
2216 if (ret)
2217 goto abort;
2218
2219 /* Tag Remap 2: use an identity 802.1p prio -> switch
2220 * prio mapping.
2221 */
2222 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2223 PORT_TAG_REGMAP_4567, 0x7654);
2224 if (ret)
2225 goto abort;
2226 }
2227
2228 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2229 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
7c3d0d67
AK
2230 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2231 mv88e6xxx_6320_family(ds)) {
54d792f2
AL
2232 /* Rate Control: disable ingress rate limiting. */
2233 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2234 PORT_RATE_CONTROL, 0x0001);
2235 if (ret)
2236 goto abort;
2237 }
2238
366f0a0f
GR
2239 /* Port Control 1: disable trunking, disable sending
2240 * learning messages to this port.
d827e88a 2241 */
614f03fc 2242 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
d827e88a
GR
2243 if (ret)
2244 goto abort;
2245
2246 /* Port based VLAN map: give each port its own address
2247 * database, allow the CPU port to talk to each of the 'real'
2248 * ports, and allow each of the 'real' ports to only talk to
2249 * the upstream port.
2250 */
194fea7b 2251 fid = port + 1;
facd95b2 2252 ps->fid[port] = fid;
194fea7b 2253 set_bit(fid, ps->fid_bitmap);
facd95b2
GR
2254
2255 if (!dsa_is_cpu_port(ds, port))
2256 ps->bridge_mask[fid] = 1 << port;
d827e88a 2257
facd95b2 2258 ret = _mv88e6xxx_update_port_config(ds, port);
d827e88a
GR
2259 if (ret)
2260 goto abort;
2261
2262 /* Default VLAN ID and priority: don't set a default VLAN
2263 * ID, and set the default packet priority to zero.
2264 */
47cf1e65
VD
2265 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
2266 0x0000);
d827e88a
GR
2267abort:
2268 mutex_unlock(&ps->smi_mutex);
2269 return ret;
2270}
2271
dbde9e66
AL
2272int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2273{
2274 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2275 int ret;
2276 int i;
2277
2278 for (i = 0; i < ps->num_ports; i++) {
2279 ret = mv88e6xxx_setup_port(ds, i);
2280 if (ret < 0)
2281 return ret;
2282 }
2283 return 0;
2284}
2285
87c8cefb
AL
2286static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
2287{
2288 struct dsa_switch *ds = s->private;
2289
2290 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2291 int reg, port;
2292
2293 seq_puts(s, " GLOBAL GLOBAL2 ");
2294 for (port = 0 ; port < ps->num_ports; port++)
2295 seq_printf(s, " %2d ", port);
2296 seq_puts(s, "\n");
2297
2298 for (reg = 0; reg < 32; reg++) {
2299 seq_printf(s, "%2x: ", reg);
2300 seq_printf(s, " %4x %4x ",
2301 mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
2302 mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
2303
2304 for (port = 0 ; port < ps->num_ports; port++)
2305 seq_printf(s, "%4x ",
2306 mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
2307 seq_puts(s, "\n");
2308 }
2309
2310 return 0;
2311}
2312
2313static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
2314{
2315 return single_open(file, mv88e6xxx_regs_show, inode->i_private);
2316}
2317
2318static const struct file_operations mv88e6xxx_regs_fops = {
2319 .open = mv88e6xxx_regs_open,
2320 .read = seq_read,
2321 .llseek = no_llseek,
2322 .release = single_release,
2323 .owner = THIS_MODULE,
2324};
2325
8a0a265d
AL
2326static void mv88e6xxx_atu_show_header(struct seq_file *s)
2327{
2328 seq_puts(s, "DB T/P Vec State Addr\n");
2329}
2330
2331static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
2332 unsigned char *addr, int data)
2333{
2334 bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
2335 int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
2336 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
2337 int state = data & GLOBAL_ATU_DATA_STATE_MASK;
2338
2339 seq_printf(s, "%03x %5s %10pb %x %pM\n",
2340 dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
2341}
2342
2343static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
2344 int dbnum)
2345{
 2346 unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2347 unsigned char addr[6];
2348 int ret, data, state;
2349
c5723ac5 2350 ret = _mv88e6xxx_atu_mac_write(ds, bcast);
8a0a265d
AL
2351 if (ret < 0)
2352 return ret;
2353
2354 do {
70cc99d1
VD
2355 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
2356 dbnum);
8a0a265d
AL
2357 if (ret < 0)
2358 return ret;
70cc99d1
VD
2359
2360 ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
2361 if (ret < 0)
2362 return ret;
2363
8a0a265d
AL
2364 data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
2365 if (data < 0)
2366 return data;
2367
2368 state = data & GLOBAL_ATU_DATA_STATE_MASK;
2369 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
2370 break;
c5723ac5 2371 ret = _mv88e6xxx_atu_mac_read(ds, addr);
8a0a265d
AL
2372 if (ret < 0)
2373 return ret;
2374 mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
2375 } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
2376
2377 return 0;
2378}
2379
2380static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
2381{
2382 struct dsa_switch *ds = s->private;
2383 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2384 int dbnum;
2385
2386 mv88e6xxx_atu_show_header(s);
2387
2388 for (dbnum = 0; dbnum < 255; dbnum++) {
2389 mutex_lock(&ps->smi_mutex);
2390 mv88e6xxx_atu_show_db(s, ds, dbnum);
2391 mutex_unlock(&ps->smi_mutex);
2392 }
2393
2394 return 0;
2395}
2396
2397static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
2398{
2399 return single_open(file, mv88e6xxx_atu_show, inode->i_private);
2400}
2401
2402static const struct file_operations mv88e6xxx_atu_fops = {
2403 .open = mv88e6xxx_atu_open,
2404 .read = seq_read,
2405 .llseek = no_llseek,
2406 .release = single_release,
2407 .owner = THIS_MODULE,
2408};
2409
532c7a35
AL
2410static void mv88e6xxx_stats_show_header(struct seq_file *s,
2411 struct mv88e6xxx_priv_state *ps)
2412{
2413 int port;
2414
2415 seq_puts(s, " Statistic ");
2416 for (port = 0 ; port < ps->num_ports; port++)
2417 seq_printf(s, "Port %2d ", port);
2418 seq_puts(s, "\n");
2419}
2420
2421static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
2422{
2423 struct dsa_switch *ds = s->private;
2424 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2425 struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
2426 int port, stat, max_stats;
2427 uint64_t value;
2428
2429 if (have_sw_in_discards(ds))
2430 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
2431 else
2432 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
2433
2434 mv88e6xxx_stats_show_header(s, ps);
2435
2436 mutex_lock(&ps->smi_mutex);
2437
2438 for (stat = 0; stat < max_stats; stat++) {
2439 seq_printf(s, "%19s: ", stats[stat].string);
2440 for (port = 0 ; port < ps->num_ports; port++) {
2441 _mv88e6xxx_stats_snapshot(ds, port);
2442 value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
2443 port);
2444 seq_printf(s, "%8llu ", value);
2445 }
2446 seq_puts(s, "\n");
2447 }
2448 mutex_unlock(&ps->smi_mutex);
2449
2450 return 0;
2451}
2452
2453static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
2454{
2455 return single_open(file, mv88e6xxx_stats_show, inode->i_private);
2456}
2457
2458static const struct file_operations mv88e6xxx_stats_fops = {
2459 .open = mv88e6xxx_stats_open,
2460 .read = seq_read,
2461 .llseek = no_llseek,
2462 .release = single_release,
2463 .owner = THIS_MODULE,
2464};
2465
d35bd876
AL
2466static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
2467{
2468 struct dsa_switch *ds = s->private;
2469 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2470 int target, ret;
2471
2472 seq_puts(s, "Target Port\n");
2473
2474 mutex_lock(&ps->smi_mutex);
2475 for (target = 0; target < 32; target++) {
2476 ret = _mv88e6xxx_reg_write(
2477 ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2478 target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
2479 if (ret < 0)
2480 goto out;
2481 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2482 GLOBAL2_DEVICE_MAPPING);
2483 seq_printf(s, " %2d %2d\n", target,
2484 ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
2485 }
2486out:
2487 mutex_unlock(&ps->smi_mutex);
2488
2489 return 0;
2490}
2491
2492static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
2493{
2494 return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
2495}
2496
2497static const struct file_operations mv88e6xxx_device_map_fops = {
2498 .open = mv88e6xxx_device_map_open,
2499 .read = seq_read,
2500 .llseek = no_llseek,
2501 .release = single_release,
2502 .owner = THIS_MODULE,
2503};
2504
56d95e22
AL
2505static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
2506{
2507 struct dsa_switch *ds = s->private;
2508 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2509 int reg, ret;
2510
2511 seq_puts(s, "Register Value\n");
2512
2513 mutex_lock(&ps->smi_mutex);
2514 for (reg = 0; reg < 0x80; reg++) {
2515 ret = _mv88e6xxx_reg_write(
2516 ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
2517 reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
2518 if (ret < 0)
2519 goto out;
2520
2521 ret = _mv88e6xxx_scratch_wait(ds);
2522 if (ret < 0)
2523 goto out;
2524
2525 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2526 GLOBAL2_SCRATCH_MISC);
2527 seq_printf(s, " %2x %2x\n", reg,
2528 ret & GLOBAL2_SCRATCH_VALUE_MASK);
2529 }
2530out:
2531 mutex_unlock(&ps->smi_mutex);
2532
2533 return 0;
2534}
2535
2536static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
2537{
2538 return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
2539}
2540
2541static const struct file_operations mv88e6xxx_scratch_fops = {
2542 .open = mv88e6xxx_scratch_open,
2543 .read = seq_read,
2544 .llseek = no_llseek,
2545 .release = single_release,
2546 .owner = THIS_MODULE,
2547};
2548
acdaffcc
GR
2549int mv88e6xxx_setup_common(struct dsa_switch *ds)
2550{
2551 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
87c8cefb 2552 char *name;
acdaffcc
GR
2553
2554 mutex_init(&ps->smi_mutex);
acdaffcc 2555
cca8b133 2556 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
a8f064c6 2557
facd95b2
GR
2558 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
2559
87c8cefb
AL
2560 name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
2561 ps->dbgfs = debugfs_create_dir(name, NULL);
2562 kfree(name);
2563
2564 debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
2565 &mv88e6xxx_regs_fops);
2566
8a0a265d
AL
2567 debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
2568 &mv88e6xxx_atu_fops);
2569
532c7a35
AL
2570 debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
2571 &mv88e6xxx_stats_fops);
2572
d35bd876
AL
2573 debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
2574 &mv88e6xxx_device_map_fops);
56d95e22
AL
2575
2576 debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
2577 &mv88e6xxx_scratch_fops);
acdaffcc
GR
2578 return 0;
2579}
2580
54d792f2
AL
2581int mv88e6xxx_setup_global(struct dsa_switch *ds)
2582{
2583 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
24751e29 2584 int ret;
54d792f2
AL
2585 int i;
2586
2587 /* Set the default address aging time to 5 minutes, and
2588 * enable address learn messages to be sent to all message
2589 * ports.
2590 */
2591 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
2592 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2593
2594 /* Configure the IP ToS mapping registers. */
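 /* Each GLOBAL_IP_PRI_n register is assumed to hold eight 2-bit priority
 * fields, one per ToS/DSCP code point, so the 0x0000/0x5555/0xaaaa/0xffff
 * pattern below maps code points 0-15, 16-31, 32-47 and 48-63 to
 * priorities 0, 1, 2 and 3 respectively.
 */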
2595 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2596 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2597 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2598 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2599 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2600 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2601 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2602 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2603
2604 /* Configure the IEEE 802.1p priority mapping register. */
2605 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
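 /* 0xfa41 is assumed to pack one 2-bit switch priority per 802.1p tag
 * priority (bits 1:0 for tag 0 up to bits 15:14 for tag 7), i.e. the
 * conventional mapping: tags 1,2 -> 0, tags 0,3 -> 1, tags 4,5 -> 2 and
 * tags 6,7 -> 3.
 */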
2606
2607 /* Send all frames with destination addresses matching
2608 * 01:80:c2:00:00:0x to the CPU port.
2609 */
2610 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2611
2612 /* Ignore removed tag data on doubly tagged packets, disable
2613 * flow control messages, force flow control priority to the
2614 * highest, and send all special multicast frames to the CPU
2615 * port at the highest priority.
2616 */
2617 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2618 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2619 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2620
2621 /* Program the DSA routing table. */
2622 for (i = 0; i < 32; i++) {
2623 int nexthop = 0x1f;
2624
2625 if (ds->pd->rtable &&
2626 i != ds->index && i < ds->dst->pd->nr_chips)
2627 nexthop = ds->pd->rtable[i] & 0x1f;
2628
2629 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2630 GLOBAL2_DEVICE_MAPPING_UPDATE |
2631 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2632 nexthop);
2633 }
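 /* For example (illustrative): on a two-chip board where chip 1 hangs off
 * port 5 of chip 0, chip 0's rtable[1] would be 5, so frames targeting
 * device 1 leave through port 5; every other target keeps the 0x1f
 * default programmed above.
 */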
2634
2635 /* Clear all trunk masks. */
2636 for (i = 0; i < 8; i++)
2637 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2638 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2639 ((1 << ps->num_ports) - 1));
2640
2641 /* Clear all trunk mappings. */
2642 for (i = 0; i < 16; i++)
2643 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2644 GLOBAL2_TRUNK_MAPPING_UPDATE |
2645 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2646
2647 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
7c3d0d67
AK
2648 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2649 mv88e6xxx_6320_family(ds)) {
54d792f2
AL
2650 /* Send all frames with destination addresses matching
2651 * 01:80:c2:00:00:2x to the CPU port.
2652 */
2653 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2654
2655 /* Initialise cross-chip port VLAN table to reset
2656 * defaults.
2657 */
2658 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2659
2660 /* Clear the priority override table. */
2661 for (i = 0; i < 16; i++)
2662 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2663 0x8000 | (i << 8));
2664 }
2665
2666 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2667 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
7c3d0d67
AK
2668 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2669 mv88e6xxx_6320_family(ds)) {
54d792f2
AL
2670 /* Disable ingress rate limiting by resetting all
2671 * ingress rate limit registers to their initial
2672 * state.
2673 */
2674 for (i = 0; i < ps->num_ports; i++)
2675 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2676 0x9000 | (i << 8));
2677 }
2678
db687a56
AL
2679 /* Clear the statistics counters for all ports */
2680 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2681
2682 /* Wait for the flush to complete. */
24751e29
VD
2683 mutex_lock(&ps->smi_mutex);
2684 ret = _mv88e6xxx_stats_wait(ds);
6b17e864
VD
2685 if (ret < 0)
2686 goto unlock;
2687
c161d0a5
VD
2688 /* Clear all ATU entries */
2689 ret = _mv88e6xxx_atu_flush(ds, 0, true);
2690 if (ret < 0)
2691 goto unlock;
2692
6b17e864
VD
2693 /* Clear all the VTU and STU entries */
2694 ret = _mv88e6xxx_vtu_stu_flush(ds);
2695unlock:
24751e29 2696 mutex_unlock(&ps->smi_mutex);
db687a56 2697
24751e29 2698 return ret;
54d792f2
AL
2699}
2700
143a8307
AL
2701int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2702{
2703 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2704 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2705 unsigned long timeout;
2706 int ret;
2707 int i;
2708
2709 /* Set all ports to the disabled state. */
2710 for (i = 0; i < ps->num_ports; i++) {
cca8b133
AL
2711 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2712 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
143a8307
AL
2713 }
2714
2715 /* Wait for transmit queues to drain. */
2716 usleep_range(2000, 4000);
2717
2718 /* Reset the switch. Keep the PPU active if requested. The PPU
2719 * needs to be active to support indirect phy register access
2720 * through global registers 0x18 and 0x19.
2721 */
2722 if (ppu_active)
2723 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2724 else
2725 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2726
2727 /* Wait up to one second for reset to complete. */
2728 timeout = jiffies + 1 * HZ;
2729 while (time_before(jiffies, timeout)) {
2730 ret = REG_READ(REG_GLOBAL, 0x00);
2731 if ((ret & is_reset) == is_reset)
2732 break;
2733 usleep_range(1000, 2000);
2734 }
2735 if (time_after(jiffies, timeout))
2736 return -ETIMEDOUT;
2737
2738 return 0;
2739}
2740
49143585
AL
2741int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2742{
2743 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2744 int ret;
2745
3898c148 2746 mutex_lock(&ps->smi_mutex);
fd3a0ee4 2747 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
49143585
AL
2748 if (ret < 0)
2749 goto error;
fd3a0ee4 2750 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
49143585 2751error:
fd3a0ee4 2752 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
3898c148 2753 mutex_unlock(&ps->smi_mutex);
49143585
AL
2754 return ret;
2755}
2756
2757int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2758 int reg, int val)
2759{
2760 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2761 int ret;
2762
3898c148 2763 mutex_lock(&ps->smi_mutex);
fd3a0ee4 2764 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
49143585
AL
2765 if (ret < 0)
2766 goto error;
2767
fd3a0ee4 2768 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
49143585 2769error:
fd3a0ee4 2770 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
3898c148 2771 mutex_unlock(&ps->smi_mutex);
fd3a0ee4
AL
2772 return ret;
2773}
2774
2775static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2776{
2777 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2778
2779 if (port >= 0 && port < ps->num_ports)
2780 return port;
2781 return -EINVAL;
2782}
2783
2784int
2785mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2786{
2787 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2788 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2789 int ret;
2790
2791 if (addr < 0)
2792 return addr;
2793
3898c148 2794 mutex_lock(&ps->smi_mutex);
fd3a0ee4 2795 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
3898c148 2796 mutex_unlock(&ps->smi_mutex);
fd3a0ee4
AL
2797 return ret;
2798}
2799
2800int
2801mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2802{
2803 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2804 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2805 int ret;
2806
2807 if (addr < 0)
2808 return addr;
2809
3898c148 2810 mutex_lock(&ps->smi_mutex);
fd3a0ee4 2811 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
3898c148 2812 mutex_unlock(&ps->smi_mutex);
fd3a0ee4
AL
2813 return ret;
2814}
2815
2816int
2817mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2818{
2819 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2820 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2821 int ret;
2822
2823 if (addr < 0)
2824 return addr;
2825
3898c148 2826 mutex_lock(&ps->smi_mutex);
fd3a0ee4 2827 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
3898c148 2828 mutex_unlock(&ps->smi_mutex);
fd3a0ee4
AL
2829 return ret;
2830}
2831
2832int
2833mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2834 u16 val)
2835{
2836 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2837 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2838 int ret;
2839
2840 if (addr < 0)
2841 return addr;
2842
3898c148 2843 mutex_lock(&ps->smi_mutex);
fd3a0ee4 2844 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
3898c148 2845 mutex_unlock(&ps->smi_mutex);
49143585
AL
2846 return ret;
2847}
2848
c22995c5
GR
2849#ifdef CONFIG_NET_DSA_HWMON
2850
2851static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2852{
2853 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2854 int ret;
2855 int val;
2856
2857 *temp = 0;
2858
2859 mutex_lock(&ps->smi_mutex);
2860
2861 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2862 if (ret < 0)
2863 goto error;
2864
2865 /* Enable temperature sensor */
2866 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2867 if (ret < 0)
2868 goto error;
2869
2870 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2871 if (ret < 0)
2872 goto error;
2873
2874 /* Wait for temperature to stabilize */
2875 usleep_range(10000, 12000);
2876
2877 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2878 if (val < 0) {
2879 ret = val;
2880 goto error;
2881 }
2882
2883 /* Disable temperature sensor */
2884 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
2885 if (ret < 0)
2886 goto error;
2887
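 /* Convert the 5-bit raw reading: each step is 5 degrees C and a raw
 * value of 5 corresponds to 0 degrees C, so e.g. a reading of 16 yields
 * 55 degrees C.
 */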
2888 *temp = ((val & 0x1f) - 5) * 5;
2889
2890error:
2891 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2892 mutex_unlock(&ps->smi_mutex);
2893 return ret;
2894}
2895
2896static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2897{
2898 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2899 int ret;
2900
2901 *temp = 0;
2902
2903 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2904 if (ret < 0)
2905 return ret;
2906
2907 *temp = (ret & 0xff) - 25;
2908
2909 return 0;
2910}
2911
2912int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2913{
2914 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2915 return mv88e63xx_get_temp(ds, temp);
2916
2917 return mv88e61xx_get_temp(ds, temp);
2918}
2919
2920int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2921{
2922 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2923 int ret;
2924
2925 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2926 return -EOPNOTSUPP;
2927
2928 *temp = 0;
2929
2930 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2931 if (ret < 0)
2932 return ret;
2933
2934 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
2935
2936 return 0;
2937}
2938
2939int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2940{
2941 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2942 int ret;
2943
2944 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2945 return -EOPNOTSUPP;
2946
2947 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2948 if (ret < 0)
2949 return ret;
2950 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2951 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2952 (ret & 0xe0ff) | (temp << 8));
2953}
2954
2955int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2956{
2957 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2958 int ret;
2959
2960 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2961 return -EOPNOTSUPP;
2962
2963 *alarm = false;
2964
2965 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2966 if (ret < 0)
2967 return ret;
2968
2969 *alarm = !!(ret & 0x40);
2970
2971 return 0;
2972}
2973#endif /* CONFIG_NET_DSA_HWMON */
2974
98e67308
BH
2975static int __init mv88e6xxx_init(void)
2976{
2977#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2978 register_switch_driver(&mv88e6131_switch_driver);
2979#endif
2980#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2981 register_switch_driver(&mv88e6123_61_65_switch_driver);
42f27253 2982#endif
3ad50cca
GR
2983#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2984 register_switch_driver(&mv88e6352_switch_driver);
2985#endif
42f27253
AL
2986#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2987 register_switch_driver(&mv88e6171_switch_driver);
98e67308
BH
2988#endif
2989 return 0;
2990}
2991module_init(mv88e6xxx_init);
2992
2993static void __exit mv88e6xxx_cleanup(void)
2994{
42f27253
AL
2995#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2996 unregister_switch_driver(&mv88e6171_switch_driver);
2997#endif
4212b543
VD
2998#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2999 unregister_switch_driver(&mv88e6352_switch_driver);
3000#endif
98e67308
BH
3001#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
3002 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
3003#endif
3004#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3005 unregister_switch_driver(&mv88e6131_switch_driver);
3006#endif
3007}
3008module_exit(mv88e6xxx_cleanup);
3d825ede
BH
3009
3010MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
3011MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
3012MODULE_LICENSE("GPL");