/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
14 #include <linux/delay.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_bridge.h>
18 #include <linux/jiffies.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/phy.h>
24 #include <net/switchdev.h>
25 #include "mv88e6xxx.h"
27 static void assert_smi_lock(struct dsa_switch
*ds
)
29 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
31 if (unlikely(!mutex_is_locked(&ps
->smi_mutex
))) {
32 dev_err(ds
->master_dev
, "SMI lock not held!\n");
37 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
38 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
39 * will be directly accessible on some {device address,register address}
40 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
41 * will only respond to SMI transactions to that specific address, and
42 * an indirect addressing mechanism needs to be used to access its
45 static int mv88e6xxx_reg_wait_ready(struct mii_bus
*bus
, int sw_addr
)
50 for (i
= 0; i
< 16; i
++) {
51 ret
= mdiobus_read_nested(bus
, sw_addr
, SMI_CMD
);
55 if ((ret
& SMI_CMD_BUSY
) == 0)
62 static int __mv88e6xxx_reg_read(struct mii_bus
*bus
, int sw_addr
, int addr
,
68 return mdiobus_read_nested(bus
, addr
, reg
);
70 /* Wait for the bus to become free. */
71 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
75 /* Transmit the read command. */
76 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_CMD
,
77 SMI_CMD_OP_22_READ
| (addr
<< 5) | reg
);
81 /* Wait for the read command to complete. */
82 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
87 ret
= mdiobus_read_nested(bus
, sw_addr
, SMI_DATA
);
94 static int _mv88e6xxx_reg_read(struct dsa_switch
*ds
, int addr
, int reg
)
96 struct mii_bus
*bus
= dsa_host_dev_to_mii_bus(ds
->master_dev
);
104 ret
= __mv88e6xxx_reg_read(bus
, ds
->pd
->sw_addr
, addr
, reg
);
108 dev_dbg(ds
->master_dev
, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
114 int mv88e6xxx_reg_read(struct dsa_switch
*ds
, int addr
, int reg
)
116 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
119 mutex_lock(&ps
->smi_mutex
);
120 ret
= _mv88e6xxx_reg_read(ds
, addr
, reg
);
121 mutex_unlock(&ps
->smi_mutex
);
126 static int __mv88e6xxx_reg_write(struct mii_bus
*bus
, int sw_addr
, int addr
,
132 return mdiobus_write_nested(bus
, addr
, reg
, val
);
134 /* Wait for the bus to become free. */
135 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
139 /* Transmit the data to write. */
140 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_DATA
, val
);
144 /* Transmit the write command. */
145 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_CMD
,
146 SMI_CMD_OP_22_WRITE
| (addr
<< 5) | reg
);
150 /* Wait for the write command to complete. */
151 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
158 static int _mv88e6xxx_reg_write(struct dsa_switch
*ds
, int addr
, int reg
,
161 struct mii_bus
*bus
= dsa_host_dev_to_mii_bus(ds
->master_dev
);
168 dev_dbg(ds
->master_dev
, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
171 return __mv88e6xxx_reg_write(bus
, ds
->pd
->sw_addr
, addr
, reg
, val
);
174 int mv88e6xxx_reg_write(struct dsa_switch
*ds
, int addr
, int reg
, u16 val
)
176 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
179 mutex_lock(&ps
->smi_mutex
);
180 ret
= _mv88e6xxx_reg_write(ds
, addr
, reg
, val
);
181 mutex_unlock(&ps
->smi_mutex
);
186 int mv88e6xxx_set_addr_direct(struct dsa_switch
*ds
, u8
*addr
)
188 REG_WRITE(REG_GLOBAL
, GLOBAL_MAC_01
, (addr
[0] << 8) | addr
[1]);
189 REG_WRITE(REG_GLOBAL
, GLOBAL_MAC_23
, (addr
[2] << 8) | addr
[3]);
190 REG_WRITE(REG_GLOBAL
, GLOBAL_MAC_45
, (addr
[4] << 8) | addr
[5]);
195 int mv88e6xxx_set_addr_indirect(struct dsa_switch
*ds
, u8
*addr
)
200 for (i
= 0; i
< 6; i
++) {
203 /* Write the MAC address byte. */
204 REG_WRITE(REG_GLOBAL2
, GLOBAL2_SWITCH_MAC
,
205 GLOBAL2_SWITCH_MAC_BUSY
| (i
<< 8) | addr
[i
]);
207 /* Wait for the write to complete. */
208 for (j
= 0; j
< 16; j
++) {
209 ret
= REG_READ(REG_GLOBAL2
, GLOBAL2_SWITCH_MAC
);
210 if ((ret
& GLOBAL2_SWITCH_MAC_BUSY
) == 0)
/* Read a PHY register via direct register access.  A negative
 * address means no PHY is mapped; report 0xffff as an MDIO bus
 * would for an absent device.
 */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	if (addr < 0)
		return 0xffff;

	return _mv88e6xxx_reg_read(ds, addr, regnum);
}
227 static int _mv88e6xxx_phy_write(struct dsa_switch
*ds
, int addr
, int regnum
,
231 return _mv88e6xxx_reg_write(ds
, addr
, regnum
, val
);
235 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
236 static int mv88e6xxx_ppu_disable(struct dsa_switch
*ds
)
239 unsigned long timeout
;
241 ret
= REG_READ(REG_GLOBAL
, GLOBAL_CONTROL
);
242 REG_WRITE(REG_GLOBAL
, GLOBAL_CONTROL
,
243 ret
& ~GLOBAL_CONTROL_PPU_ENABLE
);
245 timeout
= jiffies
+ 1 * HZ
;
246 while (time_before(jiffies
, timeout
)) {
247 ret
= REG_READ(REG_GLOBAL
, GLOBAL_STATUS
);
248 usleep_range(1000, 2000);
249 if ((ret
& GLOBAL_STATUS_PPU_MASK
) !=
250 GLOBAL_STATUS_PPU_POLLING
)
257 static int mv88e6xxx_ppu_enable(struct dsa_switch
*ds
)
260 unsigned long timeout
;
262 ret
= REG_READ(REG_GLOBAL
, GLOBAL_CONTROL
);
263 REG_WRITE(REG_GLOBAL
, GLOBAL_CONTROL
, ret
| GLOBAL_CONTROL_PPU_ENABLE
);
265 timeout
= jiffies
+ 1 * HZ
;
266 while (time_before(jiffies
, timeout
)) {
267 ret
= REG_READ(REG_GLOBAL
, GLOBAL_STATUS
);
268 usleep_range(1000, 2000);
269 if ((ret
& GLOBAL_STATUS_PPU_MASK
) ==
270 GLOBAL_STATUS_PPU_POLLING
)
277 static void mv88e6xxx_ppu_reenable_work(struct work_struct
*ugly
)
279 struct mv88e6xxx_priv_state
*ps
;
281 ps
= container_of(ugly
, struct mv88e6xxx_priv_state
, ppu_work
);
282 if (mutex_trylock(&ps
->ppu_mutex
)) {
283 struct dsa_switch
*ds
= ((struct dsa_switch
*)ps
) - 1;
285 if (mv88e6xxx_ppu_enable(ds
) == 0)
286 ps
->ppu_disabled
= 0;
287 mutex_unlock(&ps
->ppu_mutex
);
291 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps
)
293 struct mv88e6xxx_priv_state
*ps
= (void *)_ps
;
295 schedule_work(&ps
->ppu_work
);
298 static int mv88e6xxx_ppu_access_get(struct dsa_switch
*ds
)
300 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
303 mutex_lock(&ps
->ppu_mutex
);
305 /* If the PHY polling unit is enabled, disable it so that
306 * we can access the PHY registers. If it was already
307 * disabled, cancel the timer that is going to re-enable
310 if (!ps
->ppu_disabled
) {
311 ret
= mv88e6xxx_ppu_disable(ds
);
313 mutex_unlock(&ps
->ppu_mutex
);
316 ps
->ppu_disabled
= 1;
318 del_timer(&ps
->ppu_timer
);
325 static void mv88e6xxx_ppu_access_put(struct dsa_switch
*ds
)
327 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
329 /* Schedule a timer to re-enable the PHY polling unit. */
330 mod_timer(&ps
->ppu_timer
, jiffies
+ msecs_to_jiffies(10));
331 mutex_unlock(&ps
->ppu_mutex
);
334 void mv88e6xxx_ppu_state_init(struct dsa_switch
*ds
)
336 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
338 mutex_init(&ps
->ppu_mutex
);
339 INIT_WORK(&ps
->ppu_work
, mv88e6xxx_ppu_reenable_work
);
340 init_timer(&ps
->ppu_timer
);
341 ps
->ppu_timer
.data
= (unsigned long)ps
;
342 ps
->ppu_timer
.function
= mv88e6xxx_ppu_reenable_timer
;
345 int mv88e6xxx_phy_read_ppu(struct dsa_switch
*ds
, int addr
, int regnum
)
349 ret
= mv88e6xxx_ppu_access_get(ds
);
351 ret
= mv88e6xxx_reg_read(ds
, addr
, regnum
);
352 mv88e6xxx_ppu_access_put(ds
);
358 int mv88e6xxx_phy_write_ppu(struct dsa_switch
*ds
, int addr
,
363 ret
= mv88e6xxx_ppu_access_get(ds
);
365 ret
= mv88e6xxx_reg_write(ds
, addr
, regnum
, val
);
366 mv88e6xxx_ppu_access_put(ds
);
373 static bool mv88e6xxx_6065_family(struct dsa_switch
*ds
)
375 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
378 case PORT_SWITCH_ID_6031
:
379 case PORT_SWITCH_ID_6061
:
380 case PORT_SWITCH_ID_6035
:
381 case PORT_SWITCH_ID_6065
:
387 static bool mv88e6xxx_6095_family(struct dsa_switch
*ds
)
389 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
392 case PORT_SWITCH_ID_6092
:
393 case PORT_SWITCH_ID_6095
:
399 static bool mv88e6xxx_6097_family(struct dsa_switch
*ds
)
401 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
404 case PORT_SWITCH_ID_6046
:
405 case PORT_SWITCH_ID_6085
:
406 case PORT_SWITCH_ID_6096
:
407 case PORT_SWITCH_ID_6097
:
413 static bool mv88e6xxx_6165_family(struct dsa_switch
*ds
)
415 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
418 case PORT_SWITCH_ID_6123
:
419 case PORT_SWITCH_ID_6161
:
420 case PORT_SWITCH_ID_6165
:
426 static bool mv88e6xxx_6185_family(struct dsa_switch
*ds
)
428 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
431 case PORT_SWITCH_ID_6121
:
432 case PORT_SWITCH_ID_6122
:
433 case PORT_SWITCH_ID_6152
:
434 case PORT_SWITCH_ID_6155
:
435 case PORT_SWITCH_ID_6182
:
436 case PORT_SWITCH_ID_6185
:
437 case PORT_SWITCH_ID_6108
:
438 case PORT_SWITCH_ID_6131
:
444 static bool mv88e6xxx_6320_family(struct dsa_switch
*ds
)
446 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
449 case PORT_SWITCH_ID_6320
:
450 case PORT_SWITCH_ID_6321
:
456 static bool mv88e6xxx_6351_family(struct dsa_switch
*ds
)
458 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
461 case PORT_SWITCH_ID_6171
:
462 case PORT_SWITCH_ID_6175
:
463 case PORT_SWITCH_ID_6350
:
464 case PORT_SWITCH_ID_6351
:
470 static bool mv88e6xxx_6352_family(struct dsa_switch
*ds
)
472 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
475 case PORT_SWITCH_ID_6172
:
476 case PORT_SWITCH_ID_6176
:
477 case PORT_SWITCH_ID_6240
:
478 case PORT_SWITCH_ID_6352
:
484 /* We expect the switch to perform auto negotiation if there is a real
485 * phy. However, in the case of a fixed link phy, we force the port
486 * settings from the fixed link settings.
488 void mv88e6xxx_adjust_link(struct dsa_switch
*ds
, int port
,
489 struct phy_device
*phydev
)
491 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
495 if (!phy_is_pseudo_fixed_link(phydev
))
498 mutex_lock(&ps
->smi_mutex
);
500 ret
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_PCS_CTRL
);
504 reg
= ret
& ~(PORT_PCS_CTRL_LINK_UP
|
505 PORT_PCS_CTRL_FORCE_LINK
|
506 PORT_PCS_CTRL_DUPLEX_FULL
|
507 PORT_PCS_CTRL_FORCE_DUPLEX
|
508 PORT_PCS_CTRL_UNFORCED
);
510 reg
|= PORT_PCS_CTRL_FORCE_LINK
;
512 reg
|= PORT_PCS_CTRL_LINK_UP
;
514 if (mv88e6xxx_6065_family(ds
) && phydev
->speed
> SPEED_100
)
517 switch (phydev
->speed
) {
519 reg
|= PORT_PCS_CTRL_1000
;
522 reg
|= PORT_PCS_CTRL_100
;
525 reg
|= PORT_PCS_CTRL_10
;
528 pr_info("Unknown speed");
532 reg
|= PORT_PCS_CTRL_FORCE_DUPLEX
;
533 if (phydev
->duplex
== DUPLEX_FULL
)
534 reg
|= PORT_PCS_CTRL_DUPLEX_FULL
;
536 if ((mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
)) &&
537 (port
>= ps
->num_ports
- 2)) {
538 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_RXID
)
539 reg
|= PORT_PCS_CTRL_RGMII_DELAY_RXCLK
;
540 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_TXID
)
541 reg
|= PORT_PCS_CTRL_RGMII_DELAY_TXCLK
;
542 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_ID
)
543 reg
|= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK
|
544 PORT_PCS_CTRL_RGMII_DELAY_TXCLK
);
546 _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_PCS_CTRL
, reg
);
549 mutex_unlock(&ps
->smi_mutex
);
552 static int _mv88e6xxx_stats_wait(struct dsa_switch
*ds
)
557 for (i
= 0; i
< 10; i
++) {
558 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_STATS_OP
);
559 if ((ret
& GLOBAL_STATS_OP_BUSY
) == 0)
566 static int _mv88e6xxx_stats_snapshot(struct dsa_switch
*ds
, int port
)
570 if (mv88e6xxx_6320_family(ds
) || mv88e6xxx_6352_family(ds
))
571 port
= (port
+ 1) << 5;
573 /* Snapshot the hardware statistics counters for this port. */
574 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_STATS_OP
,
575 GLOBAL_STATS_OP_CAPTURE_PORT
|
576 GLOBAL_STATS_OP_HIST_RX_TX
| port
);
580 /* Wait for the snapshotting to complete. */
581 ret
= _mv88e6xxx_stats_wait(ds
);
588 static void _mv88e6xxx_stats_read(struct dsa_switch
*ds
, int stat
, u32
*val
)
595 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_STATS_OP
,
596 GLOBAL_STATS_OP_READ_CAPTURED
|
597 GLOBAL_STATS_OP_HIST_RX_TX
| stat
);
601 ret
= _mv88e6xxx_stats_wait(ds
);
605 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_STATS_COUNTER_32
);
611 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_STATS_COUNTER_01
);
618 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats
[] = {
619 { "in_good_octets", 8, 0x00, },
620 { "in_bad_octets", 4, 0x02, },
621 { "in_unicast", 4, 0x04, },
622 { "in_broadcasts", 4, 0x06, },
623 { "in_multicasts", 4, 0x07, },
624 { "in_pause", 4, 0x16, },
625 { "in_undersize", 4, 0x18, },
626 { "in_fragments", 4, 0x19, },
627 { "in_oversize", 4, 0x1a, },
628 { "in_jabber", 4, 0x1b, },
629 { "in_rx_error", 4, 0x1c, },
630 { "in_fcs_error", 4, 0x1d, },
631 { "out_octets", 8, 0x0e, },
632 { "out_unicast", 4, 0x10, },
633 { "out_broadcasts", 4, 0x13, },
634 { "out_multicasts", 4, 0x12, },
635 { "out_pause", 4, 0x15, },
636 { "excessive", 4, 0x11, },
637 { "collisions", 4, 0x1e, },
638 { "deferred", 4, 0x05, },
639 { "single", 4, 0x14, },
640 { "multiple", 4, 0x17, },
641 { "out_fcs_error", 4, 0x03, },
642 { "late", 4, 0x1f, },
643 { "hist_64bytes", 4, 0x08, },
644 { "hist_65_127bytes", 4, 0x09, },
645 { "hist_128_255bytes", 4, 0x0a, },
646 { "hist_256_511bytes", 4, 0x0b, },
647 { "hist_512_1023bytes", 4, 0x0c, },
648 { "hist_1024_max_bytes", 4, 0x0d, },
649 /* Not all devices have the following counters */
650 { "sw_in_discards", 4, 0x110, },
651 { "sw_in_filtered", 2, 0x112, },
652 { "sw_out_filtered", 2, 0x113, },
656 static bool have_sw_in_discards(struct dsa_switch
*ds
)
658 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
661 case PORT_SWITCH_ID_6095
: case PORT_SWITCH_ID_6161
:
662 case PORT_SWITCH_ID_6165
: case PORT_SWITCH_ID_6171
:
663 case PORT_SWITCH_ID_6172
: case PORT_SWITCH_ID_6176
:
664 case PORT_SWITCH_ID_6182
: case PORT_SWITCH_ID_6185
:
665 case PORT_SWITCH_ID_6352
:
672 static void _mv88e6xxx_get_strings(struct dsa_switch
*ds
,
674 struct mv88e6xxx_hw_stat
*stats
,
675 int port
, uint8_t *data
)
679 for (i
= 0; i
< nr_stats
; i
++) {
680 memcpy(data
+ i
* ETH_GSTRING_LEN
,
681 stats
[i
].string
, ETH_GSTRING_LEN
);
685 static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch
*ds
,
687 struct mv88e6xxx_hw_stat
*stats
,
690 struct mv88e6xxx_hw_stat
*s
= stats
+ stat
;
696 if (s
->reg
>= 0x100) {
697 ret
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
),
703 if (s
->sizeof_stat
== 4) {
704 ret
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
),
711 _mv88e6xxx_stats_read(ds
, s
->reg
, &low
);
712 if (s
->sizeof_stat
== 8)
713 _mv88e6xxx_stats_read(ds
, s
->reg
+ 1, &high
);
715 value
= (((u64
)high
) << 16) | low
;
719 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch
*ds
,
721 struct mv88e6xxx_hw_stat
*stats
,
722 int port
, uint64_t *data
)
724 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
728 mutex_lock(&ps
->smi_mutex
);
730 ret
= _mv88e6xxx_stats_snapshot(ds
, port
);
732 mutex_unlock(&ps
->smi_mutex
);
736 /* Read each of the counters. */
737 for (i
= 0; i
< nr_stats
; i
++)
738 data
[i
] = _mv88e6xxx_get_ethtool_stat(ds
, i
, stats
, port
);
740 mutex_unlock(&ps
->smi_mutex
);
743 /* All the statistics in the table */
745 mv88e6xxx_get_strings(struct dsa_switch
*ds
, int port
, uint8_t *data
)
747 if (have_sw_in_discards(ds
))
748 _mv88e6xxx_get_strings(ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
),
749 mv88e6xxx_hw_stats
, port
, data
);
751 _mv88e6xxx_get_strings(ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3,
752 mv88e6xxx_hw_stats
, port
, data
);
755 int mv88e6xxx_get_sset_count(struct dsa_switch
*ds
)
757 if (have_sw_in_discards(ds
))
758 return ARRAY_SIZE(mv88e6xxx_hw_stats
);
759 return ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3;
763 mv88e6xxx_get_ethtool_stats(struct dsa_switch
*ds
,
764 int port
, uint64_t *data
)
766 if (have_sw_in_discards(ds
))
767 _mv88e6xxx_get_ethtool_stats(
768 ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
),
769 mv88e6xxx_hw_stats
, port
, data
);
771 _mv88e6xxx_get_ethtool_stats(
772 ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3,
773 mv88e6xxx_hw_stats
, port
, data
);
776 int mv88e6xxx_get_regs_len(struct dsa_switch
*ds
, int port
)
778 return 32 * sizeof(u16
);
781 void mv88e6xxx_get_regs(struct dsa_switch
*ds
, int port
,
782 struct ethtool_regs
*regs
, void *_p
)
789 memset(p
, 0xff, 32 * sizeof(u16
));
791 for (i
= 0; i
< 32; i
++) {
794 ret
= mv88e6xxx_reg_read(ds
, REG_PORT(port
), i
);
800 static int _mv88e6xxx_wait(struct dsa_switch
*ds
, int reg
, int offset
,
803 unsigned long timeout
= jiffies
+ HZ
/ 10;
805 while (time_before(jiffies
, timeout
)) {
808 ret
= _mv88e6xxx_reg_read(ds
, reg
, offset
);
814 usleep_range(1000, 2000);
819 static int mv88e6xxx_wait(struct dsa_switch
*ds
, int reg
, int offset
, u16 mask
)
821 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
824 mutex_lock(&ps
->smi_mutex
);
825 ret
= _mv88e6xxx_wait(ds
, reg
, offset
, mask
);
826 mutex_unlock(&ps
->smi_mutex
);
831 static int _mv88e6xxx_phy_wait(struct dsa_switch
*ds
)
833 return _mv88e6xxx_wait(ds
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
834 GLOBAL2_SMI_OP_BUSY
);
837 int mv88e6xxx_eeprom_load_wait(struct dsa_switch
*ds
)
839 return mv88e6xxx_wait(ds
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
840 GLOBAL2_EEPROM_OP_LOAD
);
843 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch
*ds
)
845 return mv88e6xxx_wait(ds
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
846 GLOBAL2_EEPROM_OP_BUSY
);
849 static int _mv88e6xxx_atu_wait(struct dsa_switch
*ds
)
851 return _mv88e6xxx_wait(ds
, REG_GLOBAL
, GLOBAL_ATU_OP
,
855 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch
*ds
, int addr
,
860 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
861 GLOBAL2_SMI_OP_22_READ
| (addr
<< 5) |
866 ret
= _mv88e6xxx_phy_wait(ds
);
870 return _mv88e6xxx_reg_read(ds
, REG_GLOBAL2
, GLOBAL2_SMI_DATA
);
873 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch
*ds
, int addr
,
878 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL2
, GLOBAL2_SMI_DATA
, val
);
882 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
883 GLOBAL2_SMI_OP_22_WRITE
| (addr
<< 5) |
886 return _mv88e6xxx_phy_wait(ds
);
889 int mv88e6xxx_get_eee(struct dsa_switch
*ds
, int port
, struct ethtool_eee
*e
)
891 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
894 mutex_lock(&ps
->smi_mutex
);
896 reg
= _mv88e6xxx_phy_read_indirect(ds
, port
, 16);
900 e
->eee_enabled
= !!(reg
& 0x0200);
901 e
->tx_lpi_enabled
= !!(reg
& 0x0100);
903 reg
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_STATUS
);
907 e
->eee_active
= !!(reg
& PORT_STATUS_EEE
);
911 mutex_unlock(&ps
->smi_mutex
);
915 int mv88e6xxx_set_eee(struct dsa_switch
*ds
, int port
,
916 struct phy_device
*phydev
, struct ethtool_eee
*e
)
918 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
922 mutex_lock(&ps
->smi_mutex
);
924 ret
= _mv88e6xxx_phy_read_indirect(ds
, port
, 16);
931 if (e
->tx_lpi_enabled
)
934 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 16, reg
);
936 mutex_unlock(&ps
->smi_mutex
);
941 static int _mv88e6xxx_atu_cmd(struct dsa_switch
*ds
, u16 cmd
)
945 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_OP
, cmd
);
949 return _mv88e6xxx_atu_wait(ds
);
952 static int _mv88e6xxx_atu_data_write(struct dsa_switch
*ds
,
953 struct mv88e6xxx_atu_entry
*entry
)
955 u16 data
= entry
->state
& GLOBAL_ATU_DATA_STATE_MASK
;
957 if (entry
->state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
958 unsigned int mask
, shift
;
961 data
|= GLOBAL_ATU_DATA_TRUNK
;
962 mask
= GLOBAL_ATU_DATA_TRUNK_ID_MASK
;
963 shift
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT
;
965 mask
= GLOBAL_ATU_DATA_PORT_VECTOR_MASK
;
966 shift
= GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
;
969 data
|= (entry
->portv_trunkid
<< shift
) & mask
;
972 return _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_DATA
, data
);
975 static int _mv88e6xxx_atu_flush_move(struct dsa_switch
*ds
,
976 struct mv88e6xxx_atu_entry
*entry
,
982 err
= _mv88e6xxx_atu_wait(ds
);
986 err
= _mv88e6xxx_atu_data_write(ds
, entry
);
991 err
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_FID
,
996 op
= static_too
? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB
:
997 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB
;
999 op
= static_too
? GLOBAL_ATU_OP_FLUSH_MOVE_ALL
:
1000 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC
;
1003 return _mv88e6xxx_atu_cmd(ds
, op
);
1006 static int _mv88e6xxx_atu_flush(struct dsa_switch
*ds
, u16 fid
, bool static_too
)
1008 struct mv88e6xxx_atu_entry entry
= {
1010 .state
= 0, /* EntryState bits must be 0 */
1013 return _mv88e6xxx_atu_flush_move(ds
, &entry
, static_too
);
1016 static int _mv88e6xxx_atu_move(struct dsa_switch
*ds
, u16 fid
, int from_port
,
1017 int to_port
, bool static_too
)
1019 struct mv88e6xxx_atu_entry entry
= {
1024 /* EntryState bits must be 0xF */
1025 entry
.state
= GLOBAL_ATU_DATA_STATE_MASK
;
1027 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1028 entry
.portv_trunkid
= (to_port
& 0x0f) << 4;
1029 entry
.portv_trunkid
|= from_port
& 0x0f;
1031 return _mv88e6xxx_atu_flush_move(ds
, &entry
, static_too
);
1034 static int _mv88e6xxx_atu_remove(struct dsa_switch
*ds
, u16 fid
, int port
,
1037 /* Destination port 0xF means remove the entries */
1038 return _mv88e6xxx_atu_move(ds
, fid
, port
, 0x0f, static_too
);
1041 static int mv88e6xxx_set_port_state(struct dsa_switch
*ds
, int port
, u8 state
)
1043 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1047 mutex_lock(&ps
->smi_mutex
);
1049 reg
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_CONTROL
);
1055 oldstate
= reg
& PORT_CONTROL_STATE_MASK
;
1056 if (oldstate
!= state
) {
1057 /* Flush forwarding database if we're moving a port
1058 * from Learning or Forwarding state to Disabled or
1059 * Blocking or Listening state.
1061 if (oldstate
>= PORT_CONTROL_STATE_LEARNING
&&
1062 state
<= PORT_CONTROL_STATE_BLOCKING
) {
1063 ret
= _mv88e6xxx_atu_remove(ds
, 0, port
, false);
1067 reg
= (reg
& ~PORT_CONTROL_STATE_MASK
) | state
;
1068 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_CONTROL
,
1073 mutex_unlock(&ps
->smi_mutex
);
1077 static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch
*ds
, int port
,
1080 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1081 const u16 mask
= (1 << ps
->num_ports
) - 1;
1084 reg
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_BASE_VLAN
);
1089 reg
|= output_ports
& mask
;
1091 return _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_BASE_VLAN
, reg
);
1094 int mv88e6xxx_port_stp_update(struct dsa_switch
*ds
, int port
, u8 state
)
1096 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1100 case BR_STATE_DISABLED
:
1101 stp_state
= PORT_CONTROL_STATE_DISABLED
;
1103 case BR_STATE_BLOCKING
:
1104 case BR_STATE_LISTENING
:
1105 stp_state
= PORT_CONTROL_STATE_BLOCKING
;
1107 case BR_STATE_LEARNING
:
1108 stp_state
= PORT_CONTROL_STATE_LEARNING
;
1110 case BR_STATE_FORWARDING
:
1112 stp_state
= PORT_CONTROL_STATE_FORWARDING
;
1116 netdev_dbg(ds
->ports
[port
], "port state %d [%d]\n", state
, stp_state
);
1118 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1119 * so we can not update the port state directly but need to schedule it.
1121 ps
->port_state
[port
] = stp_state
;
1122 set_bit(port
, &ps
->port_state_update_mask
);
1123 schedule_work(&ps
->bridge_work
);
1128 static int _mv88e6xxx_port_pvid_get(struct dsa_switch
*ds
, int port
, u16
*pvid
)
1132 ret
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_DEFAULT_VLAN
);
1136 *pvid
= ret
& PORT_DEFAULT_VLAN_MASK
;
1141 int mv88e6xxx_port_pvid_get(struct dsa_switch
*ds
, int port
, u16
*pvid
)
1145 ret
= mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_DEFAULT_VLAN
);
1149 *pvid
= ret
& PORT_DEFAULT_VLAN_MASK
;
1154 static int _mv88e6xxx_port_pvid_set(struct dsa_switch
*ds
, int port
, u16 pvid
)
1156 return _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_DEFAULT_VLAN
,
1157 pvid
& PORT_DEFAULT_VLAN_MASK
);
1160 static int _mv88e6xxx_vtu_wait(struct dsa_switch
*ds
)
1162 return _mv88e6xxx_wait(ds
, REG_GLOBAL
, GLOBAL_VTU_OP
,
1163 GLOBAL_VTU_OP_BUSY
);
1166 static int _mv88e6xxx_vtu_cmd(struct dsa_switch
*ds
, u16 op
)
1170 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_OP
, op
);
1174 return _mv88e6xxx_vtu_wait(ds
);
1177 static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch
*ds
)
1181 ret
= _mv88e6xxx_vtu_wait(ds
);
1185 return _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_FLUSH_ALL
);
1188 static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch
*ds
,
1189 struct mv88e6xxx_vtu_stu_entry
*entry
,
1190 unsigned int nibble_offset
)
1192 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1197 for (i
= 0; i
< 3; ++i
) {
1198 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
,
1199 GLOBAL_VTU_DATA_0_3
+ i
);
1206 for (i
= 0; i
< ps
->num_ports
; ++i
) {
1207 unsigned int shift
= (i
% 4) * 4 + nibble_offset
;
1208 u16 reg
= regs
[i
/ 4];
1210 entry
->data
[i
] = (reg
>> shift
) & GLOBAL_VTU_STU_DATA_MASK
;
1216 static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch
*ds
,
1217 struct mv88e6xxx_vtu_stu_entry
*entry
,
1218 unsigned int nibble_offset
)
1220 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1221 u16 regs
[3] = { 0 };
1225 for (i
= 0; i
< ps
->num_ports
; ++i
) {
1226 unsigned int shift
= (i
% 4) * 4 + nibble_offset
;
1227 u8 data
= entry
->data
[i
];
1229 regs
[i
/ 4] |= (data
& GLOBAL_VTU_STU_DATA_MASK
) << shift
;
1232 for (i
= 0; i
< 3; ++i
) {
1233 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
,
1234 GLOBAL_VTU_DATA_0_3
+ i
, regs
[i
]);
1242 static int _mv88e6xxx_vtu_vid_write(struct dsa_switch
*ds
, u16 vid
)
1244 return _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
,
1245 vid
& GLOBAL_VTU_VID_MASK
);
1248 static int _mv88e6xxx_vtu_getnext(struct dsa_switch
*ds
,
1249 struct mv88e6xxx_vtu_stu_entry
*entry
)
1251 struct mv88e6xxx_vtu_stu_entry next
= { 0 };
1254 ret
= _mv88e6xxx_vtu_wait(ds
);
1258 ret
= _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_VTU_GET_NEXT
);
1262 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
);
1266 next
.vid
= ret
& GLOBAL_VTU_VID_MASK
;
1267 next
.valid
= !!(ret
& GLOBAL_VTU_VID_VALID
);
1270 ret
= _mv88e6xxx_vtu_stu_data_read(ds
, &next
, 0);
1274 if (mv88e6xxx_6097_family(ds
) || mv88e6xxx_6165_family(ds
) ||
1275 mv88e6xxx_6351_family(ds
) || mv88e6xxx_6352_family(ds
)) {
1276 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
,
1281 next
.fid
= ret
& GLOBAL_VTU_FID_MASK
;
1283 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
,
1288 next
.sid
= ret
& GLOBAL_VTU_SID_MASK
;
1296 static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch
*ds
,
1297 struct mv88e6xxx_vtu_stu_entry
*entry
)
1302 ret
= _mv88e6xxx_vtu_wait(ds
);
1309 /* Write port member tags */
1310 ret
= _mv88e6xxx_vtu_stu_data_write(ds
, entry
, 0);
1314 if (mv88e6xxx_6097_family(ds
) || mv88e6xxx_6165_family(ds
) ||
1315 mv88e6xxx_6351_family(ds
) || mv88e6xxx_6352_family(ds
)) {
1316 reg
= entry
->sid
& GLOBAL_VTU_SID_MASK
;
1317 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_SID
, reg
);
1321 reg
= entry
->fid
& GLOBAL_VTU_FID_MASK
;
1322 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_FID
, reg
);
1327 reg
= GLOBAL_VTU_VID_VALID
;
1329 reg
|= entry
->vid
& GLOBAL_VTU_VID_MASK
;
1330 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
, reg
);
1334 return _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_VTU_LOAD_PURGE
);
1337 static int _mv88e6xxx_stu_getnext(struct dsa_switch
*ds
, u8 sid
,
1338 struct mv88e6xxx_vtu_stu_entry
*entry
)
1340 struct mv88e6xxx_vtu_stu_entry next
= { 0 };
1343 ret
= _mv88e6xxx_vtu_wait(ds
);
1347 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_SID
,
1348 sid
& GLOBAL_VTU_SID_MASK
);
1352 ret
= _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_STU_GET_NEXT
);
1356 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_VTU_SID
);
1360 next
.sid
= ret
& GLOBAL_VTU_SID_MASK
;
1362 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
);
1366 next
.valid
= !!(ret
& GLOBAL_VTU_VID_VALID
);
1369 ret
= _mv88e6xxx_vtu_stu_data_read(ds
, &next
, 2);
1378 static int _mv88e6xxx_stu_loadpurge(struct dsa_switch
*ds
,
1379 struct mv88e6xxx_vtu_stu_entry
*entry
)
1384 ret
= _mv88e6xxx_vtu_wait(ds
);
1391 /* Write port states */
1392 ret
= _mv88e6xxx_vtu_stu_data_write(ds
, entry
, 2);
1396 reg
= GLOBAL_VTU_VID_VALID
;
1398 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
, reg
);
1402 reg
= entry
->sid
& GLOBAL_VTU_SID_MASK
;
1403 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_SID
, reg
);
1407 return _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_STU_LOAD_PURGE
);
1410 static int _mv88e6xxx_vlan_init(struct dsa_switch
*ds
, u16 vid
,
1411 struct mv88e6xxx_vtu_stu_entry
*entry
)
1413 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1414 struct mv88e6xxx_vtu_stu_entry vlan
= {
1417 .fid
= vid
, /* We use one FID per VLAN */
1421 /* exclude all ports except the CPU and DSA ports */
1422 for (i
= 0; i
< ps
->num_ports
; ++i
)
1423 vlan
.data
[i
] = dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
)
1424 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1425 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
;
1427 if (mv88e6xxx_6097_family(ds
) || mv88e6xxx_6165_family(ds
) ||
1428 mv88e6xxx_6351_family(ds
) || mv88e6xxx_6352_family(ds
)) {
1429 struct mv88e6xxx_vtu_stu_entry vstp
;
1432 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1433 * implemented, only one STU entry is needed to cover all VTU
1434 * entries. Thus, validate the SID 0.
1437 err
= _mv88e6xxx_stu_getnext(ds
, GLOBAL_VTU_SID_MASK
, &vstp
);
1441 if (vstp
.sid
!= vlan
.sid
|| !vstp
.valid
) {
1442 memset(&vstp
, 0, sizeof(vstp
));
1444 vstp
.sid
= vlan
.sid
;
1446 err
= _mv88e6xxx_stu_loadpurge(ds
, &vstp
);
1451 /* Clear all MAC addresses from the new database */
1452 err
= _mv88e6xxx_atu_flush(ds
, vlan
.fid
, true);
1461 int mv88e6xxx_port_vlan_prepare(struct dsa_switch
*ds
, int port
,
1462 const struct switchdev_obj_port_vlan
*vlan
,
1463 struct switchdev_trans
*trans
)
1465 /* We reserve a few VLANs to isolate unbridged ports */
1466 if (vlan
->vid_end
>= 4000)
1469 /* We don't need any dynamic resource from the kernel (yet),
1470 * so skip the prepare phase.
1475 static int _mv88e6xxx_port_vlan_add(struct dsa_switch
*ds
, int port
, u16 vid
,
1478 struct mv88e6xxx_vtu_stu_entry vlan
;
1481 err
= _mv88e6xxx_vtu_vid_write(ds
, vid
- 1);
1485 err
= _mv88e6xxx_vtu_getnext(ds
, &vlan
);
1489 if (vlan
.vid
!= vid
|| !vlan
.valid
) {
1490 err
= _mv88e6xxx_vlan_init(ds
, vid
, &vlan
);
1495 vlan
.data
[port
] = untagged
?
1496 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
:
1497 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED
;
1499 return _mv88e6xxx_vtu_loadpurge(ds
, &vlan
);
1502 int mv88e6xxx_port_vlan_add(struct dsa_switch
*ds
, int port
,
1503 const struct switchdev_obj_port_vlan
*vlan
,
1504 struct switchdev_trans
*trans
)
1506 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1507 bool untagged
= vlan
->flags
& BRIDGE_VLAN_INFO_UNTAGGED
;
1508 bool pvid
= vlan
->flags
& BRIDGE_VLAN_INFO_PVID
;
1512 mutex_lock(&ps
->smi_mutex
);
1514 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; ++vid
) {
1515 err
= _mv88e6xxx_port_vlan_add(ds
, port
, vid
, untagged
);
1520 /* no PVID with ranges, otherwise it's a bug */
1522 err
= _mv88e6xxx_port_pvid_set(ds
, port
, vid
);
1524 mutex_unlock(&ps
->smi_mutex
);
1529 static int _mv88e6xxx_port_vlan_del(struct dsa_switch
*ds
, int port
, u16 vid
)
1531 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1532 struct mv88e6xxx_vtu_stu_entry vlan
;
1535 err
= _mv88e6xxx_vtu_vid_write(ds
, vid
- 1);
1539 err
= _mv88e6xxx_vtu_getnext(ds
, &vlan
);
1543 if (vlan
.vid
!= vid
|| !vlan
.valid
||
1544 vlan
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
)
1547 vlan
.data
[port
] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
;
1549 /* keep the VLAN unless all ports are excluded */
1551 for (i
= 0; i
< ps
->num_ports
; ++i
) {
1552 if (dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
))
1555 if (vlan
.data
[i
] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
) {
1561 err
= _mv88e6xxx_vtu_loadpurge(ds
, &vlan
);
1565 return _mv88e6xxx_atu_remove(ds
, vlan
.fid
, port
, false);
1568 int mv88e6xxx_port_vlan_del(struct dsa_switch
*ds
, int port
,
1569 const struct switchdev_obj_port_vlan
*vlan
)
1571 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1575 mutex_lock(&ps
->smi_mutex
);
1577 err
= _mv88e6xxx_port_pvid_get(ds
, port
, &pvid
);
1581 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; ++vid
) {
1582 err
= _mv88e6xxx_port_vlan_del(ds
, port
, vid
);
1587 err
= _mv88e6xxx_port_pvid_set(ds
, port
, 0);
1594 mutex_unlock(&ps
->smi_mutex
);
1599 int mv88e6xxx_vlan_getnext(struct dsa_switch
*ds
, u16
*vid
,
1600 unsigned long *ports
, unsigned long *untagged
)
1602 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1603 struct mv88e6xxx_vtu_stu_entry next
;
1610 mutex_lock(&ps
->smi_mutex
);
1611 err
= _mv88e6xxx_vtu_vid_write(ds
, *vid
);
1615 err
= _mv88e6xxx_vtu_getnext(ds
, &next
);
1617 mutex_unlock(&ps
->smi_mutex
);
1627 for (port
= 0; port
< ps
->num_ports
; ++port
) {
1628 clear_bit(port
, ports
);
1629 clear_bit(port
, untagged
);
1631 if (dsa_is_cpu_port(ds
, port
) || dsa_is_dsa_port(ds
, port
))
1634 if (next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED
||
1635 next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
)
1636 set_bit(port
, ports
);
1638 if (next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
)
1639 set_bit(port
, untagged
);
1645 static int _mv88e6xxx_atu_mac_write(struct dsa_switch
*ds
,
1646 const unsigned char *addr
)
1650 for (i
= 0; i
< 3; i
++) {
1651 ret
= _mv88e6xxx_reg_write(
1652 ds
, REG_GLOBAL
, GLOBAL_ATU_MAC_01
+ i
,
1653 (addr
[i
* 2] << 8) | addr
[i
* 2 + 1]);
1661 static int _mv88e6xxx_atu_mac_read(struct dsa_switch
*ds
, unsigned char *addr
)
1665 for (i
= 0; i
< 3; i
++) {
1666 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
,
1667 GLOBAL_ATU_MAC_01
+ i
);
1670 addr
[i
* 2] = ret
>> 8;
1671 addr
[i
* 2 + 1] = ret
& 0xff;
1677 static int _mv88e6xxx_atu_load(struct dsa_switch
*ds
,
1678 struct mv88e6xxx_atu_entry
*entry
)
1682 ret
= _mv88e6xxx_atu_wait(ds
);
1686 ret
= _mv88e6xxx_atu_mac_write(ds
, entry
->mac
);
1690 ret
= _mv88e6xxx_atu_data_write(ds
, entry
);
1694 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_FID
, entry
->fid
);
1698 return _mv88e6xxx_atu_cmd(ds
, GLOBAL_ATU_OP_LOAD_DB
);
1701 static int _mv88e6xxx_port_fdb_load(struct dsa_switch
*ds
, int port
,
1702 const unsigned char *addr
, u16 vid
,
1705 struct mv88e6xxx_atu_entry entry
= { 0 };
1707 entry
.fid
= vid
; /* We use one FID per VLAN */
1708 entry
.state
= state
;
1709 ether_addr_copy(entry
.mac
, addr
);
1710 if (state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
1711 entry
.trunk
= false;
1712 entry
.portv_trunkid
= BIT(port
);
1715 return _mv88e6xxx_atu_load(ds
, &entry
);
1718 int mv88e6xxx_port_fdb_prepare(struct dsa_switch
*ds
, int port
,
1719 const struct switchdev_obj_port_fdb
*fdb
,
1720 struct switchdev_trans
*trans
)
1722 /* We don't use per-port FDB */
1726 /* We don't need any dynamic resource from the kernel (yet),
1727 * so skip the prepare phase.
1732 int mv88e6xxx_port_fdb_add(struct dsa_switch
*ds
, int port
,
1733 const struct switchdev_obj_port_fdb
*fdb
,
1734 struct switchdev_trans
*trans
)
1736 int state
= is_multicast_ether_addr(fdb
->addr
) ?
1737 GLOBAL_ATU_DATA_STATE_MC_STATIC
:
1738 GLOBAL_ATU_DATA_STATE_UC_STATIC
;
1739 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1742 mutex_lock(&ps
->smi_mutex
);
1743 ret
= _mv88e6xxx_port_fdb_load(ds
, port
, fdb
->addr
, fdb
->vid
, state
);
1744 mutex_unlock(&ps
->smi_mutex
);
1749 int mv88e6xxx_port_fdb_del(struct dsa_switch
*ds
, int port
,
1750 const struct switchdev_obj_port_fdb
*fdb
)
1752 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1755 mutex_lock(&ps
->smi_mutex
);
1756 ret
= _mv88e6xxx_port_fdb_load(ds
, port
, fdb
->addr
, fdb
->vid
,
1757 GLOBAL_ATU_DATA_STATE_UNUSED
);
1758 mutex_unlock(&ps
->smi_mutex
);
1763 static int _mv88e6xxx_atu_getnext(struct dsa_switch
*ds
, u16 fid
,
1764 struct mv88e6xxx_atu_entry
*entry
)
1766 struct mv88e6xxx_atu_entry next
= { 0 };
1771 ret
= _mv88e6xxx_atu_wait(ds
);
1775 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_FID
, fid
);
1779 ret
= _mv88e6xxx_atu_cmd(ds
, GLOBAL_ATU_OP_GET_NEXT_DB
);
1783 ret
= _mv88e6xxx_atu_mac_read(ds
, next
.mac
);
1787 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_ATU_DATA
);
1791 next
.state
= ret
& GLOBAL_ATU_DATA_STATE_MASK
;
1792 if (next
.state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
1793 unsigned int mask
, shift
;
1795 if (ret
& GLOBAL_ATU_DATA_TRUNK
) {
1797 mask
= GLOBAL_ATU_DATA_TRUNK_ID_MASK
;
1798 shift
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT
;
1801 mask
= GLOBAL_ATU_DATA_PORT_VECTOR_MASK
;
1802 shift
= GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
;
1805 next
.portv_trunkid
= (ret
& mask
) >> shift
;
1812 int mv88e6xxx_port_fdb_dump(struct dsa_switch
*ds
, int port
,
1813 struct switchdev_obj_port_fdb
*fdb
,
1814 int (*cb
)(struct switchdev_obj
*obj
))
1816 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1817 struct mv88e6xxx_vtu_stu_entry vlan
= {
1818 .vid
= GLOBAL_VTU_VID_MASK
, /* all ones */
1822 mutex_lock(&ps
->smi_mutex
);
1824 err
= _mv88e6xxx_vtu_vid_write(ds
, vlan
.vid
);
1829 struct mv88e6xxx_atu_entry addr
= {
1830 .mac
= { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
1833 err
= _mv88e6xxx_vtu_getnext(ds
, &vlan
);
1840 err
= _mv88e6xxx_atu_mac_write(ds
, addr
.mac
);
1845 err
= _mv88e6xxx_atu_getnext(ds
, vlan
.fid
, &addr
);
1849 if (addr
.state
== GLOBAL_ATU_DATA_STATE_UNUSED
)
1852 if (!addr
.trunk
&& addr
.portv_trunkid
& BIT(port
)) {
1853 bool is_static
= addr
.state
==
1854 (is_multicast_ether_addr(addr
.mac
) ?
1855 GLOBAL_ATU_DATA_STATE_MC_STATIC
:
1856 GLOBAL_ATU_DATA_STATE_UC_STATIC
);
1858 fdb
->vid
= vlan
.vid
;
1859 ether_addr_copy(fdb
->addr
, addr
.mac
);
1860 fdb
->ndm_state
= is_static
? NUD_NOARP
:
1863 err
= cb(&fdb
->obj
);
1867 } while (!is_broadcast_ether_addr(addr
.mac
));
1869 } while (vlan
.vid
< GLOBAL_VTU_VID_MASK
);
1872 mutex_unlock(&ps
->smi_mutex
);
1877 int mv88e6xxx_port_bridge_join(struct dsa_switch
*ds
, int port
, u32 members
)
1879 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1880 const u16 pvid
= 4000 + ds
->index
* DSA_MAX_PORTS
+ port
;
1883 /* The port joined a bridge, so leave its reserved VLAN */
1884 mutex_lock(&ps
->smi_mutex
);
1885 err
= _mv88e6xxx_port_vlan_del(ds
, port
, pvid
);
1887 err
= _mv88e6xxx_port_pvid_set(ds
, port
, 0);
1888 mutex_unlock(&ps
->smi_mutex
);
1892 int mv88e6xxx_port_bridge_leave(struct dsa_switch
*ds
, int port
, u32 members
)
1894 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1895 const u16 pvid
= 4000 + ds
->index
* DSA_MAX_PORTS
+ port
;
1898 /* The port left the bridge, so join its reserved VLAN */
1899 mutex_lock(&ps
->smi_mutex
);
1900 err
= _mv88e6xxx_port_vlan_add(ds
, port
, pvid
, true);
1902 err
= _mv88e6xxx_port_pvid_set(ds
, port
, pvid
);
1903 mutex_unlock(&ps
->smi_mutex
);
1907 static void mv88e6xxx_bridge_work(struct work_struct
*work
)
1909 struct mv88e6xxx_priv_state
*ps
;
1910 struct dsa_switch
*ds
;
1913 ps
= container_of(work
, struct mv88e6xxx_priv_state
, bridge_work
);
1914 ds
= ((struct dsa_switch
*)ps
) - 1;
1916 while (ps
->port_state_update_mask
) {
1917 port
= __ffs(ps
->port_state_update_mask
);
1918 clear_bit(port
, &ps
->port_state_update_mask
);
1919 mv88e6xxx_set_port_state(ds
, port
, ps
->port_state
[port
]);
1923 static int mv88e6xxx_setup_port(struct dsa_switch
*ds
, int port
)
1925 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1929 mutex_lock(&ps
->smi_mutex
);
1931 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
1932 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
1933 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6095_family(ds
) ||
1934 mv88e6xxx_6065_family(ds
) || mv88e6xxx_6320_family(ds
)) {
1935 /* MAC Forcing register: don't force link, speed,
1936 * duplex or flow control state to any particular
1937 * values on physical ports, but force the CPU port
1938 * and all DSA ports to their maximum bandwidth and
1941 reg
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_PCS_CTRL
);
1942 if (dsa_is_cpu_port(ds
, port
) || dsa_is_dsa_port(ds
, port
)) {
1943 reg
&= ~PORT_PCS_CTRL_UNFORCED
;
1944 reg
|= PORT_PCS_CTRL_FORCE_LINK
|
1945 PORT_PCS_CTRL_LINK_UP
|
1946 PORT_PCS_CTRL_DUPLEX_FULL
|
1947 PORT_PCS_CTRL_FORCE_DUPLEX
;
1948 if (mv88e6xxx_6065_family(ds
))
1949 reg
|= PORT_PCS_CTRL_100
;
1951 reg
|= PORT_PCS_CTRL_1000
;
1953 reg
|= PORT_PCS_CTRL_UNFORCED
;
1956 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
1957 PORT_PCS_CTRL
, reg
);
1962 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
1963 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
1964 * tunneling, determine priority by looking at 802.1p and IP
1965 * priority fields (IP prio has precedence), and set STP state
1968 * If this is the CPU link, use DSA or EDSA tagging depending
1969 * on which tagging mode was configured.
1971 * If this is a link to another switch, use DSA tagging mode.
1973 * If this is the upstream port for this switch, enable
1974 * forwarding of unknown unicasts and multicasts.
1977 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
1978 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
1979 mv88e6xxx_6095_family(ds
) || mv88e6xxx_6065_family(ds
) ||
1980 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6320_family(ds
))
1981 reg
= PORT_CONTROL_IGMP_MLD_SNOOP
|
1982 PORT_CONTROL_USE_TAG
| PORT_CONTROL_USE_IP
|
1983 PORT_CONTROL_STATE_FORWARDING
;
1984 if (dsa_is_cpu_port(ds
, port
)) {
1985 if (mv88e6xxx_6095_family(ds
) || mv88e6xxx_6185_family(ds
))
1986 reg
|= PORT_CONTROL_DSA_TAG
;
1987 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
1988 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
1989 mv88e6xxx_6320_family(ds
)) {
1990 if (ds
->dst
->tag_protocol
== DSA_TAG_PROTO_EDSA
)
1991 reg
|= PORT_CONTROL_FRAME_ETHER_TYPE_DSA
;
1993 reg
|= PORT_CONTROL_FRAME_MODE_DSA
;
1994 reg
|= PORT_CONTROL_FORWARD_UNKNOWN
|
1995 PORT_CONTROL_FORWARD_UNKNOWN_MC
;
1998 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
1999 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2000 mv88e6xxx_6095_family(ds
) || mv88e6xxx_6065_family(ds
) ||
2001 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6320_family(ds
)) {
2002 if (ds
->dst
->tag_protocol
== DSA_TAG_PROTO_EDSA
)
2003 reg
|= PORT_CONTROL_EGRESS_ADD_TAG
;
2006 if (dsa_is_dsa_port(ds
, port
)) {
2007 if (mv88e6xxx_6095_family(ds
) || mv88e6xxx_6185_family(ds
))
2008 reg
|= PORT_CONTROL_DSA_TAG
;
2009 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2010 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2011 mv88e6xxx_6320_family(ds
)) {
2012 reg
|= PORT_CONTROL_FRAME_MODE_DSA
;
2015 if (port
== dsa_upstream_port(ds
))
2016 reg
|= PORT_CONTROL_FORWARD_UNKNOWN
|
2017 PORT_CONTROL_FORWARD_UNKNOWN_MC
;
2020 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2026 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2027 * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
2028 * untagged frames on this port, do a destination address lookup on all
2029 * received packets as usual, disable ARP mirroring and don't send a
2030 * copy of all transmitted/received frames on this port to the CPU.
2033 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2034 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2035 mv88e6xxx_6095_family(ds
) || mv88e6xxx_6320_family(ds
))
2036 reg
= PORT_CONTROL_2_MAP_DA
;
2038 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2039 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6320_family(ds
))
2040 reg
|= PORT_CONTROL_2_JUMBO_10240
;
2042 if (mv88e6xxx_6095_family(ds
) || mv88e6xxx_6185_family(ds
)) {
2043 /* Set the upstream port this port should use */
2044 reg
|= dsa_upstream_port(ds
);
2045 /* enable forwarding of unknown multicast addresses to
2048 if (port
== dsa_upstream_port(ds
))
2049 reg
|= PORT_CONTROL_2_FORWARD_UNKNOWN
;
2052 reg
|= PORT_CONTROL_2_8021Q_SECURE
;
2055 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2056 PORT_CONTROL_2
, reg
);
2061 /* Port Association Vector: when learning source addresses
2062 * of packets, add the address to the address database using
2063 * a port bitmap that has only the bit for this port set and
2064 * the other bits clear.
2067 /* Disable learning for DSA and CPU ports */
2068 if (dsa_is_cpu_port(ds
, port
) || dsa_is_dsa_port(ds
, port
))
2069 reg
= PORT_ASSOC_VECTOR_LOCKED_PORT
;
2071 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_ASSOC_VECTOR
, reg
);
2075 /* Egress rate control 2: disable egress rate control. */
2076 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_RATE_CONTROL_2
,
2081 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2082 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2083 mv88e6xxx_6320_family(ds
)) {
2084 /* Do not limit the period of time that this port can
2085 * be paused for by the remote end or the period of
2086 * time that this port can pause the remote end.
2088 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2089 PORT_PAUSE_CTRL
, 0x0000);
2093 /* Port ATU control: disable limiting the number of
2094 * address database entries that this port is allowed
2097 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2098 PORT_ATU_CONTROL
, 0x0000);
2099 /* Priority Override: disable DA, SA and VTU priority
2102 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2103 PORT_PRI_OVERRIDE
, 0x0000);
2107 /* Port Ethertype: use the Ethertype DSA Ethertype
2110 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2111 PORT_ETH_TYPE
, ETH_P_EDSA
);
2114 /* Tag Remap: use an identity 802.1p prio -> switch
2117 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2118 PORT_TAG_REGMAP_0123
, 0x3210);
2122 /* Tag Remap 2: use an identity 802.1p prio -> switch
2125 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2126 PORT_TAG_REGMAP_4567
, 0x7654);
2131 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2132 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2133 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6095_family(ds
) ||
2134 mv88e6xxx_6320_family(ds
)) {
2135 /* Rate Control: disable ingress rate limiting. */
2136 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2137 PORT_RATE_CONTROL
, 0x0001);
2142 /* Port Control 1: disable trunking, disable sending
2143 * learning messages to this port.
2145 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_CONTROL_1
, 0x0000);
2149 /* Port based VLAN map: do not give each port its own address
2150 * database, and allow every port to egress frames on all other ports.
2152 reg
= BIT(ps
->num_ports
) - 1; /* all ports */
2153 ret
= _mv88e6xxx_port_vlan_map_set(ds
, port
, reg
& ~port
);
2157 /* Default VLAN ID and priority: don't set a default VLAN
2158 * ID, and set the default packet priority to zero.
2160 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_DEFAULT_VLAN
,
2163 mutex_unlock(&ps
->smi_mutex
);
2167 int mv88e6xxx_setup_ports(struct dsa_switch
*ds
)
2169 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2173 for (i
= 0; i
< ps
->num_ports
; i
++) {
2174 ret
= mv88e6xxx_setup_port(ds
, i
);
2178 if (dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
))
2181 /* setup the unbridged state */
2182 ret
= mv88e6xxx_port_bridge_leave(ds
, i
, 0);
2189 int mv88e6xxx_setup_common(struct dsa_switch
*ds
)
2191 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2193 mutex_init(&ps
->smi_mutex
);
2195 ps
->id
= REG_READ(REG_PORT(0), PORT_SWITCH_ID
) & 0xfff0;
2197 INIT_WORK(&ps
->bridge_work
, mv88e6xxx_bridge_work
);
2202 int mv88e6xxx_setup_global(struct dsa_switch
*ds
)
2204 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2208 /* Set the default address aging time to 5 minutes, and
2209 * enable address learn messages to be sent to all message
2212 REG_WRITE(REG_GLOBAL
, GLOBAL_ATU_CONTROL
,
2213 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL
);
2215 /* Configure the IP ToS mapping registers. */
2216 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_0
, 0x0000);
2217 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_1
, 0x0000);
2218 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_2
, 0x5555);
2219 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_3
, 0x5555);
2220 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_4
, 0xaaaa);
2221 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_5
, 0xaaaa);
2222 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_6
, 0xffff);
2223 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_7
, 0xffff);
2225 /* Configure the IEEE 802.1p priority mapping register. */
2226 REG_WRITE(REG_GLOBAL
, GLOBAL_IEEE_PRI
, 0xfa41);
2228 /* Send all frames with destination addresses matching
2229 * 01:80:c2:00:00:0x to the CPU port.
2231 REG_WRITE(REG_GLOBAL2
, GLOBAL2_MGMT_EN_0X
, 0xffff);
2233 /* Ignore removed tag data on doubly tagged packets, disable
2234 * flow control messages, force flow control priority to the
2235 * highest, and send all special multicast frames to the CPU
2236 * port at the highest priority.
2238 REG_WRITE(REG_GLOBAL2
, GLOBAL2_SWITCH_MGMT
,
2239 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU
| 0x70 |
2240 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI
);
2242 /* Program the DSA routing table. */
2243 for (i
= 0; i
< 32; i
++) {
2246 if (ds
->pd
->rtable
&&
2247 i
!= ds
->index
&& i
< ds
->dst
->pd
->nr_chips
)
2248 nexthop
= ds
->pd
->rtable
[i
] & 0x1f;
2250 REG_WRITE(REG_GLOBAL2
, GLOBAL2_DEVICE_MAPPING
,
2251 GLOBAL2_DEVICE_MAPPING_UPDATE
|
2252 (i
<< GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT
) |
2256 /* Clear all trunk masks. */
2257 for (i
= 0; i
< 8; i
++)
2258 REG_WRITE(REG_GLOBAL2
, GLOBAL2_TRUNK_MASK
,
2259 0x8000 | (i
<< GLOBAL2_TRUNK_MASK_NUM_SHIFT
) |
2260 ((1 << ps
->num_ports
) - 1));
2262 /* Clear all trunk mappings. */
2263 for (i
= 0; i
< 16; i
++)
2264 REG_WRITE(REG_GLOBAL2
, GLOBAL2_TRUNK_MAPPING
,
2265 GLOBAL2_TRUNK_MAPPING_UPDATE
|
2266 (i
<< GLOBAL2_TRUNK_MAPPING_ID_SHIFT
));
2268 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2269 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2270 mv88e6xxx_6320_family(ds
)) {
2271 /* Send all frames with destination addresses matching
2272 * 01:80:c2:00:00:2x to the CPU port.
2274 REG_WRITE(REG_GLOBAL2
, GLOBAL2_MGMT_EN_2X
, 0xffff);
2276 /* Initialise cross-chip port VLAN table to reset
2279 REG_WRITE(REG_GLOBAL2
, GLOBAL2_PVT_ADDR
, 0x9000);
2281 /* Clear the priority override table. */
2282 for (i
= 0; i
< 16; i
++)
2283 REG_WRITE(REG_GLOBAL2
, GLOBAL2_PRIO_OVERRIDE
,
2287 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2288 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2289 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6095_family(ds
) ||
2290 mv88e6xxx_6320_family(ds
)) {
2291 /* Disable ingress rate limiting by resetting all
2292 * ingress rate limit registers to their initial
2295 for (i
= 0; i
< ps
->num_ports
; i
++)
2296 REG_WRITE(REG_GLOBAL2
, GLOBAL2_INGRESS_OP
,
2300 /* Clear the statistics counters for all ports */
2301 REG_WRITE(REG_GLOBAL
, GLOBAL_STATS_OP
, GLOBAL_STATS_OP_FLUSH_ALL
);
2303 /* Wait for the flush to complete. */
2304 mutex_lock(&ps
->smi_mutex
);
2305 ret
= _mv88e6xxx_stats_wait(ds
);
2309 /* Clear all ATU entries */
2310 ret
= _mv88e6xxx_atu_flush(ds
, 0, true);
2314 /* Clear all the VTU and STU entries */
2315 ret
= _mv88e6xxx_vtu_stu_flush(ds
);
2317 mutex_unlock(&ps
->smi_mutex
);
2322 int mv88e6xxx_switch_reset(struct dsa_switch
*ds
, bool ppu_active
)
2324 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2325 u16 is_reset
= (ppu_active
? 0x8800 : 0xc800);
2326 unsigned long timeout
;
2330 /* Set all ports to the disabled state. */
2331 for (i
= 0; i
< ps
->num_ports
; i
++) {
2332 ret
= REG_READ(REG_PORT(i
), PORT_CONTROL
);
2333 REG_WRITE(REG_PORT(i
), PORT_CONTROL
, ret
& 0xfffc);
2336 /* Wait for transmit queues to drain. */
2337 usleep_range(2000, 4000);
2339 /* Reset the switch. Keep the PPU active if requested. The PPU
2340 * needs to be active to support indirect phy register access
2341 * through global registers 0x18 and 0x19.
2344 REG_WRITE(REG_GLOBAL
, 0x04, 0xc000);
2346 REG_WRITE(REG_GLOBAL
, 0x04, 0xc400);
2348 /* Wait up to one second for reset to complete. */
2349 timeout
= jiffies
+ 1 * HZ
;
2350 while (time_before(jiffies
, timeout
)) {
2351 ret
= REG_READ(REG_GLOBAL
, 0x00);
2352 if ((ret
& is_reset
) == is_reset
)
2354 usleep_range(1000, 2000);
2356 if (time_after(jiffies
, timeout
))
2362 int mv88e6xxx_phy_page_read(struct dsa_switch
*ds
, int port
, int page
, int reg
)
2364 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2367 mutex_lock(&ps
->smi_mutex
);
2368 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, page
);
2371 ret
= _mv88e6xxx_phy_read_indirect(ds
, port
, reg
);
2373 _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, 0x0);
2374 mutex_unlock(&ps
->smi_mutex
);
2378 int mv88e6xxx_phy_page_write(struct dsa_switch
*ds
, int port
, int page
,
2381 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2384 mutex_lock(&ps
->smi_mutex
);
2385 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, page
);
2389 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, reg
, val
);
2391 _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, 0x0);
2392 mutex_unlock(&ps
->smi_mutex
);
2396 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch
*ds
, int port
)
2398 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2400 if (port
>= 0 && port
< ps
->num_ports
)
2406 mv88e6xxx_phy_read(struct dsa_switch
*ds
, int port
, int regnum
)
2408 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2409 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
2415 mutex_lock(&ps
->smi_mutex
);
2416 ret
= _mv88e6xxx_phy_read(ds
, addr
, regnum
);
2417 mutex_unlock(&ps
->smi_mutex
);
2422 mv88e6xxx_phy_write(struct dsa_switch
*ds
, int port
, int regnum
, u16 val
)
2424 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2425 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
2431 mutex_lock(&ps
->smi_mutex
);
2432 ret
= _mv88e6xxx_phy_write(ds
, addr
, regnum
, val
);
2433 mutex_unlock(&ps
->smi_mutex
);
2438 mv88e6xxx_phy_read_indirect(struct dsa_switch
*ds
, int port
, int regnum
)
2440 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2441 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
2447 mutex_lock(&ps
->smi_mutex
);
2448 ret
= _mv88e6xxx_phy_read_indirect(ds
, addr
, regnum
);
2449 mutex_unlock(&ps
->smi_mutex
);
2454 mv88e6xxx_phy_write_indirect(struct dsa_switch
*ds
, int port
, int regnum
,
2457 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2458 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
2464 mutex_lock(&ps
->smi_mutex
);
2465 ret
= _mv88e6xxx_phy_write_indirect(ds
, addr
, regnum
, val
);
2466 mutex_unlock(&ps
->smi_mutex
);
2470 #ifdef CONFIG_NET_DSA_HWMON
2472 static int mv88e61xx_get_temp(struct dsa_switch
*ds
, int *temp
)
2474 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2480 mutex_lock(&ps
->smi_mutex
);
2482 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x16, 0x6);
2486 /* Enable temperature sensor */
2487 ret
= _mv88e6xxx_phy_read(ds
, 0x0, 0x1a);
2491 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x1a, ret
| (1 << 5));
2495 /* Wait for temperature to stabilize */
2496 usleep_range(10000, 12000);
2498 val
= _mv88e6xxx_phy_read(ds
, 0x0, 0x1a);
2504 /* Disable temperature sensor */
2505 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x1a, ret
& ~(1 << 5));
2509 *temp
= ((val
& 0x1f) - 5) * 5;
2512 _mv88e6xxx_phy_write(ds
, 0x0, 0x16, 0x0);
2513 mutex_unlock(&ps
->smi_mutex
);
/* Read the die temperature on 63xx-style chips from phy page 6 reg 27;
 * the low byte is offset by 25 degrees C.  6320-family uses phy 3.
 */
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (ret < 0)
		return ret;

	*temp = (ret & 0xff) - 25;

	return 0;
}
/* Dispatch to the family-specific temperature reader. */
int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		return mv88e63xx_get_temp(ds, temp);

	return mv88e61xx_get_temp(ds, temp);
}
2541 int mv88e6xxx_get_temp_limit(struct dsa_switch
*ds
, int *temp
)
2543 int phy
= mv88e6xxx_6320_family(ds
) ? 3 : 0;
2546 if (!mv88e6xxx_6320_family(ds
) && !mv88e6xxx_6352_family(ds
))
2551 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
2555 *temp
= (((ret
>> 8) & 0x1f) * 5) - 25;
2560 int mv88e6xxx_set_temp_limit(struct dsa_switch
*ds
, int temp
)
2562 int phy
= mv88e6xxx_6320_family(ds
) ? 3 : 0;
2565 if (!mv88e6xxx_6320_family(ds
) && !mv88e6xxx_6352_family(ds
))
2568 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
2571 temp
= clamp_val(DIV_ROUND_CLOSEST(temp
, 5) + 5, 0, 0x1f);
2572 return mv88e6xxx_phy_page_write(ds
, phy
, 6, 26,
2573 (ret
& 0xe0ff) | (temp
<< 8));
2576 int mv88e6xxx_get_temp_alarm(struct dsa_switch
*ds
, bool *alarm
)
2578 int phy
= mv88e6xxx_6320_family(ds
) ? 3 : 0;
2581 if (!mv88e6xxx_6320_family(ds
) && !mv88e6xxx_6352_family(ds
))
2586 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
2590 *alarm
= !!(ret
& 0x40);
2594 #endif /* CONFIG_NET_DSA_HWMON */
2596 char *mv88e6xxx_lookup_name(struct device
*host_dev
, int sw_addr
,
2597 const struct mv88e6xxx_switch_id
*table
,
2600 struct mii_bus
*bus
= dsa_host_dev_to_mii_bus(host_dev
);
2606 ret
= __mv88e6xxx_reg_read(bus
, sw_addr
, REG_PORT(0), PORT_SWITCH_ID
);
2610 /* Look up the exact switch ID */
2611 for (i
= 0; i
< num
; ++i
)
2612 if (table
[i
].id
== ret
)
2613 return table
[i
].name
;
2615 /* Look up only the product number */
2616 for (i
= 0; i
< num
; ++i
) {
2617 if (table
[i
].id
== (ret
& PORT_SWITCH_ID_PROD_NUM_MASK
)) {
2618 dev_warn(host_dev
, "unknown revision %d, using base switch 0x%x\n",
2619 ret
& PORT_SWITCH_ID_REV_MASK
,
2620 ret
& PORT_SWITCH_ID_PROD_NUM_MASK
);
2621 return table
[i
].name
;
2628 static int __init
mv88e6xxx_init(void)
2630 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2631 register_switch_driver(&mv88e6131_switch_driver
);
2633 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2634 register_switch_driver(&mv88e6123_61_65_switch_driver
);
2636 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2637 register_switch_driver(&mv88e6352_switch_driver
);
2639 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2640 register_switch_driver(&mv88e6171_switch_driver
);
2644 module_init(mv88e6xxx_init
);
2646 static void __exit
mv88e6xxx_cleanup(void)
2648 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2649 unregister_switch_driver(&mv88e6171_switch_driver
);
2651 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2652 unregister_switch_driver(&mv88e6352_switch_driver
);
2654 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2655 unregister_switch_driver(&mv88e6123_61_65_switch_driver
);
2657 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2658 unregister_switch_driver(&mv88e6131_switch_driver
);
2661 module_exit(mv88e6xxx_cleanup
);
2663 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2664 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2665 MODULE_LICENSE("GPL");