/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
14 #include <linux/debugfs.h>
15 #include <linux/delay.h>
16 #include <linux/etherdevice.h>
17 #include <linux/ethtool.h>
18 #include <linux/if_bridge.h>
19 #include <linux/jiffies.h>
20 #include <linux/list.h>
21 #include <linux/module.h>
22 #include <linux/netdevice.h>
23 #include <linux/phy.h>
24 #include <linux/seq_file.h>
26 #include <net/switchdev.h>
27 #include "mv88e6xxx.h"
/* MDIO bus access can be nested in the case of PHYs connected to the
 * internal MDIO bus of the switch, which is accessed via MDIO bus of
 * the Ethernet interface. Avoid lockdep false positives by using
 * mutex_lock_nested().
 */
34 static int mv88e6xxx_mdiobus_read(struct mii_bus
*bus
, int addr
, u32 regnum
)
38 mutex_lock_nested(&bus
->mdio_lock
, SINGLE_DEPTH_NESTING
);
39 ret
= bus
->read(bus
, addr
, regnum
);
40 mutex_unlock(&bus
->mdio_lock
);
45 static int mv88e6xxx_mdiobus_write(struct mii_bus
*bus
, int addr
, u32 regnum
,
50 mutex_lock_nested(&bus
->mdio_lock
, SINGLE_DEPTH_NESTING
);
51 ret
= bus
->write(bus
, addr
, regnum
, val
);
52 mutex_unlock(&bus
->mdio_lock
);
/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
65 static int mv88e6xxx_reg_wait_ready(struct mii_bus
*bus
, int sw_addr
)
70 for (i
= 0; i
< 16; i
++) {
71 ret
= mv88e6xxx_mdiobus_read(bus
, sw_addr
, SMI_CMD
);
75 if ((ret
& SMI_CMD_BUSY
) == 0)
82 int __mv88e6xxx_reg_read(struct mii_bus
*bus
, int sw_addr
, int addr
, int reg
)
87 return mv88e6xxx_mdiobus_read(bus
, addr
, reg
);
89 /* Wait for the bus to become free. */
90 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
94 /* Transmit the read command. */
95 ret
= mv88e6xxx_mdiobus_write(bus
, sw_addr
, SMI_CMD
,
96 SMI_CMD_OP_22_READ
| (addr
<< 5) | reg
);
100 /* Wait for the read command to complete. */
101 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
106 ret
= mv88e6xxx_mdiobus_read(bus
, sw_addr
, SMI_DATA
);
113 /* Must be called with SMI mutex held */
114 static int _mv88e6xxx_reg_read(struct dsa_switch
*ds
, int addr
, int reg
)
116 struct mii_bus
*bus
= dsa_host_dev_to_mii_bus(ds
->master_dev
);
122 ret
= __mv88e6xxx_reg_read(bus
, ds
->pd
->sw_addr
, addr
, reg
);
126 dev_dbg(ds
->master_dev
, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
132 int mv88e6xxx_reg_read(struct dsa_switch
*ds
, int addr
, int reg
)
134 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
137 mutex_lock(&ps
->smi_mutex
);
138 ret
= _mv88e6xxx_reg_read(ds
, addr
, reg
);
139 mutex_unlock(&ps
->smi_mutex
);
144 int __mv88e6xxx_reg_write(struct mii_bus
*bus
, int sw_addr
, int addr
,
150 return mv88e6xxx_mdiobus_write(bus
, addr
, reg
, val
);
152 /* Wait for the bus to become free. */
153 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
157 /* Transmit the data to write. */
158 ret
= mv88e6xxx_mdiobus_write(bus
, sw_addr
, SMI_DATA
, val
);
162 /* Transmit the write command. */
163 ret
= mv88e6xxx_mdiobus_write(bus
, sw_addr
, SMI_CMD
,
164 SMI_CMD_OP_22_WRITE
| (addr
<< 5) | reg
);
168 /* Wait for the write command to complete. */
169 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
176 /* Must be called with SMI mutex held */
177 static int _mv88e6xxx_reg_write(struct dsa_switch
*ds
, int addr
, int reg
,
180 struct mii_bus
*bus
= dsa_host_dev_to_mii_bus(ds
->master_dev
);
185 dev_dbg(ds
->master_dev
, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
188 return __mv88e6xxx_reg_write(bus
, ds
->pd
->sw_addr
, addr
, reg
, val
);
191 int mv88e6xxx_reg_write(struct dsa_switch
*ds
, int addr
, int reg
, u16 val
)
193 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
196 mutex_lock(&ps
->smi_mutex
);
197 ret
= _mv88e6xxx_reg_write(ds
, addr
, reg
, val
);
198 mutex_unlock(&ps
->smi_mutex
);
203 int mv88e6xxx_set_addr_direct(struct dsa_switch
*ds
, u8
*addr
)
205 REG_WRITE(REG_GLOBAL
, GLOBAL_MAC_01
, (addr
[0] << 8) | addr
[1]);
206 REG_WRITE(REG_GLOBAL
, GLOBAL_MAC_23
, (addr
[2] << 8) | addr
[3]);
207 REG_WRITE(REG_GLOBAL
, GLOBAL_MAC_45
, (addr
[4] << 8) | addr
[5]);
212 int mv88e6xxx_set_addr_indirect(struct dsa_switch
*ds
, u8
*addr
)
217 for (i
= 0; i
< 6; i
++) {
220 /* Write the MAC address byte. */
221 REG_WRITE(REG_GLOBAL2
, GLOBAL2_SWITCH_MAC
,
222 GLOBAL2_SWITCH_MAC_BUSY
| (i
<< 8) | addr
[i
]);
224 /* Wait for the write to complete. */
225 for (j
= 0; j
< 16; j
++) {
226 ret
= REG_READ(REG_GLOBAL2
, GLOBAL2_SWITCH_MAC
);
227 if ((ret
& GLOBAL2_SWITCH_MAC_BUSY
) == 0)
/* Must be called with SMI mutex held.  Direct PHY read: the internal
 * PHYs appear as ordinary register devices on the switch bus.
 */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	return _mv88e6xxx_reg_read(ds, addr, regnum);
}
245 /* Must be called with SMI mutex held */
246 static int _mv88e6xxx_phy_write(struct dsa_switch
*ds
, int addr
, int regnum
,
250 return _mv88e6xxx_reg_write(ds
, addr
, regnum
, val
);
254 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
255 static int mv88e6xxx_ppu_disable(struct dsa_switch
*ds
)
258 unsigned long timeout
;
260 ret
= REG_READ(REG_GLOBAL
, GLOBAL_CONTROL
);
261 REG_WRITE(REG_GLOBAL
, GLOBAL_CONTROL
,
262 ret
& ~GLOBAL_CONTROL_PPU_ENABLE
);
264 timeout
= jiffies
+ 1 * HZ
;
265 while (time_before(jiffies
, timeout
)) {
266 ret
= REG_READ(REG_GLOBAL
, GLOBAL_STATUS
);
267 usleep_range(1000, 2000);
268 if ((ret
& GLOBAL_STATUS_PPU_MASK
) !=
269 GLOBAL_STATUS_PPU_POLLING
)
276 static int mv88e6xxx_ppu_enable(struct dsa_switch
*ds
)
279 unsigned long timeout
;
281 ret
= REG_READ(REG_GLOBAL
, GLOBAL_CONTROL
);
282 REG_WRITE(REG_GLOBAL
, GLOBAL_CONTROL
, ret
| GLOBAL_CONTROL_PPU_ENABLE
);
284 timeout
= jiffies
+ 1 * HZ
;
285 while (time_before(jiffies
, timeout
)) {
286 ret
= REG_READ(REG_GLOBAL
, GLOBAL_STATUS
);
287 usleep_range(1000, 2000);
288 if ((ret
& GLOBAL_STATUS_PPU_MASK
) ==
289 GLOBAL_STATUS_PPU_POLLING
)
296 static void mv88e6xxx_ppu_reenable_work(struct work_struct
*ugly
)
298 struct mv88e6xxx_priv_state
*ps
;
300 ps
= container_of(ugly
, struct mv88e6xxx_priv_state
, ppu_work
);
301 if (mutex_trylock(&ps
->ppu_mutex
)) {
302 struct dsa_switch
*ds
= ((struct dsa_switch
*)ps
) - 1;
304 if (mv88e6xxx_ppu_enable(ds
) == 0)
305 ps
->ppu_disabled
= 0;
306 mutex_unlock(&ps
->ppu_mutex
);
310 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps
)
312 struct mv88e6xxx_priv_state
*ps
= (void *)_ps
;
314 schedule_work(&ps
->ppu_work
);
317 static int mv88e6xxx_ppu_access_get(struct dsa_switch
*ds
)
319 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
322 mutex_lock(&ps
->ppu_mutex
);
324 /* If the PHY polling unit is enabled, disable it so that
325 * we can access the PHY registers. If it was already
326 * disabled, cancel the timer that is going to re-enable
329 if (!ps
->ppu_disabled
) {
330 ret
= mv88e6xxx_ppu_disable(ds
);
332 mutex_unlock(&ps
->ppu_mutex
);
335 ps
->ppu_disabled
= 1;
337 del_timer(&ps
->ppu_timer
);
344 static void mv88e6xxx_ppu_access_put(struct dsa_switch
*ds
)
346 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
348 /* Schedule a timer to re-enable the PHY polling unit. */
349 mod_timer(&ps
->ppu_timer
, jiffies
+ msecs_to_jiffies(10));
350 mutex_unlock(&ps
->ppu_mutex
);
353 void mv88e6xxx_ppu_state_init(struct dsa_switch
*ds
)
355 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
357 mutex_init(&ps
->ppu_mutex
);
358 INIT_WORK(&ps
->ppu_work
, mv88e6xxx_ppu_reenable_work
);
359 init_timer(&ps
->ppu_timer
);
360 ps
->ppu_timer
.data
= (unsigned long)ps
;
361 ps
->ppu_timer
.function
= mv88e6xxx_ppu_reenable_timer
;
/* PHY read for chips whose PPU must be paused around PHY register
 * access.  Returns the register value or a negative errno.
 */
int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
377 int mv88e6xxx_phy_write_ppu(struct dsa_switch
*ds
, int addr
,
382 ret
= mv88e6xxx_ppu_access_get(ds
);
384 ret
= mv88e6xxx_reg_write(ds
, addr
, regnum
, val
);
385 mv88e6xxx_ppu_access_put(ds
);
392 static bool mv88e6xxx_6065_family(struct dsa_switch
*ds
)
394 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
397 case PORT_SWITCH_ID_6031
:
398 case PORT_SWITCH_ID_6061
:
399 case PORT_SWITCH_ID_6035
:
400 case PORT_SWITCH_ID_6065
:
406 static bool mv88e6xxx_6095_family(struct dsa_switch
*ds
)
408 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
411 case PORT_SWITCH_ID_6092
:
412 case PORT_SWITCH_ID_6095
:
418 static bool mv88e6xxx_6097_family(struct dsa_switch
*ds
)
420 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
423 case PORT_SWITCH_ID_6046
:
424 case PORT_SWITCH_ID_6085
:
425 case PORT_SWITCH_ID_6096
:
426 case PORT_SWITCH_ID_6097
:
432 static bool mv88e6xxx_6165_family(struct dsa_switch
*ds
)
434 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
437 case PORT_SWITCH_ID_6123
:
438 case PORT_SWITCH_ID_6161
:
439 case PORT_SWITCH_ID_6165
:
445 static bool mv88e6xxx_6185_family(struct dsa_switch
*ds
)
447 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
450 case PORT_SWITCH_ID_6121
:
451 case PORT_SWITCH_ID_6122
:
452 case PORT_SWITCH_ID_6152
:
453 case PORT_SWITCH_ID_6155
:
454 case PORT_SWITCH_ID_6182
:
455 case PORT_SWITCH_ID_6185
:
456 case PORT_SWITCH_ID_6108
:
457 case PORT_SWITCH_ID_6131
:
463 static bool mv88e6xxx_6320_family(struct dsa_switch
*ds
)
465 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
468 case PORT_SWITCH_ID_6320
:
469 case PORT_SWITCH_ID_6321
:
475 static bool mv88e6xxx_6351_family(struct dsa_switch
*ds
)
477 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
480 case PORT_SWITCH_ID_6171
:
481 case PORT_SWITCH_ID_6175
:
482 case PORT_SWITCH_ID_6350
:
483 case PORT_SWITCH_ID_6351
:
489 static bool mv88e6xxx_6352_family(struct dsa_switch
*ds
)
491 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
494 case PORT_SWITCH_ID_6172
:
495 case PORT_SWITCH_ID_6176
:
496 case PORT_SWITCH_ID_6240
:
497 case PORT_SWITCH_ID_6352
:
503 /* We expect the switch to perform auto negotiation if there is a real
504 * phy. However, in the case of a fixed link phy, we force the port
505 * settings from the fixed link settings.
507 void mv88e6xxx_adjust_link(struct dsa_switch
*ds
, int port
,
508 struct phy_device
*phydev
)
510 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
514 if (!phy_is_pseudo_fixed_link(phydev
))
517 mutex_lock(&ps
->smi_mutex
);
519 ret
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_PCS_CTRL
);
523 reg
= ret
& ~(PORT_PCS_CTRL_LINK_UP
|
524 PORT_PCS_CTRL_FORCE_LINK
|
525 PORT_PCS_CTRL_DUPLEX_FULL
|
526 PORT_PCS_CTRL_FORCE_DUPLEX
|
527 PORT_PCS_CTRL_UNFORCED
);
529 reg
|= PORT_PCS_CTRL_FORCE_LINK
;
531 reg
|= PORT_PCS_CTRL_LINK_UP
;
533 if (mv88e6xxx_6065_family(ds
) && phydev
->speed
> SPEED_100
)
536 switch (phydev
->speed
) {
538 reg
|= PORT_PCS_CTRL_1000
;
541 reg
|= PORT_PCS_CTRL_100
;
544 reg
|= PORT_PCS_CTRL_10
;
547 pr_info("Unknown speed");
551 reg
|= PORT_PCS_CTRL_FORCE_DUPLEX
;
552 if (phydev
->duplex
== DUPLEX_FULL
)
553 reg
|= PORT_PCS_CTRL_DUPLEX_FULL
;
555 if ((mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
)) &&
556 (port
>= ps
->num_ports
- 2)) {
557 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_RXID
)
558 reg
|= PORT_PCS_CTRL_RGMII_DELAY_RXCLK
;
559 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_TXID
)
560 reg
|= PORT_PCS_CTRL_RGMII_DELAY_TXCLK
;
561 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_ID
)
562 reg
|= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK
|
563 PORT_PCS_CTRL_RGMII_DELAY_TXCLK
);
565 _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_PCS_CTRL
, reg
);
568 mutex_unlock(&ps
->smi_mutex
);
571 /* Must be called with SMI mutex held */
572 static int _mv88e6xxx_stats_wait(struct dsa_switch
*ds
)
577 for (i
= 0; i
< 10; i
++) {
578 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_STATS_OP
);
579 if ((ret
& GLOBAL_STATS_OP_BUSY
) == 0)
586 /* Must be called with SMI mutex held */
587 static int _mv88e6xxx_stats_snapshot(struct dsa_switch
*ds
, int port
)
591 if (mv88e6xxx_6320_family(ds
) || mv88e6xxx_6352_family(ds
))
592 port
= (port
+ 1) << 5;
594 /* Snapshot the hardware statistics counters for this port. */
595 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_STATS_OP
,
596 GLOBAL_STATS_OP_CAPTURE_PORT
|
597 GLOBAL_STATS_OP_HIST_RX_TX
| port
);
601 /* Wait for the snapshotting to complete. */
602 ret
= _mv88e6xxx_stats_wait(ds
);
609 /* Must be called with SMI mutex held */
610 static void _mv88e6xxx_stats_read(struct dsa_switch
*ds
, int stat
, u32
*val
)
617 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_STATS_OP
,
618 GLOBAL_STATS_OP_READ_CAPTURED
|
619 GLOBAL_STATS_OP_HIST_RX_TX
| stat
);
623 ret
= _mv88e6xxx_stats_wait(ds
);
627 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_STATS_COUNTER_32
);
633 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_STATS_COUNTER_01
);
640 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats
[] = {
641 { "in_good_octets", 8, 0x00, },
642 { "in_bad_octets", 4, 0x02, },
643 { "in_unicast", 4, 0x04, },
644 { "in_broadcasts", 4, 0x06, },
645 { "in_multicasts", 4, 0x07, },
646 { "in_pause", 4, 0x16, },
647 { "in_undersize", 4, 0x18, },
648 { "in_fragments", 4, 0x19, },
649 { "in_oversize", 4, 0x1a, },
650 { "in_jabber", 4, 0x1b, },
651 { "in_rx_error", 4, 0x1c, },
652 { "in_fcs_error", 4, 0x1d, },
653 { "out_octets", 8, 0x0e, },
654 { "out_unicast", 4, 0x10, },
655 { "out_broadcasts", 4, 0x13, },
656 { "out_multicasts", 4, 0x12, },
657 { "out_pause", 4, 0x15, },
658 { "excessive", 4, 0x11, },
659 { "collisions", 4, 0x1e, },
660 { "deferred", 4, 0x05, },
661 { "single", 4, 0x14, },
662 { "multiple", 4, 0x17, },
663 { "out_fcs_error", 4, 0x03, },
664 { "late", 4, 0x1f, },
665 { "hist_64bytes", 4, 0x08, },
666 { "hist_65_127bytes", 4, 0x09, },
667 { "hist_128_255bytes", 4, 0x0a, },
668 { "hist_256_511bytes", 4, 0x0b, },
669 { "hist_512_1023bytes", 4, 0x0c, },
670 { "hist_1024_max_bytes", 4, 0x0d, },
671 /* Not all devices have the following counters */
672 { "sw_in_discards", 4, 0x110, },
673 { "sw_in_filtered", 2, 0x112, },
674 { "sw_out_filtered", 2, 0x113, },
678 static bool have_sw_in_discards(struct dsa_switch
*ds
)
680 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
683 case PORT_SWITCH_ID_6095
: case PORT_SWITCH_ID_6161
:
684 case PORT_SWITCH_ID_6165
: case PORT_SWITCH_ID_6171
:
685 case PORT_SWITCH_ID_6172
: case PORT_SWITCH_ID_6176
:
686 case PORT_SWITCH_ID_6182
: case PORT_SWITCH_ID_6185
:
687 case PORT_SWITCH_ID_6352
:
694 static void _mv88e6xxx_get_strings(struct dsa_switch
*ds
,
696 struct mv88e6xxx_hw_stat
*stats
,
697 int port
, uint8_t *data
)
701 for (i
= 0; i
< nr_stats
; i
++) {
702 memcpy(data
+ i
* ETH_GSTRING_LEN
,
703 stats
[i
].string
, ETH_GSTRING_LEN
);
707 static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch
*ds
,
709 struct mv88e6xxx_hw_stat
*stats
,
712 struct mv88e6xxx_hw_stat
*s
= stats
+ stat
;
718 if (s
->reg
>= 0x100) {
719 ret
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
),
725 if (s
->sizeof_stat
== 4) {
726 ret
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
),
733 _mv88e6xxx_stats_read(ds
, s
->reg
, &low
);
734 if (s
->sizeof_stat
== 8)
735 _mv88e6xxx_stats_read(ds
, s
->reg
+ 1, &high
);
737 value
= (((u64
)high
) << 16) | low
;
741 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch
*ds
,
743 struct mv88e6xxx_hw_stat
*stats
,
744 int port
, uint64_t *data
)
746 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
750 mutex_lock(&ps
->smi_mutex
);
752 ret
= _mv88e6xxx_stats_snapshot(ds
, port
);
754 mutex_unlock(&ps
->smi_mutex
);
758 /* Read each of the counters. */
759 for (i
= 0; i
< nr_stats
; i
++)
760 data
[i
] = _mv88e6xxx_get_ethtool_stat(ds
, i
, stats
, port
);
762 mutex_unlock(&ps
->smi_mutex
);
765 /* All the statistics in the table */
767 mv88e6xxx_get_strings(struct dsa_switch
*ds
, int port
, uint8_t *data
)
769 if (have_sw_in_discards(ds
))
770 _mv88e6xxx_get_strings(ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
),
771 mv88e6xxx_hw_stats
, port
, data
);
773 _mv88e6xxx_get_strings(ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3,
774 mv88e6xxx_hw_stats
, port
, data
);
777 int mv88e6xxx_get_sset_count(struct dsa_switch
*ds
)
779 if (have_sw_in_discards(ds
))
780 return ARRAY_SIZE(mv88e6xxx_hw_stats
);
781 return ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3;
785 mv88e6xxx_get_ethtool_stats(struct dsa_switch
*ds
,
786 int port
, uint64_t *data
)
788 if (have_sw_in_discards(ds
))
789 _mv88e6xxx_get_ethtool_stats(
790 ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
),
791 mv88e6xxx_hw_stats
, port
, data
);
793 _mv88e6xxx_get_ethtool_stats(
794 ds
, ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3,
795 mv88e6xxx_hw_stats
, port
, data
);
798 int mv88e6xxx_get_regs_len(struct dsa_switch
*ds
, int port
)
800 return 32 * sizeof(u16
);
803 void mv88e6xxx_get_regs(struct dsa_switch
*ds
, int port
,
804 struct ethtool_regs
*regs
, void *_p
)
811 memset(p
, 0xff, 32 * sizeof(u16
));
813 for (i
= 0; i
< 32; i
++) {
816 ret
= mv88e6xxx_reg_read(ds
, REG_PORT(port
), i
);
822 /* Must be called with SMI lock held */
823 static int _mv88e6xxx_wait(struct dsa_switch
*ds
, int reg
, int offset
,
826 unsigned long timeout
= jiffies
+ HZ
/ 10;
828 while (time_before(jiffies
, timeout
)) {
831 ret
= _mv88e6xxx_reg_read(ds
, reg
, offset
);
837 usleep_range(1000, 2000);
842 static int mv88e6xxx_wait(struct dsa_switch
*ds
, int reg
, int offset
, u16 mask
)
844 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
847 mutex_lock(&ps
->smi_mutex
);
848 ret
= _mv88e6xxx_wait(ds
, reg
, offset
, mask
);
849 mutex_unlock(&ps
->smi_mutex
);
854 static int _mv88e6xxx_phy_wait(struct dsa_switch
*ds
)
856 return _mv88e6xxx_wait(ds
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
857 GLOBAL2_SMI_OP_BUSY
);
860 int mv88e6xxx_eeprom_load_wait(struct dsa_switch
*ds
)
862 return mv88e6xxx_wait(ds
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
863 GLOBAL2_EEPROM_OP_LOAD
);
866 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch
*ds
)
868 return mv88e6xxx_wait(ds
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
869 GLOBAL2_EEPROM_OP_BUSY
);
872 /* Must be called with SMI lock held */
873 static int _mv88e6xxx_atu_wait(struct dsa_switch
*ds
)
875 return _mv88e6xxx_wait(ds
, REG_GLOBAL
, GLOBAL_ATU_OP
,
879 /* Must be called with SMI lock held */
880 static int _mv88e6xxx_scratch_wait(struct dsa_switch
*ds
)
882 return _mv88e6xxx_wait(ds
, REG_GLOBAL2
, GLOBAL2_SCRATCH_MISC
,
883 GLOBAL2_SCRATCH_BUSY
);
886 /* Must be called with SMI mutex held */
887 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch
*ds
, int addr
,
892 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
893 GLOBAL2_SMI_OP_22_READ
| (addr
<< 5) |
898 ret
= _mv88e6xxx_phy_wait(ds
);
902 return _mv88e6xxx_reg_read(ds
, REG_GLOBAL2
, GLOBAL2_SMI_DATA
);
905 /* Must be called with SMI mutex held */
906 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch
*ds
, int addr
,
911 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL2
, GLOBAL2_SMI_DATA
, val
);
915 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
916 GLOBAL2_SMI_OP_22_WRITE
| (addr
<< 5) |
919 return _mv88e6xxx_phy_wait(ds
);
922 int mv88e6xxx_get_eee(struct dsa_switch
*ds
, int port
, struct ethtool_eee
*e
)
924 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
927 mutex_lock(&ps
->smi_mutex
);
929 reg
= _mv88e6xxx_phy_read_indirect(ds
, port
, 16);
933 e
->eee_enabled
= !!(reg
& 0x0200);
934 e
->tx_lpi_enabled
= !!(reg
& 0x0100);
936 reg
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_STATUS
);
940 e
->eee_active
= !!(reg
& PORT_STATUS_EEE
);
944 mutex_unlock(&ps
->smi_mutex
);
948 int mv88e6xxx_set_eee(struct dsa_switch
*ds
, int port
,
949 struct phy_device
*phydev
, struct ethtool_eee
*e
)
951 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
955 mutex_lock(&ps
->smi_mutex
);
957 ret
= _mv88e6xxx_phy_read_indirect(ds
, port
, 16);
964 if (e
->tx_lpi_enabled
)
967 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 16, reg
);
969 mutex_unlock(&ps
->smi_mutex
);
974 static int _mv88e6xxx_atu_cmd(struct dsa_switch
*ds
, u16 cmd
)
978 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_OP
, cmd
);
982 return _mv88e6xxx_atu_wait(ds
);
985 static int _mv88e6xxx_atu_data_write(struct dsa_switch
*ds
,
986 struct mv88e6xxx_atu_entry
*entry
)
988 u16 data
= entry
->state
& GLOBAL_ATU_DATA_STATE_MASK
;
990 if (entry
->state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
991 unsigned int mask
, shift
;
994 data
|= GLOBAL_ATU_DATA_TRUNK
;
995 mask
= GLOBAL_ATU_DATA_TRUNK_ID_MASK
;
996 shift
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT
;
998 mask
= GLOBAL_ATU_DATA_PORT_VECTOR_MASK
;
999 shift
= GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
;
1002 data
|= (entry
->portv_trunkid
<< shift
) & mask
;
1005 return _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_DATA
, data
);
1008 static int _mv88e6xxx_atu_flush_move(struct dsa_switch
*ds
,
1009 struct mv88e6xxx_atu_entry
*entry
,
1015 err
= _mv88e6xxx_atu_wait(ds
);
1019 err
= _mv88e6xxx_atu_data_write(ds
, entry
);
1024 err
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_FID
,
1029 op
= static_too
? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB
:
1030 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB
;
1032 op
= static_too
? GLOBAL_ATU_OP_FLUSH_MOVE_ALL
:
1033 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC
;
1036 return _mv88e6xxx_atu_cmd(ds
, op
);
1039 static int _mv88e6xxx_atu_flush(struct dsa_switch
*ds
, u16 fid
, bool static_too
)
1041 struct mv88e6xxx_atu_entry entry
= {
1043 .state
= 0, /* EntryState bits must be 0 */
1046 return _mv88e6xxx_atu_flush_move(ds
, &entry
, static_too
);
1049 static int _mv88e6xxx_flush_fid(struct dsa_switch
*ds
, int fid
)
1051 return _mv88e6xxx_atu_flush(ds
, fid
, false);
1054 static int _mv88e6xxx_atu_move(struct dsa_switch
*ds
, u16 fid
, int from_port
,
1055 int to_port
, bool static_too
)
1057 struct mv88e6xxx_atu_entry entry
= {
1062 /* EntryState bits must be 0xF */
1063 entry
.state
= GLOBAL_ATU_DATA_STATE_MASK
;
1065 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1066 entry
.portv_trunkid
= (to_port
& 0x0f) << 4;
1067 entry
.portv_trunkid
|= from_port
& 0x0f;
1069 return _mv88e6xxx_atu_flush_move(ds
, &entry
, static_too
);
1072 static int _mv88e6xxx_atu_remove(struct dsa_switch
*ds
, u16 fid
, int port
,
1075 /* Destination port 0xF means remove the entries */
1076 return _mv88e6xxx_atu_move(ds
, fid
, port
, 0x0f, static_too
);
1079 static int mv88e6xxx_set_port_state(struct dsa_switch
*ds
, int port
, u8 state
)
1081 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1085 mutex_lock(&ps
->smi_mutex
);
1087 reg
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_CONTROL
);
1093 oldstate
= reg
& PORT_CONTROL_STATE_MASK
;
1094 if (oldstate
!= state
) {
1095 /* Flush forwarding database if we're moving a port
1096 * from Learning or Forwarding state to Disabled or
1097 * Blocking or Listening state.
1099 if (oldstate
>= PORT_CONTROL_STATE_LEARNING
&&
1100 state
<= PORT_CONTROL_STATE_BLOCKING
) {
1101 ret
= _mv88e6xxx_atu_remove(ds
, 0, port
, false);
1105 reg
= (reg
& ~PORT_CONTROL_STATE_MASK
) | state
;
1106 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_CONTROL
,
1111 mutex_unlock(&ps
->smi_mutex
);
1115 /* Must be called with smi lock held */
1116 static int _mv88e6xxx_update_port_config(struct dsa_switch
*ds
, int port
)
1118 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1119 u8 fid
= ps
->fid
[port
];
1120 u16 reg
= fid
<< 12;
1122 if (dsa_is_cpu_port(ds
, port
))
1123 reg
|= ds
->phys_port_mask
;
1125 reg
|= (ps
->bridge_mask
[fid
] |
1126 (1 << dsa_upstream_port(ds
))) & ~(1 << port
);
1128 return _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_BASE_VLAN
, reg
);
1131 /* Must be called with smi lock held */
1132 static int _mv88e6xxx_update_bridge_config(struct dsa_switch
*ds
, int fid
)
1134 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1139 mask
= ds
->phys_port_mask
;
1142 mask
&= ~(1 << port
);
1143 if (ps
->fid
[port
] != fid
)
1146 ret
= _mv88e6xxx_update_port_config(ds
, port
);
1151 return _mv88e6xxx_flush_fid(ds
, fid
);
1154 /* Bridge handling functions */
1156 int mv88e6xxx_join_bridge(struct dsa_switch
*ds
, int port
, u32 br_port_mask
)
1158 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1163 /* If the bridge group is not empty, join that group.
1164 * Otherwise create a new group.
1166 fid
= ps
->fid
[port
];
1167 nmask
= br_port_mask
& ~(1 << port
);
1169 fid
= ps
->fid
[__ffs(nmask
)];
1171 nmask
= ps
->bridge_mask
[fid
] | (1 << port
);
1172 if (nmask
!= br_port_mask
) {
1173 netdev_err(ds
->ports
[port
],
1174 "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1175 fid
, br_port_mask
, nmask
);
1179 mutex_lock(&ps
->smi_mutex
);
1181 ps
->bridge_mask
[fid
] = br_port_mask
;
1183 if (fid
!= ps
->fid
[port
]) {
1184 clear_bit(ps
->fid
[port
], ps
->fid_bitmap
);
1185 ps
->fid
[port
] = fid
;
1186 ret
= _mv88e6xxx_update_bridge_config(ds
, fid
);
1189 mutex_unlock(&ps
->smi_mutex
);
1194 int mv88e6xxx_leave_bridge(struct dsa_switch
*ds
, int port
, u32 br_port_mask
)
1196 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1200 fid
= ps
->fid
[port
];
1202 if (ps
->bridge_mask
[fid
] != br_port_mask
) {
1203 netdev_err(ds
->ports
[port
],
1204 "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1205 fid
, br_port_mask
, ps
->bridge_mask
[fid
]);
1209 /* If the port was the last port of a bridge, we are done.
1210 * Otherwise assign a new fid to the port, and fix up
1211 * the bridge configuration.
1213 if (br_port_mask
== (1 << port
))
1216 mutex_lock(&ps
->smi_mutex
);
1218 newfid
= find_next_zero_bit(ps
->fid_bitmap
, VLAN_N_VID
, 1);
1219 if (unlikely(newfid
> ps
->num_ports
)) {
1220 netdev_err(ds
->ports
[port
], "all first %d FIDs are used\n",
1226 ps
->fid
[port
] = newfid
;
1227 set_bit(newfid
, ps
->fid_bitmap
);
1228 ps
->bridge_mask
[fid
] &= ~(1 << port
);
1229 ps
->bridge_mask
[newfid
] = 1 << port
;
1231 ret
= _mv88e6xxx_update_bridge_config(ds
, fid
);
1233 ret
= _mv88e6xxx_update_bridge_config(ds
, newfid
);
1236 mutex_unlock(&ps
->smi_mutex
);
1241 int mv88e6xxx_port_stp_update(struct dsa_switch
*ds
, int port
, u8 state
)
1243 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1247 case BR_STATE_DISABLED
:
1248 stp_state
= PORT_CONTROL_STATE_DISABLED
;
1250 case BR_STATE_BLOCKING
:
1251 case BR_STATE_LISTENING
:
1252 stp_state
= PORT_CONTROL_STATE_BLOCKING
;
1254 case BR_STATE_LEARNING
:
1255 stp_state
= PORT_CONTROL_STATE_LEARNING
;
1257 case BR_STATE_FORWARDING
:
1259 stp_state
= PORT_CONTROL_STATE_FORWARDING
;
1263 netdev_dbg(ds
->ports
[port
], "port state %d [%d]\n", state
, stp_state
);
1265 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1266 * so we can not update the port state directly but need to schedule it.
1268 ps
->port_state
[port
] = stp_state
;
1269 set_bit(port
, &ps
->port_state_update_mask
);
1270 schedule_work(&ps
->bridge_work
);
1275 int mv88e6xxx_port_pvid_get(struct dsa_switch
*ds
, int port
, u16
*pvid
)
1279 ret
= mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_DEFAULT_VLAN
);
1283 *pvid
= ret
& PORT_DEFAULT_VLAN_MASK
;
1288 int mv88e6xxx_port_pvid_set(struct dsa_switch
*ds
, int port
, u16 pvid
)
1290 return mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_DEFAULT_VLAN
,
1291 pvid
& PORT_DEFAULT_VLAN_MASK
);
1294 static int _mv88e6xxx_vtu_wait(struct dsa_switch
*ds
)
1296 return _mv88e6xxx_wait(ds
, REG_GLOBAL
, GLOBAL_VTU_OP
,
1297 GLOBAL_VTU_OP_BUSY
);
1300 static int _mv88e6xxx_vtu_cmd(struct dsa_switch
*ds
, u16 op
)
1304 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_OP
, op
);
1308 return _mv88e6xxx_vtu_wait(ds
);
1311 static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch
*ds
)
1315 ret
= _mv88e6xxx_vtu_wait(ds
);
1319 return _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_FLUSH_ALL
);
1322 static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch
*ds
,
1323 struct mv88e6xxx_vtu_stu_entry
*entry
,
1324 unsigned int nibble_offset
)
1326 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1331 for (i
= 0; i
< 3; ++i
) {
1332 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
,
1333 GLOBAL_VTU_DATA_0_3
+ i
);
1340 for (i
= 0; i
< ps
->num_ports
; ++i
) {
1341 unsigned int shift
= (i
% 4) * 4 + nibble_offset
;
1342 u16 reg
= regs
[i
/ 4];
1344 entry
->data
[i
] = (reg
>> shift
) & GLOBAL_VTU_STU_DATA_MASK
;
1350 static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch
*ds
,
1351 struct mv88e6xxx_vtu_stu_entry
*entry
,
1352 unsigned int nibble_offset
)
1354 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1355 u16 regs
[3] = { 0 };
1359 for (i
= 0; i
< ps
->num_ports
; ++i
) {
1360 unsigned int shift
= (i
% 4) * 4 + nibble_offset
;
1361 u8 data
= entry
->data
[i
];
1363 regs
[i
/ 4] |= (data
& GLOBAL_VTU_STU_DATA_MASK
) << shift
;
1366 for (i
= 0; i
< 3; ++i
) {
1367 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
,
1368 GLOBAL_VTU_DATA_0_3
+ i
, regs
[i
]);
1376 static int _mv88e6xxx_vtu_getnext(struct dsa_switch
*ds
, u16 vid
,
1377 struct mv88e6xxx_vtu_stu_entry
*entry
)
1379 struct mv88e6xxx_vtu_stu_entry next
= { 0 };
1382 ret
= _mv88e6xxx_vtu_wait(ds
);
1386 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
,
1387 vid
& GLOBAL_VTU_VID_MASK
);
1391 ret
= _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_VTU_GET_NEXT
);
1395 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
);
1399 next
.vid
= ret
& GLOBAL_VTU_VID_MASK
;
1400 next
.valid
= !!(ret
& GLOBAL_VTU_VID_VALID
);
1403 ret
= _mv88e6xxx_vtu_stu_data_read(ds
, &next
, 0);
1407 if (mv88e6xxx_6097_family(ds
) || mv88e6xxx_6165_family(ds
) ||
1408 mv88e6xxx_6351_family(ds
) || mv88e6xxx_6352_family(ds
)) {
1409 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
,
1414 next
.fid
= ret
& GLOBAL_VTU_FID_MASK
;
1416 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
,
1421 next
.sid
= ret
& GLOBAL_VTU_SID_MASK
;
1429 static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch
*ds
,
1430 struct mv88e6xxx_vtu_stu_entry
*entry
)
1435 ret
= _mv88e6xxx_vtu_wait(ds
);
1442 /* Write port member tags */
1443 ret
= _mv88e6xxx_vtu_stu_data_write(ds
, entry
, 0);
1447 if (mv88e6xxx_6097_family(ds
) || mv88e6xxx_6165_family(ds
) ||
1448 mv88e6xxx_6351_family(ds
) || mv88e6xxx_6352_family(ds
)) {
1449 reg
= entry
->sid
& GLOBAL_VTU_SID_MASK
;
1450 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_SID
, reg
);
1454 reg
= entry
->fid
& GLOBAL_VTU_FID_MASK
;
1455 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_FID
, reg
);
1460 reg
= GLOBAL_VTU_VID_VALID
;
1462 reg
|= entry
->vid
& GLOBAL_VTU_VID_MASK
;
1463 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
, reg
);
1467 return _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_VTU_LOAD_PURGE
);
1470 static int _mv88e6xxx_stu_getnext(struct dsa_switch
*ds
, u8 sid
,
1471 struct mv88e6xxx_vtu_stu_entry
*entry
)
1473 struct mv88e6xxx_vtu_stu_entry next
= { 0 };
1476 ret
= _mv88e6xxx_vtu_wait(ds
);
1480 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_SID
,
1481 sid
& GLOBAL_VTU_SID_MASK
);
1485 ret
= _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_STU_GET_NEXT
);
1489 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_VTU_SID
);
1493 next
.sid
= ret
& GLOBAL_VTU_SID_MASK
;
1495 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
);
1499 next
.valid
= !!(ret
& GLOBAL_VTU_VID_VALID
);
1502 ret
= _mv88e6xxx_vtu_stu_data_read(ds
, &next
, 2);
1511 static int _mv88e6xxx_stu_loadpurge(struct dsa_switch
*ds
,
1512 struct mv88e6xxx_vtu_stu_entry
*entry
)
1517 ret
= _mv88e6xxx_vtu_wait(ds
);
1524 /* Write port states */
1525 ret
= _mv88e6xxx_vtu_stu_data_write(ds
, entry
, 2);
1529 reg
= GLOBAL_VTU_VID_VALID
;
1531 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_VID
, reg
);
1535 reg
= entry
->sid
& GLOBAL_VTU_SID_MASK
;
1536 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_VTU_SID
, reg
);
1540 return _mv88e6xxx_vtu_cmd(ds
, GLOBAL_VTU_OP_STU_LOAD_PURGE
);
1543 static int _mv88e6xxx_vlan_init(struct dsa_switch
*ds
, u16 vid
,
1544 struct mv88e6xxx_vtu_stu_entry
*entry
)
1546 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1547 struct mv88e6xxx_vtu_stu_entry vlan
= {
1553 /* exclude all ports except the CPU */
1554 for (i
= 0; i
< ps
->num_ports
; ++i
)
1555 vlan
.data
[i
] = dsa_is_cpu_port(ds
, i
) ?
1556 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED
:
1557 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
;
1559 if (mv88e6xxx_6097_family(ds
) || mv88e6xxx_6165_family(ds
) ||
1560 mv88e6xxx_6351_family(ds
) || mv88e6xxx_6352_family(ds
)) {
1561 struct mv88e6xxx_vtu_stu_entry vstp
;
1564 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1565 * implemented, only one STU entry is needed to cover all VTU
1566 * entries. Thus, validate the SID 0.
1569 err
= _mv88e6xxx_stu_getnext(ds
, GLOBAL_VTU_SID_MASK
, &vstp
);
1573 if (vstp
.sid
!= vlan
.sid
|| !vstp
.valid
) {
1574 memset(&vstp
, 0, sizeof(vstp
));
1576 vstp
.sid
= vlan
.sid
;
1578 err
= _mv88e6xxx_stu_loadpurge(ds
, &vstp
);
1583 /* Non-bridged ports and bridge groups use FIDs from 1 to
1584 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
1586 vlan
.fid
= find_next_zero_bit(ps
->fid_bitmap
, VLAN_N_VID
,
1588 if (unlikely(vlan
.fid
== VLAN_N_VID
)) {
1589 pr_err("no more FID available for VLAN %d\n", vid
);
1593 /* Clear all MAC addresses from the new database */
1594 err
= _mv88e6xxx_atu_flush(ds
, vlan
.fid
, true);
1598 set_bit(vlan
.fid
, ps
->fid_bitmap
);
1605 int mv88e6xxx_port_vlan_add(struct dsa_switch
*ds
, int port
, u16 vid
,
1608 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1609 struct mv88e6xxx_vtu_stu_entry vlan
;
1612 mutex_lock(&ps
->smi_mutex
);
1613 err
= _mv88e6xxx_vtu_getnext(ds
, vid
- 1, &vlan
);
1617 if (vlan
.vid
!= vid
|| !vlan
.valid
) {
1618 err
= _mv88e6xxx_vlan_init(ds
, vid
, &vlan
);
1623 vlan
.data
[port
] = untagged
?
1624 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
:
1625 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED
;
1627 err
= _mv88e6xxx_vtu_loadpurge(ds
, &vlan
);
1629 mutex_unlock(&ps
->smi_mutex
);
1634 int mv88e6xxx_port_vlan_del(struct dsa_switch
*ds
, int port
, u16 vid
)
1636 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1637 struct mv88e6xxx_vtu_stu_entry vlan
;
1641 mutex_lock(&ps
->smi_mutex
);
1643 err
= _mv88e6xxx_vtu_getnext(ds
, vid
- 1, &vlan
);
1647 if (vlan
.vid
!= vid
|| !vlan
.valid
||
1648 vlan
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
) {
1653 vlan
.data
[port
] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
;
1655 /* keep the VLAN unless all ports are excluded */
1656 for (i
= 0; i
< ps
->num_ports
; ++i
) {
1657 if (dsa_is_cpu_port(ds
, i
))
1660 if (vlan
.data
[i
] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
) {
1667 err
= _mv88e6xxx_vtu_loadpurge(ds
, &vlan
);
1671 err
= _mv88e6xxx_atu_remove(ds
, vlan
.fid
, port
, false);
1676 clear_bit(vlan
.fid
, ps
->fid_bitmap
);
1679 mutex_unlock(&ps
->smi_mutex
);
1684 static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch
*ds
, int port
, u16 vid
,
1685 struct mv88e6xxx_vtu_stu_entry
*entry
)
1693 err
= _mv88e6xxx_vtu_getnext(ds
, vid
, entry
);
1701 } while (entry
->data
[port
] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED
&&
1702 entry
->data
[port
] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
);
1707 int mv88e6xxx_vlan_getnext(struct dsa_switch
*ds
, u16
*vid
,
1708 unsigned long *ports
, unsigned long *untagged
)
1710 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1711 struct mv88e6xxx_vtu_stu_entry next
;
1718 mutex_lock(&ps
->smi_mutex
);
1719 err
= _mv88e6xxx_vtu_getnext(ds
, *vid
, &next
);
1720 mutex_unlock(&ps
->smi_mutex
);
1730 for (port
= 0; port
< ps
->num_ports
; ++port
) {
1731 clear_bit(port
, ports
);
1732 clear_bit(port
, untagged
);
1734 if (dsa_is_cpu_port(ds
, port
))
1737 if (next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED
||
1738 next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
)
1739 set_bit(port
, ports
);
1741 if (next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
)
1742 set_bit(port
, untagged
);
1748 static int _mv88e6xxx_atu_mac_write(struct dsa_switch
*ds
,
1749 const unsigned char *addr
)
1753 for (i
= 0; i
< 3; i
++) {
1754 ret
= _mv88e6xxx_reg_write(
1755 ds
, REG_GLOBAL
, GLOBAL_ATU_MAC_01
+ i
,
1756 (addr
[i
* 2] << 8) | addr
[i
* 2 + 1]);
1764 static int _mv88e6xxx_atu_mac_read(struct dsa_switch
*ds
, unsigned char *addr
)
1768 for (i
= 0; i
< 3; i
++) {
1769 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
,
1770 GLOBAL_ATU_MAC_01
+ i
);
1773 addr
[i
* 2] = ret
>> 8;
1774 addr
[i
* 2 + 1] = ret
& 0xff;
1780 static int _mv88e6xxx_atu_load(struct dsa_switch
*ds
,
1781 struct mv88e6xxx_atu_entry
*entry
)
1785 ret
= _mv88e6xxx_atu_wait(ds
);
1789 ret
= _mv88e6xxx_atu_mac_write(ds
, entry
->mac
);
1793 ret
= _mv88e6xxx_atu_data_write(ds
, entry
);
1797 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_FID
, entry
->fid
);
1801 return _mv88e6xxx_atu_cmd(ds
, GLOBAL_ATU_OP_LOAD_DB
);
1804 static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch
*ds
, int port
, u16 vid
)
1806 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1807 struct mv88e6xxx_vtu_stu_entry vlan
;
1811 return ps
->fid
[port
];
1813 err
= _mv88e6xxx_port_vtu_getnext(ds
, port
, vid
- 1, &vlan
);
1817 if (vlan
.vid
== vid
)
1823 static int _mv88e6xxx_port_fdb_load(struct dsa_switch
*ds
, int port
,
1824 const unsigned char *addr
, u16 vid
,
1827 struct mv88e6xxx_atu_entry entry
= { 0 };
1830 ret
= _mv88e6xxx_port_vid_to_fid(ds
, port
, vid
);
1835 entry
.state
= state
;
1836 ether_addr_copy(entry
.mac
, addr
);
1837 if (state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
1838 entry
.trunk
= false;
1839 entry
.portv_trunkid
= BIT(port
);
1842 return _mv88e6xxx_atu_load(ds
, &entry
);
1845 int mv88e6xxx_port_fdb_prepare(struct dsa_switch
*ds
, int port
,
1846 const struct switchdev_obj_port_fdb
*fdb
,
1847 struct switchdev_trans
*trans
)
1849 /* We don't need any dynamic resource from the kernel (yet),
1850 * so skip the prepare phase.
1855 int mv88e6xxx_port_fdb_add(struct dsa_switch
*ds
, int port
,
1856 const struct switchdev_obj_port_fdb
*fdb
,
1857 struct switchdev_trans
*trans
)
1859 int state
= is_multicast_ether_addr(fdb
->addr
) ?
1860 GLOBAL_ATU_DATA_STATE_MC_STATIC
:
1861 GLOBAL_ATU_DATA_STATE_UC_STATIC
;
1862 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1865 mutex_lock(&ps
->smi_mutex
);
1866 ret
= _mv88e6xxx_port_fdb_load(ds
, port
, fdb
->addr
, fdb
->vid
, state
);
1867 mutex_unlock(&ps
->smi_mutex
);
1872 int mv88e6xxx_port_fdb_del(struct dsa_switch
*ds
, int port
,
1873 const unsigned char *addr
, u16 vid
)
1875 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1878 mutex_lock(&ps
->smi_mutex
);
1879 ret
= _mv88e6xxx_port_fdb_load(ds
, port
, addr
, vid
,
1880 GLOBAL_ATU_DATA_STATE_UNUSED
);
1881 mutex_unlock(&ps
->smi_mutex
);
1886 static int _mv88e6xxx_atu_getnext(struct dsa_switch
*ds
, u16 fid
,
1887 const unsigned char *addr
,
1888 struct mv88e6xxx_atu_entry
*entry
)
1890 struct mv88e6xxx_atu_entry next
= { 0 };
1895 ret
= _mv88e6xxx_atu_wait(ds
);
1899 ret
= _mv88e6xxx_atu_mac_write(ds
, addr
);
1903 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_FID
, fid
);
1907 ret
= _mv88e6xxx_atu_cmd(ds
, GLOBAL_ATU_OP_GET_NEXT_DB
);
1911 ret
= _mv88e6xxx_atu_mac_read(ds
, next
.mac
);
1915 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_ATU_DATA
);
1919 next
.state
= ret
& GLOBAL_ATU_DATA_STATE_MASK
;
1920 if (next
.state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
1921 unsigned int mask
, shift
;
1923 if (ret
& GLOBAL_ATU_DATA_TRUNK
) {
1925 mask
= GLOBAL_ATU_DATA_TRUNK_ID_MASK
;
1926 shift
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT
;
1929 mask
= GLOBAL_ATU_DATA_PORT_VECTOR_MASK
;
1930 shift
= GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
;
1933 next
.portv_trunkid
= (ret
& mask
) >> shift
;
1940 /* get next entry for port */
1941 int mv88e6xxx_port_fdb_getnext(struct dsa_switch
*ds
, int port
,
1942 unsigned char *addr
, u16
*vid
, bool *is_static
)
1944 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1945 struct mv88e6xxx_atu_entry next
;
1949 mutex_lock(&ps
->smi_mutex
);
1951 ret
= _mv88e6xxx_port_vid_to_fid(ds
, port
, *vid
);
1957 if (is_broadcast_ether_addr(addr
)) {
1958 struct mv88e6xxx_vtu_stu_entry vtu
;
1960 ret
= _mv88e6xxx_port_vtu_getnext(ds
, port
, *vid
, &vtu
);
1968 ret
= _mv88e6xxx_atu_getnext(ds
, fid
, addr
, &next
);
1972 ether_addr_copy(addr
, next
.mac
);
1974 if (next
.state
== GLOBAL_ATU_DATA_STATE_UNUSED
)
1976 } while (next
.trunk
|| (next
.portv_trunkid
& BIT(port
)) == 0);
1978 *is_static
= next
.state
== (is_multicast_ether_addr(addr
) ?
1979 GLOBAL_ATU_DATA_STATE_MC_STATIC
:
1980 GLOBAL_ATU_DATA_STATE_UC_STATIC
);
1982 mutex_unlock(&ps
->smi_mutex
);
1987 static void mv88e6xxx_bridge_work(struct work_struct
*work
)
1989 struct mv88e6xxx_priv_state
*ps
;
1990 struct dsa_switch
*ds
;
1993 ps
= container_of(work
, struct mv88e6xxx_priv_state
, bridge_work
);
1994 ds
= ((struct dsa_switch
*)ps
) - 1;
1996 while (ps
->port_state_update_mask
) {
1997 port
= __ffs(ps
->port_state_update_mask
);
1998 clear_bit(port
, &ps
->port_state_update_mask
);
1999 mv88e6xxx_set_port_state(ds
, port
, ps
->port_state
[port
]);
2003 static int mv88e6xxx_setup_port(struct dsa_switch
*ds
, int port
)
2005 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2009 mutex_lock(&ps
->smi_mutex
);
2011 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2012 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2013 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6095_family(ds
) ||
2014 mv88e6xxx_6065_family(ds
) || mv88e6xxx_6320_family(ds
)) {
2015 /* MAC Forcing register: don't force link, speed,
2016 * duplex or flow control state to any particular
2017 * values on physical ports, but force the CPU port
2018 * and all DSA ports to their maximum bandwidth and
2021 reg
= _mv88e6xxx_reg_read(ds
, REG_PORT(port
), PORT_PCS_CTRL
);
2022 if (dsa_is_cpu_port(ds
, port
) || dsa_is_dsa_port(ds
, port
)) {
2023 reg
&= ~PORT_PCS_CTRL_UNFORCED
;
2024 reg
|= PORT_PCS_CTRL_FORCE_LINK
|
2025 PORT_PCS_CTRL_LINK_UP
|
2026 PORT_PCS_CTRL_DUPLEX_FULL
|
2027 PORT_PCS_CTRL_FORCE_DUPLEX
;
2028 if (mv88e6xxx_6065_family(ds
))
2029 reg
|= PORT_PCS_CTRL_100
;
2031 reg
|= PORT_PCS_CTRL_1000
;
2033 reg
|= PORT_PCS_CTRL_UNFORCED
;
2036 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2037 PORT_PCS_CTRL
, reg
);
2042 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2043 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2044 * tunneling, determine priority by looking at 802.1p and IP
2045 * priority fields (IP prio has precedence), and set STP state
2048 * If this is the CPU link, use DSA or EDSA tagging depending
2049 * on which tagging mode was configured.
2051 * If this is a link to another switch, use DSA tagging mode.
2053 * If this is the upstream port for this switch, enable
2054 * forwarding of unknown unicasts and multicasts.
2057 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2058 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2059 mv88e6xxx_6095_family(ds
) || mv88e6xxx_6065_family(ds
) ||
2060 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6320_family(ds
))
2061 reg
= PORT_CONTROL_IGMP_MLD_SNOOP
|
2062 PORT_CONTROL_USE_TAG
| PORT_CONTROL_USE_IP
|
2063 PORT_CONTROL_STATE_FORWARDING
;
2064 if (dsa_is_cpu_port(ds
, port
)) {
2065 if (mv88e6xxx_6095_family(ds
) || mv88e6xxx_6185_family(ds
))
2066 reg
|= PORT_CONTROL_DSA_TAG
;
2067 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2068 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2069 mv88e6xxx_6320_family(ds
)) {
2070 if (ds
->dst
->tag_protocol
== DSA_TAG_PROTO_EDSA
)
2071 reg
|= PORT_CONTROL_FRAME_ETHER_TYPE_DSA
;
2073 reg
|= PORT_CONTROL_FRAME_MODE_DSA
;
2074 reg
|= PORT_CONTROL_FORWARD_UNKNOWN
|
2075 PORT_CONTROL_FORWARD_UNKNOWN_MC
;
2078 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2079 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2080 mv88e6xxx_6095_family(ds
) || mv88e6xxx_6065_family(ds
) ||
2081 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6320_family(ds
)) {
2082 if (ds
->dst
->tag_protocol
== DSA_TAG_PROTO_EDSA
)
2083 reg
|= PORT_CONTROL_EGRESS_ADD_TAG
;
2086 if (dsa_is_dsa_port(ds
, port
)) {
2087 if (mv88e6xxx_6095_family(ds
) || mv88e6xxx_6185_family(ds
))
2088 reg
|= PORT_CONTROL_DSA_TAG
;
2089 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2090 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2091 mv88e6xxx_6320_family(ds
)) {
2092 reg
|= PORT_CONTROL_FRAME_MODE_DSA
;
2095 if (port
== dsa_upstream_port(ds
))
2096 reg
|= PORT_CONTROL_FORWARD_UNKNOWN
|
2097 PORT_CONTROL_FORWARD_UNKNOWN_MC
;
2100 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2106 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2107 * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
2108 * untagged frames on this port, do a destination address lookup on all
2109 * received packets as usual, disable ARP mirroring and don't send a
2110 * copy of all transmitted/received frames on this port to the CPU.
2113 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2114 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2115 mv88e6xxx_6095_family(ds
) || mv88e6xxx_6320_family(ds
))
2116 reg
= PORT_CONTROL_2_MAP_DA
;
2118 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2119 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6320_family(ds
))
2120 reg
|= PORT_CONTROL_2_JUMBO_10240
;
2122 if (mv88e6xxx_6095_family(ds
) || mv88e6xxx_6185_family(ds
)) {
2123 /* Set the upstream port this port should use */
2124 reg
|= dsa_upstream_port(ds
);
2125 /* enable forwarding of unknown multicast addresses to
2128 if (port
== dsa_upstream_port(ds
))
2129 reg
|= PORT_CONTROL_2_FORWARD_UNKNOWN
;
2132 reg
|= PORT_CONTROL_2_8021Q_FALLBACK
;
2135 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2136 PORT_CONTROL_2
, reg
);
2141 /* Port Association Vector: when learning source addresses
2142 * of packets, add the address to the address database using
2143 * a port bitmap that has only the bit for this port set and
2144 * the other bits clear.
2146 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_ASSOC_VECTOR
,
2151 /* Egress rate control 2: disable egress rate control. */
2152 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_RATE_CONTROL_2
,
2157 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2158 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2159 mv88e6xxx_6320_family(ds
)) {
2160 /* Do not limit the period of time that this port can
2161 * be paused for by the remote end or the period of
2162 * time that this port can pause the remote end.
2164 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2165 PORT_PAUSE_CTRL
, 0x0000);
2169 /* Port ATU control: disable limiting the number of
2170 * address database entries that this port is allowed
2173 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2174 PORT_ATU_CONTROL
, 0x0000);
2175 /* Priority Override: disable DA, SA and VTU priority
2178 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2179 PORT_PRI_OVERRIDE
, 0x0000);
2183 /* Port Ethertype: use the Ethertype DSA Ethertype
2186 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2187 PORT_ETH_TYPE
, ETH_P_EDSA
);
2190 /* Tag Remap: use an identity 802.1p prio -> switch
2193 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2194 PORT_TAG_REGMAP_0123
, 0x3210);
2198 /* Tag Remap 2: use an identity 802.1p prio -> switch
2201 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2202 PORT_TAG_REGMAP_4567
, 0x7654);
2207 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2208 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2209 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6095_family(ds
) ||
2210 mv88e6xxx_6320_family(ds
)) {
2211 /* Rate Control: disable ingress rate limiting. */
2212 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
),
2213 PORT_RATE_CONTROL
, 0x0001);
2218 /* Port Control 1: disable trunking, disable sending
2219 * learning messages to this port.
2221 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_CONTROL_1
, 0x0000);
2225 /* Port based VLAN map: give each port its own address
2226 * database, allow the CPU port to talk to each of the 'real'
2227 * ports, and allow each of the 'real' ports to only talk to
2228 * the upstream port.
2231 ps
->fid
[port
] = fid
;
2232 set_bit(fid
, ps
->fid_bitmap
);
2234 if (!dsa_is_cpu_port(ds
, port
))
2235 ps
->bridge_mask
[fid
] = 1 << port
;
2237 ret
= _mv88e6xxx_update_port_config(ds
, port
);
2241 /* Default VLAN ID and priority: don't set a default VLAN
2242 * ID, and set the default packet priority to zero.
2244 ret
= _mv88e6xxx_reg_write(ds
, REG_PORT(port
), PORT_DEFAULT_VLAN
,
2247 mutex_unlock(&ps
->smi_mutex
);
2251 int mv88e6xxx_setup_ports(struct dsa_switch
*ds
)
2253 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2257 for (i
= 0; i
< ps
->num_ports
; i
++) {
2258 ret
= mv88e6xxx_setup_port(ds
, i
);
2265 static int mv88e6xxx_regs_show(struct seq_file
*s
, void *p
)
2267 struct dsa_switch
*ds
= s
->private;
2269 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2272 seq_puts(s
, " GLOBAL GLOBAL2 ");
2273 for (port
= 0 ; port
< ps
->num_ports
; port
++)
2274 seq_printf(s
, " %2d ", port
);
2277 for (reg
= 0; reg
< 32; reg
++) {
2278 seq_printf(s
, "%2x: ", reg
);
2279 seq_printf(s
, " %4x %4x ",
2280 mv88e6xxx_reg_read(ds
, REG_GLOBAL
, reg
),
2281 mv88e6xxx_reg_read(ds
, REG_GLOBAL2
, reg
));
2283 for (port
= 0 ; port
< ps
->num_ports
; port
++)
2284 seq_printf(s
, "%4x ",
2285 mv88e6xxx_reg_read(ds
, REG_PORT(port
), reg
));
2292 static int mv88e6xxx_regs_open(struct inode
*inode
, struct file
*file
)
2294 return single_open(file
, mv88e6xxx_regs_show
, inode
->i_private
);
2297 static const struct file_operations mv88e6xxx_regs_fops
= {
2298 .open
= mv88e6xxx_regs_open
,
2300 .llseek
= no_llseek
,
2301 .release
= single_release
,
2302 .owner
= THIS_MODULE
,
2305 static void mv88e6xxx_atu_show_header(struct seq_file
*s
)
2307 seq_puts(s
, "DB T/P Vec State Addr\n");
2310 static void mv88e6xxx_atu_show_entry(struct seq_file
*s
, int dbnum
,
2311 unsigned char *addr
, int data
)
2313 bool trunk
= !!(data
& GLOBAL_ATU_DATA_TRUNK
);
2314 int portvec
= ((data
& GLOBAL_ATU_DATA_PORT_VECTOR_MASK
) >>
2315 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
);
2316 int state
= data
& GLOBAL_ATU_DATA_STATE_MASK
;
2318 seq_printf(s
, "%03x %5s %10pb %x %pM\n",
2319 dbnum
, (trunk
? "Trunk" : "Port"), &portvec
, state
, addr
);
2322 static int mv88e6xxx_atu_show_db(struct seq_file
*s
, struct dsa_switch
*ds
,
2325 unsigned char bcast
[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2326 unsigned char addr
[6];
2327 int ret
, data
, state
;
2329 ret
= _mv88e6xxx_atu_mac_write(ds
, bcast
);
2334 ret
= _mv88e6xxx_reg_write(ds
, REG_GLOBAL
, GLOBAL_ATU_FID
,
2339 ret
= _mv88e6xxx_atu_cmd(ds
, GLOBAL_ATU_OP_GET_NEXT_DB
);
2343 data
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL
, GLOBAL_ATU_DATA
);
2347 state
= data
& GLOBAL_ATU_DATA_STATE_MASK
;
2348 if (state
== GLOBAL_ATU_DATA_STATE_UNUSED
)
2350 ret
= _mv88e6xxx_atu_mac_read(ds
, addr
);
2353 mv88e6xxx_atu_show_entry(s
, dbnum
, addr
, data
);
2354 } while (state
!= GLOBAL_ATU_DATA_STATE_UNUSED
);
2359 static int mv88e6xxx_atu_show(struct seq_file
*s
, void *p
)
2361 struct dsa_switch
*ds
= s
->private;
2362 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2365 mv88e6xxx_atu_show_header(s
);
2367 for (dbnum
= 0; dbnum
< 255; dbnum
++) {
2368 mutex_lock(&ps
->smi_mutex
);
2369 mv88e6xxx_atu_show_db(s
, ds
, dbnum
);
2370 mutex_unlock(&ps
->smi_mutex
);
2376 static int mv88e6xxx_atu_open(struct inode
*inode
, struct file
*file
)
2378 return single_open(file
, mv88e6xxx_atu_show
, inode
->i_private
);
2381 static const struct file_operations mv88e6xxx_atu_fops
= {
2382 .open
= mv88e6xxx_atu_open
,
2384 .llseek
= no_llseek
,
2385 .release
= single_release
,
2386 .owner
= THIS_MODULE
,
2389 static void mv88e6xxx_stats_show_header(struct seq_file
*s
,
2390 struct mv88e6xxx_priv_state
*ps
)
2394 seq_puts(s
, " Statistic ");
2395 for (port
= 0 ; port
< ps
->num_ports
; port
++)
2396 seq_printf(s
, "Port %2d ", port
);
2400 static int mv88e6xxx_stats_show(struct seq_file
*s
, void *p
)
2402 struct dsa_switch
*ds
= s
->private;
2403 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2404 struct mv88e6xxx_hw_stat
*stats
= mv88e6xxx_hw_stats
;
2405 int port
, stat
, max_stats
;
2408 if (have_sw_in_discards(ds
))
2409 max_stats
= ARRAY_SIZE(mv88e6xxx_hw_stats
);
2411 max_stats
= ARRAY_SIZE(mv88e6xxx_hw_stats
) - 3;
2413 mv88e6xxx_stats_show_header(s
, ps
);
2415 mutex_lock(&ps
->smi_mutex
);
2417 for (stat
= 0; stat
< max_stats
; stat
++) {
2418 seq_printf(s
, "%19s: ", stats
[stat
].string
);
2419 for (port
= 0 ; port
< ps
->num_ports
; port
++) {
2420 _mv88e6xxx_stats_snapshot(ds
, port
);
2421 value
= _mv88e6xxx_get_ethtool_stat(ds
, stat
, stats
,
2423 seq_printf(s
, "%8llu ", value
);
2427 mutex_unlock(&ps
->smi_mutex
);
2432 static int mv88e6xxx_stats_open(struct inode
*inode
, struct file
*file
)
2434 return single_open(file
, mv88e6xxx_stats_show
, inode
->i_private
);
2437 static const struct file_operations mv88e6xxx_stats_fops
= {
2438 .open
= mv88e6xxx_stats_open
,
2440 .llseek
= no_llseek
,
2441 .release
= single_release
,
2442 .owner
= THIS_MODULE
,
2445 static int mv88e6xxx_device_map_show(struct seq_file
*s
, void *p
)
2447 struct dsa_switch
*ds
= s
->private;
2448 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2451 seq_puts(s
, "Target Port\n");
2453 mutex_lock(&ps
->smi_mutex
);
2454 for (target
= 0; target
< 32; target
++) {
2455 ret
= _mv88e6xxx_reg_write(
2456 ds
, REG_GLOBAL2
, GLOBAL2_DEVICE_MAPPING
,
2457 target
<< GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT
);
2460 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL2
,
2461 GLOBAL2_DEVICE_MAPPING
);
2462 seq_printf(s
, " %2d %2d\n", target
,
2463 ret
& GLOBAL2_DEVICE_MAPPING_PORT_MASK
);
2466 mutex_unlock(&ps
->smi_mutex
);
2471 static int mv88e6xxx_device_map_open(struct inode
*inode
, struct file
*file
)
2473 return single_open(file
, mv88e6xxx_device_map_show
, inode
->i_private
);
2476 static const struct file_operations mv88e6xxx_device_map_fops
= {
2477 .open
= mv88e6xxx_device_map_open
,
2479 .llseek
= no_llseek
,
2480 .release
= single_release
,
2481 .owner
= THIS_MODULE
,
2484 static int mv88e6xxx_scratch_show(struct seq_file
*s
, void *p
)
2486 struct dsa_switch
*ds
= s
->private;
2487 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2490 seq_puts(s
, "Register Value\n");
2492 mutex_lock(&ps
->smi_mutex
);
2493 for (reg
= 0; reg
< 0x80; reg
++) {
2494 ret
= _mv88e6xxx_reg_write(
2495 ds
, REG_GLOBAL2
, GLOBAL2_SCRATCH_MISC
,
2496 reg
<< GLOBAL2_SCRATCH_REGISTER_SHIFT
);
2500 ret
= _mv88e6xxx_scratch_wait(ds
);
2504 ret
= _mv88e6xxx_reg_read(ds
, REG_GLOBAL2
,
2505 GLOBAL2_SCRATCH_MISC
);
2506 seq_printf(s
, " %2x %2x\n", reg
,
2507 ret
& GLOBAL2_SCRATCH_VALUE_MASK
);
2510 mutex_unlock(&ps
->smi_mutex
);
2515 static int mv88e6xxx_scratch_open(struct inode
*inode
, struct file
*file
)
2517 return single_open(file
, mv88e6xxx_scratch_show
, inode
->i_private
);
2520 static const struct file_operations mv88e6xxx_scratch_fops
= {
2521 .open
= mv88e6xxx_scratch_open
,
2523 .llseek
= no_llseek
,
2524 .release
= single_release
,
2525 .owner
= THIS_MODULE
,
2528 int mv88e6xxx_setup_common(struct dsa_switch
*ds
)
2530 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2533 mutex_init(&ps
->smi_mutex
);
2535 ps
->id
= REG_READ(REG_PORT(0), PORT_SWITCH_ID
) & 0xfff0;
2537 INIT_WORK(&ps
->bridge_work
, mv88e6xxx_bridge_work
);
2539 name
= kasprintf(GFP_KERNEL
, "dsa%d", ds
->index
);
2540 ps
->dbgfs
= debugfs_create_dir(name
, NULL
);
2543 debugfs_create_file("regs", S_IRUGO
, ps
->dbgfs
, ds
,
2544 &mv88e6xxx_regs_fops
);
2546 debugfs_create_file("atu", S_IRUGO
, ps
->dbgfs
, ds
,
2547 &mv88e6xxx_atu_fops
);
2549 debugfs_create_file("stats", S_IRUGO
, ps
->dbgfs
, ds
,
2550 &mv88e6xxx_stats_fops
);
2552 debugfs_create_file("device_map", S_IRUGO
, ps
->dbgfs
, ds
,
2553 &mv88e6xxx_device_map_fops
);
2555 debugfs_create_file("scratch", S_IRUGO
, ps
->dbgfs
, ds
,
2556 &mv88e6xxx_scratch_fops
);
2560 int mv88e6xxx_setup_global(struct dsa_switch
*ds
)
2562 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2566 /* Set the default address aging time to 5 minutes, and
2567 * enable address learn messages to be sent to all message
2570 REG_WRITE(REG_GLOBAL
, GLOBAL_ATU_CONTROL
,
2571 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL
);
2573 /* Configure the IP ToS mapping registers. */
2574 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_0
, 0x0000);
2575 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_1
, 0x0000);
2576 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_2
, 0x5555);
2577 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_3
, 0x5555);
2578 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_4
, 0xaaaa);
2579 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_5
, 0xaaaa);
2580 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_6
, 0xffff);
2581 REG_WRITE(REG_GLOBAL
, GLOBAL_IP_PRI_7
, 0xffff);
2583 /* Configure the IEEE 802.1p priority mapping register. */
2584 REG_WRITE(REG_GLOBAL
, GLOBAL_IEEE_PRI
, 0xfa41);
2586 /* Send all frames with destination addresses matching
2587 * 01:80:c2:00:00:0x to the CPU port.
2589 REG_WRITE(REG_GLOBAL2
, GLOBAL2_MGMT_EN_0X
, 0xffff);
2591 /* Ignore removed tag data on doubly tagged packets, disable
2592 * flow control messages, force flow control priority to the
2593 * highest, and send all special multicast frames to the CPU
2594 * port at the highest priority.
2596 REG_WRITE(REG_GLOBAL2
, GLOBAL2_SWITCH_MGMT
,
2597 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU
| 0x70 |
2598 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI
);
2600 /* Program the DSA routing table. */
2601 for (i
= 0; i
< 32; i
++) {
2604 if (ds
->pd
->rtable
&&
2605 i
!= ds
->index
&& i
< ds
->dst
->pd
->nr_chips
)
2606 nexthop
= ds
->pd
->rtable
[i
] & 0x1f;
2608 REG_WRITE(REG_GLOBAL2
, GLOBAL2_DEVICE_MAPPING
,
2609 GLOBAL2_DEVICE_MAPPING_UPDATE
|
2610 (i
<< GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT
) |
2614 /* Clear all trunk masks. */
2615 for (i
= 0; i
< 8; i
++)
2616 REG_WRITE(REG_GLOBAL2
, GLOBAL2_TRUNK_MASK
,
2617 0x8000 | (i
<< GLOBAL2_TRUNK_MASK_NUM_SHIFT
) |
2618 ((1 << ps
->num_ports
) - 1));
2620 /* Clear all trunk mappings. */
2621 for (i
= 0; i
< 16; i
++)
2622 REG_WRITE(REG_GLOBAL2
, GLOBAL2_TRUNK_MAPPING
,
2623 GLOBAL2_TRUNK_MAPPING_UPDATE
|
2624 (i
<< GLOBAL2_TRUNK_MAPPING_ID_SHIFT
));
2626 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2627 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2628 mv88e6xxx_6320_family(ds
)) {
2629 /* Send all frames with destination addresses matching
2630 * 01:80:c2:00:00:2x to the CPU port.
2632 REG_WRITE(REG_GLOBAL2
, GLOBAL2_MGMT_EN_2X
, 0xffff);
2634 /* Initialise cross-chip port VLAN table to reset
2637 REG_WRITE(REG_GLOBAL2
, GLOBAL2_PVT_ADDR
, 0x9000);
2639 /* Clear the priority override table. */
2640 for (i
= 0; i
< 16; i
++)
2641 REG_WRITE(REG_GLOBAL2
, GLOBAL2_PRIO_OVERRIDE
,
2645 if (mv88e6xxx_6352_family(ds
) || mv88e6xxx_6351_family(ds
) ||
2646 mv88e6xxx_6165_family(ds
) || mv88e6xxx_6097_family(ds
) ||
2647 mv88e6xxx_6185_family(ds
) || mv88e6xxx_6095_family(ds
) ||
2648 mv88e6xxx_6320_family(ds
)) {
2649 /* Disable ingress rate limiting by resetting all
2650 * ingress rate limit registers to their initial
2653 for (i
= 0; i
< ps
->num_ports
; i
++)
2654 REG_WRITE(REG_GLOBAL2
, GLOBAL2_INGRESS_OP
,
2658 /* Clear the statistics counters for all ports */
2659 REG_WRITE(REG_GLOBAL
, GLOBAL_STATS_OP
, GLOBAL_STATS_OP_FLUSH_ALL
);
2661 /* Wait for the flush to complete. */
2662 mutex_lock(&ps
->smi_mutex
);
2663 ret
= _mv88e6xxx_stats_wait(ds
);
2667 /* Clear all ATU entries */
2668 ret
= _mv88e6xxx_atu_flush(ds
, 0, true);
2672 /* Clear all the VTU and STU entries */
2673 ret
= _mv88e6xxx_vtu_stu_flush(ds
);
2675 mutex_unlock(&ps
->smi_mutex
);
2680 int mv88e6xxx_switch_reset(struct dsa_switch
*ds
, bool ppu_active
)
2682 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2683 u16 is_reset
= (ppu_active
? 0x8800 : 0xc800);
2684 unsigned long timeout
;
2688 /* Set all ports to the disabled state. */
2689 for (i
= 0; i
< ps
->num_ports
; i
++) {
2690 ret
= REG_READ(REG_PORT(i
), PORT_CONTROL
);
2691 REG_WRITE(REG_PORT(i
), PORT_CONTROL
, ret
& 0xfffc);
2694 /* Wait for transmit queues to drain. */
2695 usleep_range(2000, 4000);
2697 /* Reset the switch. Keep the PPU active if requested. The PPU
2698 * needs to be active to support indirect phy register access
2699 * through global registers 0x18 and 0x19.
2702 REG_WRITE(REG_GLOBAL
, 0x04, 0xc000);
2704 REG_WRITE(REG_GLOBAL
, 0x04, 0xc400);
2706 /* Wait up to one second for reset to complete. */
2707 timeout
= jiffies
+ 1 * HZ
;
2708 while (time_before(jiffies
, timeout
)) {
2709 ret
= REG_READ(REG_GLOBAL
, 0x00);
2710 if ((ret
& is_reset
) == is_reset
)
2712 usleep_range(1000, 2000);
2714 if (time_after(jiffies
, timeout
))
2720 int mv88e6xxx_phy_page_read(struct dsa_switch
*ds
, int port
, int page
, int reg
)
2722 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2725 mutex_lock(&ps
->smi_mutex
);
2726 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, page
);
2729 ret
= _mv88e6xxx_phy_read_indirect(ds
, port
, reg
);
2731 _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, 0x0);
2732 mutex_unlock(&ps
->smi_mutex
);
2736 int mv88e6xxx_phy_page_write(struct dsa_switch
*ds
, int port
, int page
,
2739 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2742 mutex_lock(&ps
->smi_mutex
);
2743 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, page
);
2747 ret
= _mv88e6xxx_phy_write_indirect(ds
, port
, reg
, val
);
2749 _mv88e6xxx_phy_write_indirect(ds
, port
, 0x16, 0x0);
2750 mutex_unlock(&ps
->smi_mutex
);
2754 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch
*ds
, int port
)
2756 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2758 if (port
>= 0 && port
< ps
->num_ports
)
2764 mv88e6xxx_phy_read(struct dsa_switch
*ds
, int port
, int regnum
)
2766 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2767 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
2773 mutex_lock(&ps
->smi_mutex
);
2774 ret
= _mv88e6xxx_phy_read(ds
, addr
, regnum
);
2775 mutex_unlock(&ps
->smi_mutex
);
2780 mv88e6xxx_phy_write(struct dsa_switch
*ds
, int port
, int regnum
, u16 val
)
2782 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2783 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
2789 mutex_lock(&ps
->smi_mutex
);
2790 ret
= _mv88e6xxx_phy_write(ds
, addr
, regnum
, val
);
2791 mutex_unlock(&ps
->smi_mutex
);
2796 mv88e6xxx_phy_read_indirect(struct dsa_switch
*ds
, int port
, int regnum
)
2798 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2799 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
2805 mutex_lock(&ps
->smi_mutex
);
2806 ret
= _mv88e6xxx_phy_read_indirect(ds
, addr
, regnum
);
2807 mutex_unlock(&ps
->smi_mutex
);
2812 mv88e6xxx_phy_write_indirect(struct dsa_switch
*ds
, int port
, int regnum
,
2815 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2816 int addr
= mv88e6xxx_port_to_phy_addr(ds
, port
);
2822 mutex_lock(&ps
->smi_mutex
);
2823 ret
= _mv88e6xxx_phy_write_indirect(ds
, addr
, regnum
, val
);
2824 mutex_unlock(&ps
->smi_mutex
);
2828 #ifdef CONFIG_NET_DSA_HWMON
2830 static int mv88e61xx_get_temp(struct dsa_switch
*ds
, int *temp
)
2832 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2838 mutex_lock(&ps
->smi_mutex
);
2840 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x16, 0x6);
2844 /* Enable temperature sensor */
2845 ret
= _mv88e6xxx_phy_read(ds
, 0x0, 0x1a);
2849 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x1a, ret
| (1 << 5));
2853 /* Wait for temperature to stabilize */
2854 usleep_range(10000, 12000);
2856 val
= _mv88e6xxx_phy_read(ds
, 0x0, 0x1a);
2862 /* Disable temperature sensor */
2863 ret
= _mv88e6xxx_phy_write(ds
, 0x0, 0x1a, ret
& ~(1 << 5));
2867 *temp
= ((val
& 0x1f) - 5) * 5;
2870 _mv88e6xxx_phy_write(ds
, 0x0, 0x16, 0x0);
2871 mutex_unlock(&ps
->smi_mutex
);
/* mv88e63xx_get_temp() - read the temperature sensor on 6352/6320
 * family chips.
 * @ds: DSA switch instance
 * @temp: out: temperature in degrees Celsius
 *
 * The 6320 family exposes the sensor via PHY 3, the 6352 via PHY 0
 * (page 6, register 27).  Returns 0 on success or a negative errno.
 */
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (ret < 0)
		return ret;

	/* Low byte is the reading, biased by +25 C. */
	*temp = (ret & 0xff) - 25;

	return 0;
}
/* mv88e6xxx_get_temp() - read the chip temperature, dispatching to the
 * variant-specific helper.
 * @ds: DSA switch instance
 * @temp: out: temperature in degrees Celsius
 *
 * 6352/6320 family chips use the dedicated sensor register path; all
 * other chips fall back to the PHY-0 sensor sequence.
 * Returns 0 on success or a negative errno.
 */
int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return mv88e61xx_get_temp(ds, temp);

	return mv88e63xx_get_temp(ds, temp);
}
2899 int mv88e6xxx_get_temp_limit(struct dsa_switch
*ds
, int *temp
)
2901 int phy
= mv88e6xxx_6320_family(ds
) ? 3 : 0;
2904 if (!mv88e6xxx_6320_family(ds
) && !mv88e6xxx_6352_family(ds
))
2909 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
2913 *temp
= (((ret
>> 8) & 0x1f) * 5) - 25;
2918 int mv88e6xxx_set_temp_limit(struct dsa_switch
*ds
, int temp
)
2920 int phy
= mv88e6xxx_6320_family(ds
) ? 3 : 0;
2923 if (!mv88e6xxx_6320_family(ds
) && !mv88e6xxx_6352_family(ds
))
2926 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
2929 temp
= clamp_val(DIV_ROUND_CLOSEST(temp
, 5) + 5, 0, 0x1f);
2930 return mv88e6xxx_phy_page_write(ds
, phy
, 6, 26,
2931 (ret
& 0xe0ff) | (temp
<< 8));
2934 int mv88e6xxx_get_temp_alarm(struct dsa_switch
*ds
, bool *alarm
)
2936 int phy
= mv88e6xxx_6320_family(ds
) ? 3 : 0;
2939 if (!mv88e6xxx_6320_family(ds
) && !mv88e6xxx_6352_family(ds
))
2944 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
2948 *alarm
= !!(ret
& 0x40);
2952 #endif /* CONFIG_NET_DSA_HWMON */
2954 static int __init
mv88e6xxx_init(void)
2956 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2957 register_switch_driver(&mv88e6131_switch_driver
);
2959 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2960 register_switch_driver(&mv88e6123_61_65_switch_driver
);
2962 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2963 register_switch_driver(&mv88e6352_switch_driver
);
2965 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2966 register_switch_driver(&mv88e6171_switch_driver
);
2970 module_init(mv88e6xxx_init
);
2972 static void __exit
mv88e6xxx_cleanup(void)
2974 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2975 unregister_switch_driver(&mv88e6171_switch_driver
);
2977 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2978 unregister_switch_driver(&mv88e6352_switch_driver
);
2980 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2981 unregister_switch_driver(&mv88e6123_61_65_switch_driver
);
2983 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2984 unregister_switch_driver(&mv88e6131_switch_driver
);
2987 module_exit(mv88e6xxx_cleanup
);
2989 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2990 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2991 MODULE_LICENSE("GPL");