drivers/net/dsa/mv88e6xxx.c (mirror_ubuntu-artful-kernel.git, commit 6aac58b8b78b5146a36ca79881b747798248edee)
1 /*
2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
4 *
5 * Copyright (c) 2015 CMC Electronics, Inc.
6 * Added support for VLAN Table Unit operations
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 #include <linux/delay.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_bridge.h>
18 #include <linux/jiffies.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/phy.h>
24 #include <net/dsa.h>
25 #include <net/switchdev.h>
26 #include "mv88e6xxx.h"
27
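/* Locking convention: functions prefixed with a single underscore expect the
 * caller to already hold ps->smi_mutex; their unprefixed wrappers take and
 * release the mutex themselves. assert_smi_lock() makes a violation of that
 * assumption visible as a stack dump instead of silent register corruption.
 */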
28 static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
29 {
30 if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
31 dev_err(ps->dev, "SMI lock not held!\n");
32 dump_stack();
33 }
34 }
35
36 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
37 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
38 * will be directly accessible on some {device address,register address}
39 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
40 * will only respond to SMI transactions to that specific address, and
41 * an indirect addressing mechanism needs to be used to access its
42 * registers.
43 */
44 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
45 {
46 int ret;
47 int i;
48
49 for (i = 0; i < 16; i++) {
50 ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
51 if (ret < 0)
52 return ret;
53
54 if ((ret & SMI_CMD_BUSY) == 0)
55 return 0;
56 }
57
58 return -ETIMEDOUT;
59 }
60
61 static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
62 int reg)
63 {
64 int ret;
65
66 if (sw_addr == 0)
67 return mdiobus_read_nested(bus, addr, reg);
68
69 /* Wait for the bus to become free. */
70 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
71 if (ret < 0)
72 return ret;
73
74 /* Transmit the read command. */
75 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
76 SMI_CMD_OP_22_READ | (addr << 5) | reg);
77 if (ret < 0)
78 return ret;
79
80 /* Wait for the read command to complete. */
81 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
82 if (ret < 0)
83 return ret;
84
85 /* Read the data. */
86 ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
87 if (ret < 0)
88 return ret;
89
90 return ret & 0xffff;
91 }
92
93 static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
94 int addr, int reg)
95 {
96 int ret;
97
98 assert_smi_lock(ps);
99
100 ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
101 if (ret < 0)
102 return ret;
103
104 dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
105 addr, reg, ret);
106
107 return ret;
108 }
109
110 int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
111 {
112 int ret;
113
114 mutex_lock(&ps->smi_mutex);
115 ret = _mv88e6xxx_reg_read(ps, addr, reg);
116 mutex_unlock(&ps->smi_mutex);
117
118 return ret;
119 }
120
121 static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
122 int reg, u16 val)
123 {
124 int ret;
125
126 if (sw_addr == 0)
127 return mdiobus_write_nested(bus, addr, reg, val);
128
129 /* Wait for the bus to become free. */
130 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
131 if (ret < 0)
132 return ret;
133
134 /* Transmit the data to write. */
135 ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
136 if (ret < 0)
137 return ret;
138
139 /* Transmit the write command. */
140 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
141 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
142 if (ret < 0)
143 return ret;
144
145 /* Wait for the write command to complete. */
146 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
147 if (ret < 0)
148 return ret;
149
150 return 0;
151 }
152
153 static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
154 int reg, u16 val)
155 {
156 assert_smi_lock(ps);
157
158 dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
159 addr, reg, val);
160
161 return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
162 }
163
164 int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
165 int reg, u16 val)
166 {
167 int ret;
168
169 mutex_lock(&ps->smi_mutex);
170 ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
171 mutex_unlock(&ps->smi_mutex);
172
173 return ret;
174 }
175
176 static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
177 {
178 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
179 int err;
180
181 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
182 (addr[0] << 8) | addr[1]);
183 if (err)
184 return err;
185
186 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
187 (addr[2] << 8) | addr[3]);
188 if (err)
189 return err;
190
191 return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
192 (addr[4] << 8) | addr[5]);
193 }
194
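/* Program the switch MAC address one byte at a time through the Global 2
 * Switch MAC register: each write sets the busy bit, places the byte index
 * in the upper byte and the byte value in the lower byte, then polls until
 * the busy bit clears before moving on to the next byte.
 */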
195 static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
196 {
197 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
198 int ret;
199 int i;
200
201 for (i = 0; i < 6; i++) {
202 int j;
203
204 /* Write the MAC address byte. */
205 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
206 GLOBAL2_SWITCH_MAC_BUSY |
207 (i << 8) | addr[i]);
208 if (ret)
209 return ret;
210
211 /* Wait for the write to complete. */
212 for (j = 0; j < 16; j++) {
213 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
214 GLOBAL2_SWITCH_MAC);
215 if (ret < 0)
216 return ret;
217
218 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
219 break;
220 }
221 if (j == 16)
222 return -ETIMEDOUT;
223 }
224
225 return 0;
226 }
227
228 int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
229 {
230 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
231
232 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC))
233 return mv88e6xxx_set_addr_indirect(ds, addr);
234 else
235 return mv88e6xxx_set_addr_direct(ds, addr);
236 }
237
238 static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
239 int regnum)
240 {
241 if (addr >= 0)
242 return _mv88e6xxx_reg_read(ps, addr, regnum);
243 return 0xffff;
244 }
245
246 static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
247 int regnum, u16 val)
248 {
249 if (addr >= 0)
250 return _mv88e6xxx_reg_write(ps, addr, regnum, val);
251 return 0;
252 }
253
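/* PHY Polling Unit (PPU) handling: while the PPU is busy polling the PHYs,
 * direct PHY register access from the CPU is not reliable, so the PPU is
 * temporarily disabled around such accesses. The access_get/access_put pair
 * below stops it on demand and arms a timer that re-enables it shortly after
 * the last access, so it is not toggled for every single register.
 */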
254 static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
255 {
256 int ret;
257 unsigned long timeout;
258
259 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
260 if (ret < 0)
261 return ret;
262
263 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
264 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
265 if (ret)
266 return ret;
267
268 timeout = jiffies + 1 * HZ;
269 while (time_before(jiffies, timeout)) {
270 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
271 if (ret < 0)
272 return ret;
273
274 usleep_range(1000, 2000);
275 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
276 GLOBAL_STATUS_PPU_POLLING)
277 return 0;
278 }
279
280 return -ETIMEDOUT;
281 }
282
283 static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
284 {
285 int ret, err;
286 unsigned long timeout;
287
288 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
289 if (ret < 0)
290 return ret;
291
292 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
293 ret | GLOBAL_CONTROL_PPU_ENABLE);
294 if (err)
295 return err;
296
297 timeout = jiffies + 1 * HZ;
298 while (time_before(jiffies, timeout)) {
299 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
300 if (ret < 0)
301 return ret;
302
303 usleep_range(1000, 2000);
304 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
305 GLOBAL_STATUS_PPU_POLLING)
306 return 0;
307 }
308
309 return -ETIMEDOUT;
310 }
311
312 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
313 {
314 struct mv88e6xxx_priv_state *ps;
315
316 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
317 if (mutex_trylock(&ps->ppu_mutex)) {
318 if (mv88e6xxx_ppu_enable(ps) == 0)
319 ps->ppu_disabled = 0;
320 mutex_unlock(&ps->ppu_mutex);
321 }
322 }
323
324 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
325 {
326 struct mv88e6xxx_priv_state *ps = (void *)_ps;
327
328 schedule_work(&ps->ppu_work);
329 }
330
331 static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
332 {
333 int ret;
334
335 mutex_lock(&ps->ppu_mutex);
336
337 /* If the PHY polling unit is enabled, disable it so that
338 * we can access the PHY registers. If it was already
339 * disabled, cancel the timer that is going to re-enable
340 * it.
341 */
342 if (!ps->ppu_disabled) {
343 ret = mv88e6xxx_ppu_disable(ps);
344 if (ret < 0) {
345 mutex_unlock(&ps->ppu_mutex);
346 return ret;
347 }
348 ps->ppu_disabled = 1;
349 } else {
350 del_timer(&ps->ppu_timer);
351 ret = 0;
352 }
353
354 return ret;
355 }
356
357 static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
358 {
359 /* Schedule a timer to re-enable the PHY polling unit. */
360 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
361 mutex_unlock(&ps->ppu_mutex);
362 }
363
364 void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
365 {
366 mutex_init(&ps->ppu_mutex);
367 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
368 init_timer(&ps->ppu_timer);
369 ps->ppu_timer.data = (unsigned long)ps;
370 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
371 }
372
373 static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
374 int regnum)
375 {
376 int ret;
377
378 ret = mv88e6xxx_ppu_access_get(ps);
379 if (ret >= 0) {
380 ret = _mv88e6xxx_reg_read(ps, addr, regnum);
381 mv88e6xxx_ppu_access_put(ps);
382 }
383
384 return ret;
385 }
386
387 static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
388 int regnum, u16 val)
389 {
390 int ret;
391
392 ret = mv88e6xxx_ppu_access_get(ps);
393 if (ret >= 0) {
394 ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
395 mv88e6xxx_ppu_access_put(ps);
396 }
397
398 return ret;
399 }
400
401 static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
402 {
403 return ps->info->family == MV88E6XXX_FAMILY_6065;
404 }
405
406 static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
407 {
408 return ps->info->family == MV88E6XXX_FAMILY_6095;
409 }
410
411 static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
412 {
413 return ps->info->family == MV88E6XXX_FAMILY_6097;
414 }
415
416 static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
417 {
418 return ps->info->family == MV88E6XXX_FAMILY_6165;
419 }
420
421 static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
422 {
423 return ps->info->family == MV88E6XXX_FAMILY_6185;
424 }
425
426 static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
427 {
428 return ps->info->family == MV88E6XXX_FAMILY_6320;
429 }
430
431 static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
432 {
433 return ps->info->family == MV88E6XXX_FAMILY_6351;
434 }
435
436 static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
437 {
438 return ps->info->family == MV88E6XXX_FAMILY_6352;
439 }
440
441 static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
442 {
443 return ps->info->num_databases;
444 }
445
446 static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
447 {
448 /* Does the device have dedicated FID registers for ATU and VTU ops? */
449 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
450 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
451 return true;
452
453 return false;
454 }
455
456 static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state *ps)
457 {
458 /* Does the device have STU and dedicated SID registers for VTU ops? */
459 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
460 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
461 return true;
462
463 return false;
464 }
465
466 /* We expect the switch to perform auto negotiation if there is a real
467 * phy. However, in the case of a fixed link phy, we force the port
468 * settings from the fixed link settings.
469 */
470 void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
471 struct phy_device *phydev)
472 {
473 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
474 u32 reg;
475 int ret;
476
477 if (!phy_is_pseudo_fixed_link(phydev))
478 return;
479
480 mutex_lock(&ps->smi_mutex);
481
482 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
483 if (ret < 0)
484 goto out;
485
486 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
487 PORT_PCS_CTRL_FORCE_LINK |
488 PORT_PCS_CTRL_DUPLEX_FULL |
489 PORT_PCS_CTRL_FORCE_DUPLEX |
490 PORT_PCS_CTRL_UNFORCED);
491
492 reg |= PORT_PCS_CTRL_FORCE_LINK;
493 if (phydev->link)
494 reg |= PORT_PCS_CTRL_LINK_UP;
495
496 if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
497 goto out;
498
499 switch (phydev->speed) {
500 case SPEED_1000:
501 reg |= PORT_PCS_CTRL_1000;
502 break;
503 case SPEED_100:
504 reg |= PORT_PCS_CTRL_100;
505 break;
506 case SPEED_10:
507 reg |= PORT_PCS_CTRL_10;
508 break;
509 default:
510 pr_info("Unknown speed\n");
511 goto out;
512 }
513
514 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
515 if (phydev->duplex == DUPLEX_FULL)
516 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
517
518 if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
519 (port >= ps->info->num_ports - 2)) {
520 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
521 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
522 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
523 reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
524 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
525 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
526 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
527 }
528 _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
529
530 out:
531 mutex_unlock(&ps->smi_mutex);
532 }
533
534 static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
535 {
536 int ret;
537 int i;
538
539 for (i = 0; i < 10; i++) {
540 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
541 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
542 return 0;
543 }
544
545 return -ETIMEDOUT;
546 }
547
548 static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
549 int port)
550 {
551 int ret;
552
553 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
554 port = (port + 1) << 5;
555
556 /* Snapshot the hardware statistics counters for this port. */
557 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
558 GLOBAL_STATS_OP_CAPTURE_PORT |
559 GLOBAL_STATS_OP_HIST_RX_TX | port);
560 if (ret < 0)
561 return ret;
562
563 /* Wait for the snapshotting to complete. */
564 ret = _mv88e6xxx_stats_wait(ps);
565 if (ret < 0)
566 return ret;
567
568 return 0;
569 }
570
571 static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
572 int stat, u32 *val)
573 {
574 u32 _val;
575 int ret;
576
577 *val = 0;
578
579 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
580 GLOBAL_STATS_OP_READ_CAPTURED |
581 GLOBAL_STATS_OP_HIST_RX_TX | stat);
582 if (ret < 0)
583 return;
584
585 ret = _mv88e6xxx_stats_wait(ps);
586 if (ret < 0)
587 return;
588
589 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
590 if (ret < 0)
591 return;
592
593 _val = ret << 16;
594
595 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
596 if (ret < 0)
597 return;
598
599 *val = _val | ret;
600 }
601
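/* Hardware counter map: each entry is { name, size in bytes, stat/register
 * offset, counter bank }. BANK0/BANK1 counters are read through the global
 * stats unit after a per-port snapshot, PORT counters straight from the
 * port registers.
 */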
602 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
603 { "in_good_octets", 8, 0x00, BANK0, },
604 { "in_bad_octets", 4, 0x02, BANK0, },
605 { "in_unicast", 4, 0x04, BANK0, },
606 { "in_broadcasts", 4, 0x06, BANK0, },
607 { "in_multicasts", 4, 0x07, BANK0, },
608 { "in_pause", 4, 0x16, BANK0, },
609 { "in_undersize", 4, 0x18, BANK0, },
610 { "in_fragments", 4, 0x19, BANK0, },
611 { "in_oversize", 4, 0x1a, BANK0, },
612 { "in_jabber", 4, 0x1b, BANK0, },
613 { "in_rx_error", 4, 0x1c, BANK0, },
614 { "in_fcs_error", 4, 0x1d, BANK0, },
615 { "out_octets", 8, 0x0e, BANK0, },
616 { "out_unicast", 4, 0x10, BANK0, },
617 { "out_broadcasts", 4, 0x13, BANK0, },
618 { "out_multicasts", 4, 0x12, BANK0, },
619 { "out_pause", 4, 0x15, BANK0, },
620 { "excessive", 4, 0x11, BANK0, },
621 { "collisions", 4, 0x1e, BANK0, },
622 { "deferred", 4, 0x05, BANK0, },
623 { "single", 4, 0x14, BANK0, },
624 { "multiple", 4, 0x17, BANK0, },
625 { "out_fcs_error", 4, 0x03, BANK0, },
626 { "late", 4, 0x1f, BANK0, },
627 { "hist_64bytes", 4, 0x08, BANK0, },
628 { "hist_65_127bytes", 4, 0x09, BANK0, },
629 { "hist_128_255bytes", 4, 0x0a, BANK0, },
630 { "hist_256_511bytes", 4, 0x0b, BANK0, },
631 { "hist_512_1023bytes", 4, 0x0c, BANK0, },
632 { "hist_1024_max_bytes", 4, 0x0d, BANK0, },
633 { "sw_in_discards", 4, 0x10, PORT, },
634 { "sw_in_filtered", 2, 0x12, PORT, },
635 { "sw_out_filtered", 2, 0x13, PORT, },
636 { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
637 { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
638 { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
639 { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
640 { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
641 { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
642 { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
643 { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
644 { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
645 { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
646 { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
647 { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
648 { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
649 { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
650 { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
651 { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
652 { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
653 { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
654 { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
655 { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
656 { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
657 { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
658 { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
659 { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
660 { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
661 { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
662 };
663
664 static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
665 struct mv88e6xxx_hw_stat *stat)
666 {
667 switch (stat->type) {
668 case BANK0:
669 return true;
670 case BANK1:
671 return mv88e6xxx_6320_family(ps);
672 case PORT:
673 return mv88e6xxx_6095_family(ps) ||
674 mv88e6xxx_6185_family(ps) ||
675 mv88e6xxx_6097_family(ps) ||
676 mv88e6xxx_6165_family(ps) ||
677 mv88e6xxx_6351_family(ps) ||
678 mv88e6xxx_6352_family(ps);
679 }
680 return false;
681 }
682
683 static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
684 struct mv88e6xxx_hw_stat *s,
685 int port)
686 {
687 u32 low;
688 u32 high = 0;
689 int ret;
690 u64 value;
691
692 switch (s->type) {
693 case PORT:
694 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
695 if (ret < 0)
696 return UINT64_MAX;
697
698 low = ret;
699 if (s->sizeof_stat == 4) {
700 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
701 s->reg + 1);
702 if (ret < 0)
703 return UINT64_MAX;
704 low |= ((u32)ret) << 16;
705 }
706 break;
707 case BANK0:
708 case BANK1:
709 _mv88e6xxx_stats_read(ps, s->reg, &low);
710 if (s->sizeof_stat == 8)
711 _mv88e6xxx_stats_read(ps, s->reg + 1, &high);
712 }
713 value = (((u64)high) << 32) | low;
714 return value;
715 }
716
717 void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
718 {
719 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
720 struct mv88e6xxx_hw_stat *stat;
721 int i, j;
722
723 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
724 stat = &mv88e6xxx_hw_stats[i];
725 if (mv88e6xxx_has_stat(ps, stat)) {
726 memcpy(data + j * ETH_GSTRING_LEN, stat->string,
727 ETH_GSTRING_LEN);
728 j++;
729 }
730 }
731 }
732
733 int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
734 {
735 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
736 struct mv88e6xxx_hw_stat *stat;
737 int i, j;
738
739 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
740 stat = &mv88e6xxx_hw_stats[i];
741 if (mv88e6xxx_has_stat(ps, stat))
742 j++;
743 }
744 return j;
745 }
746
747 void
748 mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
749 int port, uint64_t *data)
750 {
751 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
752 struct mv88e6xxx_hw_stat *stat;
753 int ret;
754 int i, j;
755
756 mutex_lock(&ps->smi_mutex);
757
758 ret = _mv88e6xxx_stats_snapshot(ps, port);
759 if (ret < 0) {
760 mutex_unlock(&ps->smi_mutex);
761 return;
762 }
763 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
764 stat = &mv88e6xxx_hw_stats[i];
765 if (mv88e6xxx_has_stat(ps, stat)) {
766 data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
767 j++;
768 }
769 }
770
771 mutex_unlock(&ps->smi_mutex);
772 }
773
774 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
775 {
776 return 32 * sizeof(u16);
777 }
778
779 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
780 struct ethtool_regs *regs, void *_p)
781 {
782 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
783 u16 *p = _p;
784 int i;
785
786 regs->version = 0;
787
788 memset(p, 0xff, 32 * sizeof(u16));
789
790 for (i = 0; i < 32; i++) {
791 int ret;
792
793 ret = mv88e6xxx_reg_read(ps, REG_PORT(port), i);
794 if (ret >= 0)
795 p[i] = ret;
796 }
797 }
798
799 static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
800 u16 mask)
801 {
802 unsigned long timeout = jiffies + HZ / 10;
803
804 while (time_before(jiffies, timeout)) {
805 int ret;
806
807 ret = _mv88e6xxx_reg_read(ps, reg, offset);
808 if (ret < 0)
809 return ret;
810 if (!(ret & mask))
811 return 0;
812
813 usleep_range(1000, 2000);
814 }
815 return -ETIMEDOUT;
816 }
817
818 static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
819 int offset, u16 mask)
820 {
821 int ret;
822
823 mutex_lock(&ps->smi_mutex);
824 ret = _mv88e6xxx_wait(ps, reg, offset, mask);
825 mutex_unlock(&ps->smi_mutex);
826
827 return ret;
828 }
829
830 static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
831 {
832 return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
833 GLOBAL2_SMI_OP_BUSY);
834 }
835
836 static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
837 {
838 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
839
840 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
841 GLOBAL2_EEPROM_OP_LOAD);
842 }
843
844 static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
845 {
846 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
847
848 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
849 GLOBAL2_EEPROM_OP_BUSY);
850 }
851
852 static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
853 {
854 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
855 int ret;
856
857 mutex_lock(&ps->eeprom_mutex);
858
859 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
860 GLOBAL2_EEPROM_OP_READ |
861 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
862 if (ret < 0)
863 goto error;
864
865 ret = mv88e6xxx_eeprom_busy_wait(ds);
866 if (ret < 0)
867 goto error;
868
869 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
870 error:
871 mutex_unlock(&ps->eeprom_mutex);
872 return ret;
873 }
874
875 int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom,
876 u8 *data)
877 {
878 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
879 int offset;
880 int len;
881 int ret;
882
883 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
884 return -EOPNOTSUPP;
885
886 offset = eeprom->offset;
887 len = eeprom->len;
888 eeprom->len = 0;
889
890 eeprom->magic = 0xc3ec4951;
891
892 ret = mv88e6xxx_eeprom_load_wait(ds);
893 if (ret < 0)
894 return ret;
895
896 if (offset & 1) {
897 int word;
898
899 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
900 if (word < 0)
901 return word;
902
903 *data++ = (word >> 8) & 0xff;
904
905 offset++;
906 len--;
907 eeprom->len++;
908 }
909
910 while (len >= 2) {
911 int word;
912
913 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
914 if (word < 0)
915 return word;
916
917 *data++ = word & 0xff;
918 *data++ = (word >> 8) & 0xff;
919
920 offset += 2;
921 len -= 2;
922 eeprom->len += 2;
923 }
924
925 if (len) {
926 int word;
927
928 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
929 if (word < 0)
930 return word;
931
932 *data++ = word & 0xff;
933
934 offset++;
935 len--;
936 eeprom->len++;
937 }
938
939 return 0;
940 }
941
942 static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
943 {
944 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
945 int ret;
946
947 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
948 if (ret < 0)
949 return ret;
950
951 if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
952 return -EROFS;
953
954 return 0;
955 }
956
957 static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
958 u16 data)
959 {
960 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
961 int ret;
962
963 mutex_lock(&ps->eeprom_mutex);
964
965 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
966 if (ret < 0)
967 goto error;
968
969 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
970 GLOBAL2_EEPROM_OP_WRITE |
971 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
972 if (ret < 0)
973 goto error;
974
975 ret = mv88e6xxx_eeprom_busy_wait(ds);
976 error:
977 mutex_unlock(&ps->eeprom_mutex);
978 return ret;
979 }
980
981 int mv88e6xxx_set_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom,
982 u8 *data)
983 {
984 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
985 int offset;
986 int ret;
987 int len;
988
989 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
990 return -EOPNOTSUPP;
991
992 if (eeprom->magic != 0xc3ec4951)
993 return -EINVAL;
994
995 ret = mv88e6xxx_eeprom_is_readonly(ds);
996 if (ret)
997 return ret;
998
999 offset = eeprom->offset;
1000 len = eeprom->len;
1001 eeprom->len = 0;
1002
1003 ret = mv88e6xxx_eeprom_load_wait(ds);
1004 if (ret < 0)
1005 return ret;
1006
1007 if (offset & 1) {
1008 int word;
1009
1010 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
1011 if (word < 0)
1012 return word;
1013
1014 word = (*data++ << 8) | (word & 0xff);
1015
1016 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1017 if (ret < 0)
1018 return ret;
1019
1020 offset++;
1021 len--;
1022 eeprom->len++;
1023 }
1024
1025 while (len >= 2) {
1026 int word;
1027
1028 word = *data++;
1029 word |= *data++ << 8;
1030
1031 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1032 if (ret < 0)
1033 return ret;
1034
1035 offset += 2;
1036 len -= 2;
1037 eeprom->len += 2;
1038 }
1039
1040 if (len) {
1041 int word;
1042
1043 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
1044 if (word < 0)
1045 return word;
1046
1047 word = (word & 0xff00) | *data++;
1048
1049 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1050 if (ret < 0)
1051 return ret;
1052
1053 offset++;
1054 len--;
1055 eeprom->len++;
1056 }
1057
1058 return 0;
1059 }
1060
1061 static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
1062 {
1063 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
1064 GLOBAL_ATU_OP_BUSY);
1065 }
1066
1067 static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
1068 int addr, int regnum)
1069 {
1070 int ret;
1071
1072 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
1073 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
1074 regnum);
1075 if (ret < 0)
1076 return ret;
1077
1078 ret = _mv88e6xxx_phy_wait(ps);
1079 if (ret < 0)
1080 return ret;
1081
1082 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
1083
1084 return ret;
1085 }
1086
1087 static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
1088 int addr, int regnum, u16 val)
1089 {
1090 int ret;
1091
1092 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
1093 if (ret < 0)
1094 return ret;
1095
1096 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
1097 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
1098 regnum);
1099
1100 return _mv88e6xxx_phy_wait(ps);
1101 }
1102
1103 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
1104 {
1105 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1106 int reg;
1107
1108 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
1109 return -EOPNOTSUPP;
1110
1111 mutex_lock(&ps->smi_mutex);
1112
1113 reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
1114 if (reg < 0)
1115 goto out;
1116
1117 e->eee_enabled = !!(reg & 0x0200);
1118 e->tx_lpi_enabled = !!(reg & 0x0100);
1119
1120 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
1121 if (reg < 0)
1122 goto out;
1123
1124 e->eee_active = !!(reg & PORT_STATUS_EEE);
1125 reg = 0;
1126
1127 out:
1128 mutex_unlock(&ps->smi_mutex);
1129 return reg;
1130 }
1131
1132 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
1133 struct phy_device *phydev, struct ethtool_eee *e)
1134 {
1135 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1136 int reg;
1137 int ret;
1138
1139 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
1140 return -EOPNOTSUPP;
1141
1142 mutex_lock(&ps->smi_mutex);
1143
1144 ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
1145 if (ret < 0)
1146 goto out;
1147
1148 reg = ret & ~0x0300;
1149 if (e->eee_enabled)
1150 reg |= 0x0200;
1151 if (e->tx_lpi_enabled)
1152 reg |= 0x0100;
1153
1154 ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
1155 out:
1156 mutex_unlock(&ps->smi_mutex);
1157
1158 return ret;
1159 }
1160
1161 static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
1162 {
1163 int ret;
1164
1165 if (mv88e6xxx_has_fid_reg(ps)) {
1166 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
1167 if (ret < 0)
1168 return ret;
1169 } else if (mv88e6xxx_num_databases(ps) == 256) {
1170 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
1171 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
1172 if (ret < 0)
1173 return ret;
1174
1175 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
1176 (ret & 0xfff) |
1177 ((fid << 8) & 0xf000));
1178 if (ret < 0)
1179 return ret;
1180
1181 /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
1182 cmd |= fid & 0xf;
1183 }
1184
1185 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
1186 if (ret < 0)
1187 return ret;
1188
1189 return _mv88e6xxx_atu_wait(ps);
1190 }
1191
1192 static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
1193 struct mv88e6xxx_atu_entry *entry)
1194 {
1195 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
1196
1197 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1198 unsigned int mask, shift;
1199
1200 if (entry->trunk) {
1201 data |= GLOBAL_ATU_DATA_TRUNK;
1202 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1203 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1204 } else {
1205 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1206 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1207 }
1208
1209 data |= (entry->portv_trunkid << shift) & mask;
1210 }
1211
1212 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
1213 }
1214
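/* Issue an ATU flush or move operation. The entry state and port vector are
 * written to the ATU data register first; a non-zero FID restricts the
 * operation to that single database, otherwise it applies to all databases.
 * static_too selects whether static entries are affected as well.
 */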
1215 static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
1216 struct mv88e6xxx_atu_entry *entry,
1217 bool static_too)
1218 {
1219 int op;
1220 int err;
1221
1222 err = _mv88e6xxx_atu_wait(ps);
1223 if (err)
1224 return err;
1225
1226 err = _mv88e6xxx_atu_data_write(ps, entry);
1227 if (err)
1228 return err;
1229
1230 if (entry->fid) {
1231 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
1232 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
1233 } else {
1234 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
1235 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
1236 }
1237
1238 return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
1239 }
1240
1241 static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
1242 u16 fid, bool static_too)
1243 {
1244 struct mv88e6xxx_atu_entry entry = {
1245 .fid = fid,
1246 .state = 0, /* EntryState bits must be 0 */
1247 };
1248
1249 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
1250 }
1251
1252 static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
1253 int from_port, int to_port, bool static_too)
1254 {
1255 struct mv88e6xxx_atu_entry entry = {
1256 .trunk = false,
1257 .fid = fid,
1258 };
1259
1260 /* EntryState bits must be 0xF */
1261 entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1262
1263 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1264 entry.portv_trunkid = (to_port & 0x0f) << 4;
1265 entry.portv_trunkid |= from_port & 0x0f;
1266
1267 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
1268 }
1269
1270 static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
1271 int port, bool static_too)
1272 {
1273 /* Destination port 0xF means remove the entries */
1274 return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
1275 }
1276
1277 static const char * const mv88e6xxx_port_state_names[] = {
1278 [PORT_CONTROL_STATE_DISABLED] = "Disabled",
1279 [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
1280 [PORT_CONTROL_STATE_LEARNING] = "Learning",
1281 [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
1282 };
1283
1284 static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
1285 u8 state)
1286 {
1287 struct dsa_switch *ds = ps->ds;
1288 int reg, ret = 0;
1289 u8 oldstate;
1290
1291 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
1292 if (reg < 0)
1293 return reg;
1294
1295 oldstate = reg & PORT_CONTROL_STATE_MASK;
1296
1297 if (oldstate != state) {
1298 /* Flush forwarding database if we're moving a port
1299 * from Learning or Forwarding state to Disabled or
1300 * Blocking or Listening state.
1301 */
1302 if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
1303 oldstate == PORT_CONTROL_STATE_FORWARDING)
1304 && (state == PORT_CONTROL_STATE_DISABLED ||
1305 state == PORT_CONTROL_STATE_BLOCKING)) {
1306 ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
1307 if (ret)
1308 return ret;
1309 }
1310
1311 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1312 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
1313 reg);
1314 if (ret)
1315 return ret;
1316
1317 netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
1318 mv88e6xxx_port_state_names[state],
1319 mv88e6xxx_port_state_names[oldstate]);
1320 }
1321
1322 return ret;
1323 }
1324
1325 static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
1326 int port)
1327 {
1328 struct net_device *bridge = ps->ports[port].bridge_dev;
1329 const u16 mask = (1 << ps->info->num_ports) - 1;
1330 struct dsa_switch *ds = ps->ds;
1331 u16 output_ports = 0;
1332 int reg;
1333 int i;
1334
1335 /* allow CPU port or DSA link(s) to send frames to every port */
1336 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
1337 output_ports = mask;
1338 } else {
1339 for (i = 0; i < ps->info->num_ports; ++i) {
1340 /* allow sending frames to every group member */
1341 if (bridge && ps->ports[i].bridge_dev == bridge)
1342 output_ports |= BIT(i);
1343
1344 /* allow sending frames to CPU port and DSA link(s) */
1345 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
1346 output_ports |= BIT(i);
1347 }
1348 }
1349
1350 /* prevent frames from going back out of the port they came in on */
1351 output_ports &= ~BIT(port);
1352
1353 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
1354 if (reg < 0)
1355 return reg;
1356
1357 reg &= ~mask;
1358 reg |= output_ports & mask;
1359
1360 return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
1361 }
1362
1363 void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
1364 {
1365 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1366 int stp_state;
1367
1368 switch (state) {
1369 case BR_STATE_DISABLED:
1370 stp_state = PORT_CONTROL_STATE_DISABLED;
1371 break;
1372 case BR_STATE_BLOCKING:
1373 case BR_STATE_LISTENING:
1374 stp_state = PORT_CONTROL_STATE_BLOCKING;
1375 break;
1376 case BR_STATE_LEARNING:
1377 stp_state = PORT_CONTROL_STATE_LEARNING;
1378 break;
1379 case BR_STATE_FORWARDING:
1380 default:
1381 stp_state = PORT_CONTROL_STATE_FORWARDING;
1382 break;
1383 }
1384
1385 /* mv88e6xxx_port_stp_state_set may be called with softirqs disabled,
1386 * so we can not update the port state directly but need to schedule it.
1387 */
1388 ps->ports[port].state = stp_state;
1389 set_bit(port, ps->port_state_update_mask);
1390 schedule_work(&ps->bridge_work);
1391 }
1392
1393 static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
1394 u16 *new, u16 *old)
1395 {
1396 struct dsa_switch *ds = ps->ds;
1397 u16 pvid;
1398 int ret;
1399
1400 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
1401 if (ret < 0)
1402 return ret;
1403
1404 pvid = ret & PORT_DEFAULT_VLAN_MASK;
1405
1406 if (new) {
1407 ret &= ~PORT_DEFAULT_VLAN_MASK;
1408 ret |= *new & PORT_DEFAULT_VLAN_MASK;
1409
1410 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
1411 PORT_DEFAULT_VLAN, ret);
1412 if (ret < 0)
1413 return ret;
1414
1415 netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
1416 pvid);
1417 }
1418
1419 if (old)
1420 *old = pvid;
1421
1422 return 0;
1423 }
1424
1425 static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
1426 int port, u16 *pvid)
1427 {
1428 return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
1429 }
1430
1431 static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
1432 int port, u16 pvid)
1433 {
1434 return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
1435 }
1436
1437 static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
1438 {
1439 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
1440 GLOBAL_VTU_OP_BUSY);
1441 }
1442
1443 static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
1444 {
1445 int ret;
1446
1447 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
1448 if (ret < 0)
1449 return ret;
1450
1451 return _mv88e6xxx_vtu_wait(ps);
1452 }
1453
1454 static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
1455 {
1456 int ret;
1457
1458 ret = _mv88e6xxx_vtu_wait(ps);
1459 if (ret < 0)
1460 return ret;
1461
1462 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
1463 }
1464
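/* VTU/STU member data is packed one nibble per port, four ports per 16-bit
 * data register. The VTU member tag lives in the low two bits of each
 * nibble and the STU port state in the high two bits, hence the callers
 * pass a nibble_offset of 0 or 2 respectively.
 */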
1465 static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
1466 struct mv88e6xxx_vtu_stu_entry *entry,
1467 unsigned int nibble_offset)
1468 {
1469 u16 regs[3];
1470 int i;
1471 int ret;
1472
1473 for (i = 0; i < 3; ++i) {
1474 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1475 GLOBAL_VTU_DATA_0_3 + i);
1476 if (ret < 0)
1477 return ret;
1478
1479 regs[i] = ret;
1480 }
1481
1482 for (i = 0; i < ps->info->num_ports; ++i) {
1483 unsigned int shift = (i % 4) * 4 + nibble_offset;
1484 u16 reg = regs[i / 4];
1485
1486 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
1487 }
1488
1489 return 0;
1490 }
1491
1492 static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
1493 struct mv88e6xxx_vtu_stu_entry *entry,
1494 unsigned int nibble_offset)
1495 {
1496 u16 regs[3] = { 0 };
1497 int i;
1498 int ret;
1499
1500 for (i = 0; i < ps->info->num_ports; ++i) {
1501 unsigned int shift = (i % 4) * 4 + nibble_offset;
1502 u8 data = entry->data[i];
1503
1504 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1505 }
1506
1507 for (i = 0; i < 3; ++i) {
1508 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
1509 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1510 if (ret < 0)
1511 return ret;
1512 }
1513
1514 return 0;
1515 }
1516
1517 static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
1518 {
1519 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
1520 vid & GLOBAL_VTU_VID_MASK);
1521 }
1522
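/* Fetch the VTU entry following the VID last written with
 * _mv88e6xxx_vtu_vid_write(). Writing the all-ones VID first therefore
 * starts iteration from the lowest VID, and writing vid - 1 is used to look
 * up an exact VID.
 */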
1523 static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
1524 struct mv88e6xxx_vtu_stu_entry *entry)
1525 {
1526 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1527 int ret;
1528
1529 ret = _mv88e6xxx_vtu_wait(ps);
1530 if (ret < 0)
1531 return ret;
1532
1533 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
1534 if (ret < 0)
1535 return ret;
1536
1537 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
1538 if (ret < 0)
1539 return ret;
1540
1541 next.vid = ret & GLOBAL_VTU_VID_MASK;
1542 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1543
1544 if (next.valid) {
1545 ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 0);
1546 if (ret < 0)
1547 return ret;
1548
1549 if (mv88e6xxx_has_fid_reg(ps)) {
1550 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1551 GLOBAL_VTU_FID);
1552 if (ret < 0)
1553 return ret;
1554
1555 next.fid = ret & GLOBAL_VTU_FID_MASK;
1556 } else if (mv88e6xxx_num_databases(ps) == 256) {
1557 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1558 * VTU DBNum[3:0] are located in VTU Operation 3:0
1559 */
1560 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1561 GLOBAL_VTU_OP);
1562 if (ret < 0)
1563 return ret;
1564
1565 next.fid = (ret & 0xf00) >> 4;
1566 next.fid |= ret & 0xf;
1567 }
1568
1569 if (mv88e6xxx_has_stu(ps)) {
1570 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1571 GLOBAL_VTU_SID);
1572 if (ret < 0)
1573 return ret;
1574
1575 next.sid = ret & GLOBAL_VTU_SID_MASK;
1576 }
1577 }
1578
1579 *entry = next;
1580 return 0;
1581 }
1582
1583 int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
1584 struct switchdev_obj_port_vlan *vlan,
1585 int (*cb)(struct switchdev_obj *obj))
1586 {
1587 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1588 struct mv88e6xxx_vtu_stu_entry next;
1589 u16 pvid;
1590 int err;
1591
1592 mutex_lock(&ps->smi_mutex);
1593
1594 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
1595 if (err)
1596 goto unlock;
1597
1598 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
1599 if (err)
1600 goto unlock;
1601
1602 do {
1603 err = _mv88e6xxx_vtu_getnext(ps, &next);
1604 if (err)
1605 break;
1606
1607 if (!next.valid)
1608 break;
1609
1610 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1611 continue;
1612
1613 /* reinit and dump this VLAN obj */
1614 vlan->vid_begin = vlan->vid_end = next.vid;
1615 vlan->flags = 0;
1616
1617 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1618 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1619
1620 if (next.vid == pvid)
1621 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1622
1623 err = cb(&vlan->obj);
1624 if (err)
1625 break;
1626 } while (next.vid < GLOBAL_VTU_VID_MASK);
1627
1628 unlock:
1629 mutex_unlock(&ps->smi_mutex);
1630
1631 return err;
1632 }
1633
1634 static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
1635 struct mv88e6xxx_vtu_stu_entry *entry)
1636 {
1637 u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
1638 u16 reg = 0;
1639 int ret;
1640
1641 ret = _mv88e6xxx_vtu_wait(ps);
1642 if (ret < 0)
1643 return ret;
1644
1645 if (!entry->valid)
1646 goto loadpurge;
1647
1648 /* Write port member tags */
1649 ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
1650 if (ret < 0)
1651 return ret;
1652
1653 if (mv88e6xxx_has_stu(ps)) {
1654 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1655 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1656 if (ret < 0)
1657 return ret;
1658 }
1659
1660 if (mv88e6xxx_has_fid_reg(ps)) {
1661 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1662 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1663 if (ret < 0)
1664 return ret;
1665 } else if (mv88e6xxx_num_databases(ps) == 256) {
1666 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1667 * VTU DBNum[3:0] are located in VTU Operation 3:0
1668 */
1669 op |= (entry->fid & 0xf0) << 4;
1670 op |= entry->fid & 0xf;
1671 }
1672
1673 reg = GLOBAL_VTU_VID_VALID;
1674 loadpurge:
1675 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1676 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1677 if (ret < 0)
1678 return ret;
1679
1680 return _mv88e6xxx_vtu_cmd(ps, op);
1681 }
1682
1683 static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
1684 struct mv88e6xxx_vtu_stu_entry *entry)
1685 {
1686 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1687 int ret;
1688
1689 ret = _mv88e6xxx_vtu_wait(ps);
1690 if (ret < 0)
1691 return ret;
1692
1693 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
1694 sid & GLOBAL_VTU_SID_MASK);
1695 if (ret < 0)
1696 return ret;
1697
1698 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
1699 if (ret < 0)
1700 return ret;
1701
1702 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
1703 if (ret < 0)
1704 return ret;
1705
1706 next.sid = ret & GLOBAL_VTU_SID_MASK;
1707
1708 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
1709 if (ret < 0)
1710 return ret;
1711
1712 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1713
1714 if (next.valid) {
1715 ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 2);
1716 if (ret < 0)
1717 return ret;
1718 }
1719
1720 *entry = next;
1721 return 0;
1722 }
1723
1724 static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
1725 struct mv88e6xxx_vtu_stu_entry *entry)
1726 {
1727 u16 reg = 0;
1728 int ret;
1729
1730 ret = _mv88e6xxx_vtu_wait(ps);
1731 if (ret < 0)
1732 return ret;
1733
1734 if (!entry->valid)
1735 goto loadpurge;
1736
1737 /* Write port states */
1738 ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
1739 if (ret < 0)
1740 return ret;
1741
1742 reg = GLOBAL_VTU_VID_VALID;
1743 loadpurge:
1744 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1745 if (ret < 0)
1746 return ret;
1747
1748 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1749 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1750 if (ret < 0)
1751 return ret;
1752
1753 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1754 }
1755
1756 static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
1757 u16 *new, u16 *old)
1758 {
1759 struct dsa_switch *ds = ps->ds;
1760 u16 upper_mask;
1761 u16 fid;
1762 int ret;
1763
1764 if (mv88e6xxx_num_databases(ps) == 4096)
1765 upper_mask = 0xff;
1766 else if (mv88e6xxx_num_databases(ps) == 256)
1767 upper_mask = 0xf;
1768 else
1769 return -EOPNOTSUPP;
1770
1771 /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
1772 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
1773 if (ret < 0)
1774 return ret;
1775
1776 fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
1777
1778 if (new) {
1779 ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
1780 ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
1781
1782 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
1783 ret);
1784 if (ret < 0)
1785 return ret;
1786 }
1787
1788 /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
1789 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
1790 if (ret < 0)
1791 return ret;
1792
1793 fid |= (ret & upper_mask) << 4;
1794
1795 if (new) {
1796 ret &= ~upper_mask;
1797 ret |= (*new >> 4) & upper_mask;
1798
1799 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
1800 ret);
1801 if (ret < 0)
1802 return ret;
1803
1804 netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
1805 }
1806
1807 if (old)
1808 *old = fid;
1809
1810 return 0;
1811 }
1812
1813 static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
1814 int port, u16 *fid)
1815 {
1816 return _mv88e6xxx_port_fid(ps, port, NULL, fid);
1817 }
1818
1819 static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
1820 int port, u16 fid)
1821 {
1822 return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
1823 }
1824
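/* Pick an unused FID: mark every FID referenced by a port's default FID or
 * by a VTU entry in a bitmap, take the lowest free non-zero one, and flush
 * any stale ATU entries from that database before handing it out.
 */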
1825 static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
1826 {
1827 DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
1828 struct mv88e6xxx_vtu_stu_entry vlan;
1829 int i, err;
1830
1831 bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
1832
1833 /* Set every FID bit used by the (un)bridged ports */
1834 for (i = 0; i < ps->info->num_ports; ++i) {
1835 err = _mv88e6xxx_port_fid_get(ps, i, fid);
1836 if (err)
1837 return err;
1838
1839 set_bit(*fid, fid_bitmap);
1840 }
1841
1842 /* Set every FID bit used by the VLAN entries */
1843 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
1844 if (err)
1845 return err;
1846
1847 do {
1848 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
1849 if (err)
1850 return err;
1851
1852 if (!vlan.valid)
1853 break;
1854
1855 set_bit(vlan.fid, fid_bitmap);
1856 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
1857
1858 /* The reset value 0x000 is used to indicate that multiple address
1859 * databases are not needed. Return the next positive available.
1860 */
1861 *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
1862 if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
1863 return -ENOSPC;
1864
1865 /* Clear the database */
1866 return _mv88e6xxx_atu_flush(ps, *fid, true);
1867 }
1868
1869 static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
1870 struct mv88e6xxx_vtu_stu_entry *entry)
1871 {
1872 struct dsa_switch *ds = ps->ds;
1873 struct mv88e6xxx_vtu_stu_entry vlan = {
1874 .valid = true,
1875 .vid = vid,
1876 };
1877 int i, err;
1878
1879 err = _mv88e6xxx_fid_new(ps, &vlan.fid);
1880 if (err)
1881 return err;
1882
1883 /* exclude all ports except the CPU and DSA ports */
1884 for (i = 0; i < ps->info->num_ports; ++i)
1885 vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
1886 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1887 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1888
1889 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
1890 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
1891 struct mv88e6xxx_vtu_stu_entry vstp;
1892
1893 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1894 * implemented, only one STU entry is needed to cover all VTU
1895 * entries. Thus, validate the SID 0.
1896 */
1897 vlan.sid = 0;
1898 err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
1899 if (err)
1900 return err;
1901
1902 if (vstp.sid != vlan.sid || !vstp.valid) {
1903 memset(&vstp, 0, sizeof(vstp));
1904 vstp.valid = true;
1905 vstp.sid = vlan.sid;
1906
1907 err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
1908 if (err)
1909 return err;
1910 }
1911 }
1912
1913 *entry = vlan;
1914 return 0;
1915 }
1916
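/* Look up the VTU entry for an exact VID by seeking from vid - 1. If it
 * does not exist and creat is set, a fresh entry (with a newly allocated
 * FID) is prepared; otherwise -EOPNOTSUPP is returned so that switchdev
 * falls back to a software VLAN.
 */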
1917 static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
1918 struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
1919 {
1920 int err;
1921
1922 if (!vid)
1923 return -EINVAL;
1924
1925 err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
1926 if (err)
1927 return err;
1928
1929 err = _mv88e6xxx_vtu_getnext(ps, entry);
1930 if (err)
1931 return err;
1932
1933 if (entry->vid != vid || !entry->valid) {
1934 if (!creat)
1935 return -EOPNOTSUPP;
1936 /* -ENOENT would've been more appropriate, but switchdev expects
1937 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
1938 */
1939
1940 err = _mv88e6xxx_vtu_new(ps, vid, entry);
1941 }
1942
1943 return err;
1944 }
1945
1946 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1947 u16 vid_begin, u16 vid_end)
1948 {
1949 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1950 struct mv88e6xxx_vtu_stu_entry vlan;
1951 int i, err;
1952
1953 if (!vid_begin)
1954 return -EOPNOTSUPP;
1955
1956 mutex_lock(&ps->smi_mutex);
1957
1958 err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
1959 if (err)
1960 goto unlock;
1961
1962 do {
1963 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
1964 if (err)
1965 goto unlock;
1966
1967 if (!vlan.valid)
1968 break;
1969
1970 if (vlan.vid > vid_end)
1971 break;
1972
1973 for (i = 0; i < ps->info->num_ports; ++i) {
1974 if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
1975 continue;
1976
1977 if (vlan.data[i] ==
1978 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1979 continue;
1980
1981 if (ps->ports[i].bridge_dev ==
1982 ps->ports[port].bridge_dev)
1983 break; /* same bridge, check next VLAN */
1984
1985 netdev_warn(ds->ports[port],
1986 "hardware VLAN %d already used by %s\n",
1987 vlan.vid,
1988 netdev_name(ps->ports[i].bridge_dev));
1989 err = -EOPNOTSUPP;
1990 goto unlock;
1991 }
1992 } while (vlan.vid < vid_end);
1993
1994 unlock:
1995 mutex_unlock(&ps->smi_mutex);
1996
1997 return err;
1998 }
1999
2000 static const char * const mv88e6xxx_port_8021q_mode_names[] = {
2001 [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
2002 [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
2003 [PORT_CONTROL_2_8021Q_CHECK] = "Check",
2004 [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
2005 };
2006
2007 int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
2008 bool vlan_filtering)
2009 {
2010 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2011 u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
2012 PORT_CONTROL_2_8021Q_DISABLED;
2013 int ret;
2014
2015 mutex_lock(&ps->smi_mutex);
2016
2017 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
2018 if (ret < 0)
2019 goto unlock;
2020
2021 old = ret & PORT_CONTROL_2_8021Q_MASK;
2022
2023 if (new != old) {
2024 ret &= ~PORT_CONTROL_2_8021Q_MASK;
2025 ret |= new & PORT_CONTROL_2_8021Q_MASK;
2026
2027 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
2028 ret);
2029 if (ret < 0)
2030 goto unlock;
2031
2032 netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
2033 mv88e6xxx_port_8021q_mode_names[new],
2034 mv88e6xxx_port_8021q_mode_names[old]);
2035 }
2036
2037 ret = 0;
2038 unlock:
2039 mutex_unlock(&ps->smi_mutex);
2040
2041 return ret;
2042 }
2043
2044 int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
2045 const struct switchdev_obj_port_vlan *vlan,
2046 struct switchdev_trans *trans)
2047 {
2048 int err;
2049
2050 /* If the requested port doesn't belong to the same bridge as the VLAN
2051 * members, do not support it (yet) and fallback to software VLAN.
2052 */
2053 err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
2054 vlan->vid_end);
2055 if (err)
2056 return err;
2057
2058 /* We don't need any dynamic resource from the kernel (yet),
2059 * so skip the prepare phase.
2060 */
2061 return 0;
2062 }
2063
2064 static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
2065 u16 vid, bool untagged)
2066 {
2067 struct mv88e6xxx_vtu_stu_entry vlan;
2068 int err;
2069
2070 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
2071 if (err)
2072 return err;
2073
2074 vlan.data[port] = untagged ?
2075 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
2076 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
2077
2078 return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
2079 }
2080
2081 void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
2082 const struct switchdev_obj_port_vlan *vlan,
2083 struct switchdev_trans *trans)
2084 {
2085 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2086 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2087 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2088 u16 vid;
2089
2090 mutex_lock(&ps->smi_mutex);
2091
2092 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
2093 if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
2094 netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
2095 vid, untagged ? 'u' : 't');
2096
2097 if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
2098 netdev_err(ds->ports[port], "failed to set PVID %d\n",
2099 vlan->vid_end);
2100
2101 mutex_unlock(&ps->smi_mutex);
2102 }
2103
2104 static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
2105 int port, u16 vid)
2106 {
2107 struct dsa_switch *ds = ps->ds;
2108 struct mv88e6xxx_vtu_stu_entry vlan;
2109 int i, err;
2110
2111 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
2112 if (err)
2113 return err;
2114
2115 /* Tell switchdev if this VLAN is handled in software */
2116 if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
2117 return -EOPNOTSUPP;
2118
2119 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
2120
2121 /* keep the VLAN unless all ports are excluded */
2122 vlan.valid = false;
2123 for (i = 0; i < ps->info->num_ports; ++i) {
2124 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2125 continue;
2126
2127 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
2128 vlan.valid = true;
2129 break;
2130 }
2131 }
2132
2133 err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
2134 if (err)
2135 return err;
2136
2137 return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
2138 }
2139
2140 int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
2141 const struct switchdev_obj_port_vlan *vlan)
2142 {
2143 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2144 u16 pvid, vid;
2145 int err = 0;
2146
2147 mutex_lock(&ps->smi_mutex);
2148
2149 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
2150 if (err)
2151 goto unlock;
2152
2153 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
2154 err = _mv88e6xxx_port_vlan_del(ps, port, vid);
2155 if (err)
2156 goto unlock;
2157
2158 if (vid == pvid) {
2159 err = _mv88e6xxx_port_pvid_set(ps, port, 0);
2160 if (err)
2161 goto unlock;
2162 }
2163 }
2164
2165 unlock:
2166 mutex_unlock(&ps->smi_mutex);
2167
2168 return err;
2169 }
2170
2171 static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
2172 const unsigned char *addr)
2173 {
2174 int i, ret;
2175
2176 for (i = 0; i < 3; i++) {
2177 ret = _mv88e6xxx_reg_write(
2178 ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
2179 (addr[i * 2] << 8) | addr[i * 2 + 1]);
2180 if (ret < 0)
2181 return ret;
2182 }
2183
2184 return 0;
2185 }
2186
2187 static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
2188 unsigned char *addr)
2189 {
2190 int i, ret;
2191
2192 for (i = 0; i < 3; i++) {
2193 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
2194 GLOBAL_ATU_MAC_01 + i);
2195 if (ret < 0)
2196 return ret;
2197 addr[i * 2] = ret >> 8;
2198 addr[i * 2 + 1] = ret & 0xff;
2199 }
2200
2201 return 0;
2202 }
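
/* Editor's illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the two helpers above pack and unpack a MAC address
 * big-endian into three 16-bit ATU registers starting at GLOBAL_ATU_MAC_01.
 * For 00:11:22:33:44:55 the three words written/read are 0x0011, 0x2233
 * and 0x4455.  The packing of a single word, in isolation, is simply:
 */
static inline u16 __maybe_unused mv88e6xxx_example_atu_mac_word(const u8 *addr,
								int word)
{
	/* word 0 -> addr[0]:addr[1], word 1 -> addr[2]:addr[3], ... */
	return (addr[word * 2] << 8) | addr[word * 2 + 1];
}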
2203
2204 static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
2205 struct mv88e6xxx_atu_entry *entry)
2206 {
2207 int ret;
2208
2209 ret = _mv88e6xxx_atu_wait(ps);
2210 if (ret < 0)
2211 return ret;
2212
2213 ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
2214 if (ret < 0)
2215 return ret;
2216
2217 ret = _mv88e6xxx_atu_data_write(ps, entry);
2218 if (ret < 0)
2219 return ret;
2220
2221 return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
2222 }
2223
2224 static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
2225 const unsigned char *addr, u16 vid,
2226 u8 state)
2227 {
2228 struct mv88e6xxx_atu_entry entry = { 0 };
2229 struct mv88e6xxx_vtu_stu_entry vlan;
2230 int err;
2231
2232 /* Null VLAN ID corresponds to the port private database */
2233 if (vid == 0)
2234 err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
2235 else
2236 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
2237 if (err)
2238 return err;
2239
2240 entry.fid = vlan.fid;
2241 entry.state = state;
2242 ether_addr_copy(entry.mac, addr);
2243 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2244 entry.trunk = false;
2245 entry.portv_trunkid = BIT(port);
2246 }
2247
2248 return _mv88e6xxx_atu_load(ps, &entry);
2249 }
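
/* Editor's note (illustrative): for a non-trunk entry, portv_trunkid is a
 * one-hot port vector, so loading an address on port 3 sets BIT(3) = 0x08,
 * and an entry meant to egress on ports 0 and 2 would carry 0x05.
 * Deleting an address is done by reloading it with state
 * GLOBAL_ATU_DATA_STATE_UNUSED and an empty port vector, which is what
 * mv88e6xxx_port_fdb_del() below does.
 */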
2250
2251 int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
2252 const struct switchdev_obj_port_fdb *fdb,
2253 struct switchdev_trans *trans)
2254 {
2255 /* We don't need any dynamic resource from the kernel (yet),
2256 * so skip the prepare phase.
2257 */
2258 return 0;
2259 }
2260
2261 void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
2262 const struct switchdev_obj_port_fdb *fdb,
2263 struct switchdev_trans *trans)
2264 {
2265 int state = is_multicast_ether_addr(fdb->addr) ?
2266 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2267 GLOBAL_ATU_DATA_STATE_UC_STATIC;
2268 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2269
2270 mutex_lock(&ps->smi_mutex);
2271 if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
2272 netdev_err(ds->ports[port], "failed to load MAC address\n");
2273 mutex_unlock(&ps->smi_mutex);
2274 }
2275
2276 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
2277 const struct switchdev_obj_port_fdb *fdb)
2278 {
2279 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2280 int ret;
2281
2282 mutex_lock(&ps->smi_mutex);
2283 ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
2284 GLOBAL_ATU_DATA_STATE_UNUSED);
2285 mutex_unlock(&ps->smi_mutex);
2286
2287 return ret;
2288 }
2289
2290 static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
2291 struct mv88e6xxx_atu_entry *entry)
2292 {
2293 struct mv88e6xxx_atu_entry next = { 0 };
2294 int ret;
2295
2296 next.fid = fid;
2297
2298 ret = _mv88e6xxx_atu_wait(ps);
2299 if (ret < 0)
2300 return ret;
2301
2302 ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
2303 if (ret < 0)
2304 return ret;
2305
2306 ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
2307 if (ret < 0)
2308 return ret;
2309
2310 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
2311 if (ret < 0)
2312 return ret;
2313
2314 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
2315 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2316 unsigned int mask, shift;
2317
2318 if (ret & GLOBAL_ATU_DATA_TRUNK) {
2319 next.trunk = true;
2320 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
2321 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
2322 } else {
2323 next.trunk = false;
2324 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
2325 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
2326 }
2327
2328 next.portv_trunkid = (ret & mask) >> shift;
2329 }
2330
2331 *entry = next;
2332 return 0;
2333 }
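
/* Editor's worked example (illustrative): with the usual field layout from
 * mv88e6xxx.h (entry state in bits 3:0, port vector starting at bit 4), a
 * GLOBAL_ATU_DATA value of 0x005e decodes to state 0xe
 * (GLOBAL_ATU_DATA_STATE_UC_STATIC) and a port vector of 0x5, i.e. the
 * address is reachable through ports 0 and 2.  The caller below stops the
 * walk once the state reads back UNUSED or the MAC wraps around to the
 * broadcast address.
 */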
2334
2335 static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
2336 u16 fid, u16 vid, int port,
2337 struct switchdev_obj_port_fdb *fdb,
2338 int (*cb)(struct switchdev_obj *obj))
2339 {
2340 struct mv88e6xxx_atu_entry addr = {
2341 .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
2342 };
2343 int err;
2344
2345 err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
2346 if (err)
2347 return err;
2348
2349 do {
2350 err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
2351 if (err)
2352 break;
2353
2354 if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
2355 break;
2356
2357 if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
2358 bool is_static = addr.state ==
2359 (is_multicast_ether_addr(addr.mac) ?
2360 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2361 GLOBAL_ATU_DATA_STATE_UC_STATIC);
2362
2363 fdb->vid = vid;
2364 ether_addr_copy(fdb->addr, addr.mac);
2365 fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
2366
2367 err = cb(&fdb->obj);
2368 if (err)
2369 break;
2370 }
2371 } while (!is_broadcast_ether_addr(addr.mac));
2372
2373 return err;
2374 }
2375
2376 int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
2377 struct switchdev_obj_port_fdb *fdb,
2378 int (*cb)(struct switchdev_obj *obj))
2379 {
2380 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2381 struct mv88e6xxx_vtu_stu_entry vlan = {
2382 .vid = GLOBAL_VTU_VID_MASK, /* all ones */
2383 };
2384 u16 fid;
2385 int err;
2386
2387 mutex_lock(&ps->smi_mutex);
2388
2389 /* Dump port's default Filtering Information Database (VLAN ID 0) */
2390 err = _mv88e6xxx_port_fid_get(ps, port, &fid);
2391 if (err)
2392 goto unlock;
2393
2394 err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
2395 if (err)
2396 goto unlock;
2397
2398 /* Dump VLANs' Filtering Information Databases */
2399 err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
2400 if (err)
2401 goto unlock;
2402
2403 do {
2404 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
2405 if (err)
2406 break;
2407
2408 if (!vlan.valid)
2409 break;
2410
2411 err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
2412 fdb, cb);
2413 if (err)
2414 break;
2415 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
2416
2417 unlock:
2418 mutex_unlock(&ps->smi_mutex);
2419
2420 return err;
2421 }
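
/* Editor's illustrative sketch (hypothetical, not wired into the driver):
 * the cb() argument above only has to match the switchdev dump-callback
 * signature.  The driver fills @fdb before every invocation, so a trivial
 * consumer could simply log each entry it is handed, using the
 * SWITCHDEV_OBJ_PORT_FDB() container helper from net/switchdev.h:
 */
static int __maybe_unused mv88e6xxx_example_fdb_dump_cb(struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);

	pr_debug("mv88e6xxx: dumped %pM vid %d\n", fdb->addr, fdb->vid);
	return 0;
}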
2422
2423 int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2424 struct net_device *bridge)
2425 {
2426 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2427 int i, err = 0;
2428
2429 mutex_lock(&ps->smi_mutex);
2430
2431 /* Assign the bridge and remap each port's VLANTable */
2432 ps->ports[port].bridge_dev = bridge;
2433
2434 for (i = 0; i < ps->info->num_ports; ++i) {
2435 if (ps->ports[i].bridge_dev == bridge) {
2436 err = _mv88e6xxx_port_based_vlan_map(ps, i);
2437 if (err)
2438 break;
2439 }
2440 }
2441
2442 mutex_unlock(&ps->smi_mutex);
2443
2444 return err;
2445 }
2446
2447 void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2448 {
2449 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2450 struct net_device *bridge = ps->ports[port].bridge_dev;
2451 int i;
2452
2453 mutex_lock(&ps->smi_mutex);
2454
2455 /* Unassign the bridge and remap each port's VLANTable */
2456 ps->ports[port].bridge_dev = NULL;
2457
2458 for (i = 0; i < ps->info->num_ports; ++i)
2459 if (i == port || ps->ports[i].bridge_dev == bridge)
2460 if (_mv88e6xxx_port_based_vlan_map(ps, i))
2461 netdev_warn(ds->ports[i], "failed to remap\n");
2462
2463 mutex_unlock(&ps->smi_mutex);
2464 }
2465
2466 static void mv88e6xxx_bridge_work(struct work_struct *work)
2467 {
2468 struct mv88e6xxx_priv_state *ps;
2469 struct dsa_switch *ds;
2470 int port;
2471
2472 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
2473 ds = ps->ds;
2474
2475 mutex_lock(&ps->smi_mutex);
2476
2477 for (port = 0; port < ps->info->num_ports; ++port)
2478 if (test_and_clear_bit(port, ps->port_state_update_mask) &&
2479 _mv88e6xxx_port_state(ps, port, ps->ports[port].state))
2480 netdev_warn(ds->ports[port],
2481 "failed to update state to %s\n",
2482 mv88e6xxx_port_state_names[ps->ports[port].state]);
2483
2484 mutex_unlock(&ps->smi_mutex);
2485 }
2486
2487 static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
2488 int port, int page, int reg, int val)
2489 {
2490 int ret;
2491
2492 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
2493 if (ret < 0)
2494 goto restore_page_0;
2495
2496 ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
2497 restore_page_0:
2498 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
2499
2500 return ret;
2501 }
2502
2503 static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
2504 int port, int page, int reg)
2505 {
2506 int ret;
2507
2508 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
2509 if (ret < 0)
2510 goto restore_page_0;
2511
2512 ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
2513 restore_page_0:
2514 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
2515
2516 return ret;
2517 }
2518
2519 static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
2520 {
2521 int ret;
2522
2523 ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
2524 MII_BMCR);
2525 if (ret < 0)
2526 return ret;
2527
2528 if (ret & BMCR_PDOWN) {
2529 ret &= ~BMCR_PDOWN;
2530 ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
2531 PAGE_FIBER_SERDES, MII_BMCR,
2532 ret);
2533 }
2534
2535 return ret;
2536 }
2537
2538 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2539 {
2540 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2541 int ret;
2542 u16 reg;
2543
2544 mutex_lock(&ps->smi_mutex);
2545
2546 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2547 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2548 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2549 mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
2550 /* MAC Forcing register: don't force link, speed,
2551 * duplex or flow control state to any particular
2552 * values on physical ports, but force the CPU port
2553 * and all DSA ports to their maximum bandwidth and
2554 * full duplex.
2555 */
2556 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
if (ret < 0)
goto abort;
reg = ret;
2557 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2558 reg &= ~PORT_PCS_CTRL_UNFORCED;
2559 reg |= PORT_PCS_CTRL_FORCE_LINK |
2560 PORT_PCS_CTRL_LINK_UP |
2561 PORT_PCS_CTRL_DUPLEX_FULL |
2562 PORT_PCS_CTRL_FORCE_DUPLEX;
2563 if (mv88e6xxx_6065_family(ps))
2564 reg |= PORT_PCS_CTRL_100;
2565 else
2566 reg |= PORT_PCS_CTRL_1000;
2567 } else {
2568 reg |= PORT_PCS_CTRL_UNFORCED;
2569 }
2570
2571 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2572 PORT_PCS_CTRL, reg);
2573 if (ret)
2574 goto abort;
2575 }
2576
2577 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2578 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2579 * tunneling, determine priority by looking at 802.1p and IP
2580 * priority fields (IP prio has precedence), and set STP state
2581 * to Forwarding.
2582 *
2583 * If this is the CPU port, use DSA or EDSA tagging depending
2584 * on which tagging mode was configured.
2585 *
2586 * If this is a link to another switch, use DSA tagging mode.
2587 *
2588 * If this is the upstream port for this switch, enable
2589 * forwarding of unknown unicasts and multicasts.
2590 */
2591 reg = 0;
2592 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2593 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2594 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
2595 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
2596 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2597 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2598 PORT_CONTROL_STATE_FORWARDING;
2599 if (dsa_is_cpu_port(ds, port)) {
2600 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
2601 reg |= PORT_CONTROL_DSA_TAG;
2602 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2603 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2604 mv88e6xxx_6320_family(ps)) {
2605 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2606 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2607 else
2608 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2609 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2610 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2611 }
2612
2613 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2614 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2615 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
2616 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
2617 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2618 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2619 }
2620 }
2621 if (dsa_is_dsa_port(ds, port)) {
2622 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
2623 reg |= PORT_CONTROL_DSA_TAG;
2624 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2625 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2626 mv88e6xxx_6320_family(ps)) {
2627 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2628 }
2629
2630 if (port == dsa_upstream_port(ds))
2631 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2632 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2633 }
2634 if (reg) {
2635 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2636 PORT_CONTROL, reg);
2637 if (ret)
2638 goto abort;
2639 }
2640
2641 /* If this port is connected to a SerDes, make sure the SerDes is not
2642 * powered down.
2643 */
2644 if (mv88e6xxx_6352_family(ps)) {
2645 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
2646 if (ret < 0)
2647 goto abort;
2648 ret &= PORT_STATUS_CMODE_MASK;
2649 if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
2650 (ret == PORT_STATUS_CMODE_1000BASE_X) ||
2651 (ret == PORT_STATUS_CMODE_SGMII)) {
2652 ret = mv88e6xxx_power_on_serdes(ps);
2653 if (ret < 0)
2654 goto abort;
2655 }
2656 }
2657
2658 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2659 * 10240 bytes, disable 802.1Q tag checking, don't discard tagged or
2660 * untagged frames on this port, do a destination address lookup on all
2661 * received packets as usual, disable ARP mirroring and don't send a
2662 * copy of all transmitted/received frames on this port to the CPU.
2663 */
2664 reg = 0;
2665 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2666 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2667 mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
2668 mv88e6xxx_6185_family(ps))
2669 reg = PORT_CONTROL_2_MAP_DA;
2670
2671 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2672 mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
2673 reg |= PORT_CONTROL_2_JUMBO_10240;
2674
2675 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
2676 /* Set the upstream port this port should use */
2677 reg |= dsa_upstream_port(ds);
2678 /* enable forwarding of unknown multicast addresses to
2679 * the upstream port
2680 */
2681 if (port == dsa_upstream_port(ds))
2682 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2683 }
2684
2685 reg |= PORT_CONTROL_2_8021Q_DISABLED;
2686
2687 if (reg) {
2688 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2689 PORT_CONTROL_2, reg);
2690 if (ret)
2691 goto abort;
2692 }
2693
2694 /* Port Association Vector: when learning source addresses
2695 * of packets, add the address to the address database using
2696 * a port bitmap that has only the bit for this port set and
2697 * the other bits clear.
2698 */
2699 reg = 1 << port;
2700 /* Disable learning for CPU port */
2701 if (dsa_is_cpu_port(ds, port))
2702 reg = 0;
2703
2704 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
2705 if (ret)
2706 goto abort;
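
/* Editor's note (illustrative): PORT_ASSOC_VECTOR therefore receives a
 * one-hot value such as 0x0004 for port 2, so learned source addresses
 * are associated with this port only, while the CPU port is written with
 * 0 and learns nothing.
 */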
2707
2708 /* Egress rate control 2: disable egress rate control. */
2709 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
2710 0x0000);
2711 if (ret)
2712 goto abort;
2713
2714 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2715 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2716 mv88e6xxx_6320_family(ps)) {
2717 /* Do not limit the period of time that this port can
2718 * be paused for by the remote end or the period of
2719 * time that this port can pause the remote end.
2720 */
2721 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2722 PORT_PAUSE_CTRL, 0x0000);
2723 if (ret)
2724 goto abort;
2725
2726 /* Port ATU control: disable limiting the number of
2727 * address database entries that this port is allowed
2728 * to use.
2729 */
2730 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2731 PORT_ATU_CONTROL, 0x0000);
if (ret)
goto abort;

2732 /* Priority Override: disable DA, SA and VTU priority
2733 * override.
2734 */
2735 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2736 PORT_PRI_OVERRIDE, 0x0000);
2737 if (ret)
2738 goto abort;
2739
2740 /* Port EtherType: use the EtherType-DSA (EDSA) EtherType
2741 * value.
2742 */
2743 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2744 PORT_ETH_TYPE, ETH_P_EDSA);
2745 if (ret)
2746 goto abort;
2747 /* Tag Remap: use an identity 802.1p prio -> switch
2748 * prio mapping.
2749 */
2750 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2751 PORT_TAG_REGMAP_0123, 0x3210);
2752 if (ret)
2753 goto abort;
2754
2755 /* Tag Remap 2: use an identity 802.1p prio -> switch
2756 * prio mapping.
2757 */
2758 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2759 PORT_TAG_REGMAP_4567, 0x7654);
2760 if (ret)
2761 goto abort;
2762 }
2763
2764 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2765 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2766 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2767 mv88e6xxx_6320_family(ps)) {
2768 /* Rate Control: disable ingress rate limiting. */
2769 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2770 PORT_RATE_CONTROL, 0x0001);
2771 if (ret)
2772 goto abort;
2773 }
2774
2775 /* Port Control 1: disable trunking, disable sending
2776 * learning messages to this port.
2777 */
2778 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
2779 if (ret)
2780 goto abort;
2781
2782 /* Port based VLAN map: give each port the same default address
2783 * database, and allow bidirectional communication between the
2784 * CPU and DSA port(s), and the other ports.
2785 */
2786 ret = _mv88e6xxx_port_fid_set(ps, port, 0);
2787 if (ret)
2788 goto abort;
2789
2790 ret = _mv88e6xxx_port_based_vlan_map(ps, port);
2791 if (ret)
2792 goto abort;
2793
2794 /* Default VLAN ID and priority: don't set a default VLAN
2795 * ID, and set the default packet priority to zero.
2796 */
2797 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
2798 0x0000);
2799 abort:
2800 mutex_unlock(&ps->smi_mutex);
2801 return ret;
2802 }
2803
2804 int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2805 {
2806 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2807 int ret;
2808 int i;
2809
2810 for (i = 0; i < ps->info->num_ports; i++) {
2811 ret = mv88e6xxx_setup_port(ds, i);
2812 if (ret < 0)
2813 return ret;
2814 }
2815 return 0;
2816 }
2817
2818 int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps)
2819 {
2820 mutex_init(&ps->smi_mutex);
2821
2822 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
2823
2824 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
2825 mutex_init(&ps->eeprom_mutex);
2826
2827 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
2828 mv88e6xxx_ppu_state_init(ps);
2829
2830 return 0;
2831 }
2832
2833 int mv88e6xxx_setup_global(struct dsa_switch *ds)
2834 {
2835 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2836 int err;
2837 int i;
2838
2839 mutex_lock(&ps->smi_mutex);
2840 /* Set the default address aging time to 5 minutes, and
2841 * enable address learn messages to be sent to all message
2842 * ports.
2843 */
2844 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
2845 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2846 if (err)
2847 goto unlock;
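
/* Editor's note (illustrative): with the AgeTime field in bits 11:4
 * counting 15-second units, the 0x0140 written above encodes
 * 0x14 * 15 s = 300 s, i.e. the 5 minutes mentioned in the comment.
 */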
2848
2849 /* Configure the IP ToS mapping registers. */
2850 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2851 if (err)
2852 goto unlock;
2853 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2854 if (err)
2855 goto unlock;
2856 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2857 if (err)
2858 goto unlock;
2859 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2860 if (err)
2861 goto unlock;
2862 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2863 if (err)
2864 goto unlock;
2865 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2866 if (err)
2867 goto unlock;
2868 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2869 if (err)
2870 goto unlock;
2871 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2872 if (err)
2873 goto unlock;
2874
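/* Editor's reading (hedged, not upstream text): each GLOBAL_IP_PRI_n
 * register appears to hold eight 2-bit priority codes, one per ToS/DSCP
 * value in that register's range, so the 0x0000/0x5555/0xaaaa/0xffff
 * pattern above maps the lowest quarter of the ToS space to priority 0,
 * the next quarter to 1, then 2, then 3.
 */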
2875 /* Configure the IEEE 802.1p priority mapping register. */
2876 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2877 if (err)
2878 goto unlock;
2879
2880 /* Send all frames with destination addresses matching
2881 * 01:80:c2:00:00:0x to the CPU port.
2882 */
2883 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2884 if (err)
2885 goto unlock;
2886
2887 /* Ignore removed tag data on doubly tagged packets, disable
2888 * flow control messages, force flow control priority to the
2889 * highest, and send all special multicast frames to the CPU
2890 * port at the highest priority.
2891 */
2892 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2893 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2894 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2895 if (err)
2896 goto unlock;
2897
2898 /* Program the DSA routing table. */
2899 for (i = 0; i < 32; i++) {
2900 int nexthop = 0x1f;
2901
2902 if (ds->pd->rtable &&
2903 i != ds->index && i < ds->dst->pd->nr_chips)
2904 nexthop = ds->pd->rtable[i] & 0x1f;
2905
2906 err = _mv88e6xxx_reg_write(
2907 ps, REG_GLOBAL2,
2908 GLOBAL2_DEVICE_MAPPING,
2909 GLOBAL2_DEVICE_MAPPING_UPDATE |
2910 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
2911 if (err)
2912 goto unlock;
2913 }
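
/* Editor's worked example (assuming the update bit is bit 15 and the
 * target-device field sits at GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT = 8, as
 * defined in mv88e6xxx.h): routing frames destined for chip 2 through
 * local port 5 writes 0x8000 | (2 << 8) | 5 = 0x8205, while entries for
 * unknown chips fall back to the nexthop value 0x1f.
 */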
2914
2915 /* Clear all trunk masks. */
2916 for (i = 0; i < 8; i++) {
2917 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2918 0x8000 |
2919 (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2920 ((1 << ps->info->num_ports) - 1));
2921 if (err)
2922 goto unlock;
2923 }
2924
2925 /* Clear all trunk mappings. */
2926 for (i = 0; i < 16; i++) {
2927 err = _mv88e6xxx_reg_write(
2928 ps, REG_GLOBAL2,
2929 GLOBAL2_TRUNK_MAPPING,
2930 GLOBAL2_TRUNK_MAPPING_UPDATE |
2931 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2932 if (err)
2933 goto unlock;
2934 }
2935
2936 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2937 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2938 mv88e6xxx_6320_family(ps)) {
2939 /* Send all frames with destination addresses matching
2940 * 01:80:c2:00:00:2x to the CPU port.
2941 */
2942 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
2943 GLOBAL2_MGMT_EN_2X, 0xffff);
2944 if (err)
2945 goto unlock;
2946
2947 /* Initialise cross-chip port VLAN table to reset
2948 * defaults.
2949 */
2950 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
2951 GLOBAL2_PVT_ADDR, 0x9000);
2952 if (err)
2953 goto unlock;
2954
2955 /* Clear the priority override table. */
2956 for (i = 0; i < 16; i++) {
2957 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
2958 GLOBAL2_PRIO_OVERRIDE,
2959 0x8000 | (i << 8));
2960 if (err)
2961 goto unlock;
2962 }
2963 }
2964
2965 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2966 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2967 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2968 mv88e6xxx_6320_family(ps)) {
2969 /* Disable ingress rate limiting by resetting all
2970 * ingress rate limit registers to their initial
2971 * state.
2972 */
2973 for (i = 0; i < ps->info->num_ports; i++) {
2974 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
2975 GLOBAL2_INGRESS_OP,
2976 0x9000 | (i << 8));
2977 if (err)
2978 goto unlock;
2979 }
2980 }
2981
2982 /* Clear the statistics counters for all ports */
2983 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
2984 GLOBAL_STATS_OP_FLUSH_ALL);
2985 if (err)
2986 goto unlock;
2987
2988 /* Wait for the flush to complete. */
2989 err = _mv88e6xxx_stats_wait(ps);
2990 if (err < 0)
2991 goto unlock;
2992
2993 /* Clear all ATU entries */
2994 err = _mv88e6xxx_atu_flush(ps, 0, true);
2995 if (err < 0)
2996 goto unlock;
2997
2998 /* Clear all the VTU and STU entries */
2999 err = _mv88e6xxx_vtu_stu_flush(ps);
3000 unlock:
3001 mutex_unlock(&ps->smi_mutex);
3002
3003 return err;
3004 }
3005
3006 int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps, bool ppu_active)
3007 {
3008 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
3009 struct gpio_desc *gpiod = ps->ds->pd->reset;
3010 unsigned long timeout;
3011 int ret;
3012 int i;
3013
3014 mutex_lock(&ps->smi_mutex);
3015
3016 /* Set all ports to the disabled state. */
3017 for (i = 0; i < ps->info->num_ports; i++) {
3018 ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
3019 if (ret < 0)
3020 goto unlock;
3021
3022 ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
3023 ret & 0xfffc);
3024 if (ret)
3025 goto unlock;
3026 }
3027
3028 /* Wait for transmit queues to drain. */
3029 usleep_range(2000, 4000);
3030
3031 /* If there is a gpio connected to the reset pin, toggle it */
3032 if (gpiod) {
3033 gpiod_set_value_cansleep(gpiod, 1);
3034 usleep_range(10000, 20000);
3035 gpiod_set_value_cansleep(gpiod, 0);
3036 usleep_range(10000, 20000);
3037 }
3038
3039 /* Reset the switch. Keep the PPU active if requested. The PPU
3040 * needs to be active to support indirect phy register access
3041 * through global registers 0x18 and 0x19.
3042 */
3043 if (ppu_active)
3044 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
3045 else
3046 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
3047 if (ret)
3048 goto unlock;
3049
3050 /* Wait up to one second for reset to complete. */
3051 timeout = jiffies + 1 * HZ;
3052 while (time_before(jiffies, timeout)) {
3053 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
3054 if (ret < 0)
3055 goto unlock;
3056
3057 if ((ret & is_reset) == is_reset)
3058 break;
3059 usleep_range(1000, 2000);
3060 }
3061 if (time_after(jiffies, timeout))
3062 ret = -ETIMEDOUT;
3063 else
3064 ret = 0;
3065 unlock:
3066 mutex_unlock(&ps->smi_mutex);
3067
3068 return ret;
3069 }
3070
3071 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
3072 {
3073 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3074 int ret;
3075
3076 mutex_lock(&ps->smi_mutex);
3077 ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
3078 mutex_unlock(&ps->smi_mutex);
3079
3080 return ret;
3081 }
3082
3083 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
3084 int reg, int val)
3085 {
3086 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3087 int ret;
3088
3089 mutex_lock(&ps->smi_mutex);
3090 ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
3091 mutex_unlock(&ps->smi_mutex);
3092
3093 return ret;
3094 }
3095
3096 static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
3097 int port)
3098 {
3099 if (port >= 0 && port < ps->info->num_ports)
3100 return port;
3101 return -EINVAL;
3102 }
3103
3104 int
3105 mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
3106 {
3107 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3108 int addr = mv88e6xxx_port_to_phy_addr(ps, port);
3109 int ret;
3110
3111 if (addr < 0)
3112 return 0xffff;
3113
3114 mutex_lock(&ps->smi_mutex);
3115
3116 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3117 ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
3118 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
3119 ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
3120 else
3121 ret = _mv88e6xxx_phy_read(ps, addr, regnum);
3122
3123 mutex_unlock(&ps->smi_mutex);
3124 return ret;
3125 }
3126
3127 int
3128 mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
3129 {
3130 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3131 int addr = mv88e6xxx_port_to_phy_addr(ps, port);
3132 int ret;
3133
3134 if (addr < 0)
3135 return 0xffff;
3136
3137 mutex_lock(&ps->smi_mutex);
3138
3139 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3140 ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
3141 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
3142 ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
3143 else
3144 ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
3145
3146 mutex_unlock(&ps->smi_mutex);
3147 return ret;
3148 }
3149
3150 #ifdef CONFIG_NET_DSA_HWMON
3151
3152 static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
3153 {
3154 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3155 int ret;
3156 int val;
3157
3158 *temp = 0;
3159
3160 mutex_lock(&ps->smi_mutex);
3161
3162 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
3163 if (ret < 0)
3164 goto error;
3165
3166 /* Enable temperature sensor */
3167 ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
3168 if (ret < 0)
3169 goto error;
3170
3171 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
3172 if (ret < 0)
3173 goto error;
3174
3175 /* Wait for temperature to stabilize */
3176 usleep_range(10000, 12000);
3177
3178 val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
3179 if (val < 0) {
3180 ret = val;
3181 goto error;
3182 }
3183
3184 /* Disable temperature sensor */
3185 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, val & ~(1 << 5));
3186 if (ret < 0)
3187 goto error;
3188
3189 *temp = ((val & 0x1f) - 5) * 5;
3190
3191 error:
3192 _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
3193 mutex_unlock(&ps->smi_mutex);
3194 return ret;
3195 }
3196
3197 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
3198 {
3199 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3200 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3201 int ret;
3202
3203 *temp = 0;
3204
3205 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
3206 if (ret < 0)
3207 return ret;
3208
3209 *temp = (ret & 0xff) - 25;
3210
3211 return 0;
3212 }
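
/* Editor's worked examples (illustrative): the 61xx sensor above encodes
 * temperature in 5-degree steps, so a raw reading of 0x10 (16) yields
 * (16 - 5) * 5 = 55 C, while the 63xx/6352-style sensor read by
 * mv88e63xx_get_temp() is a simple offset, e.g. a raw byte of 60 gives
 * 60 - 25 = 35 C.
 */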
3213
3214 int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
3215 {
3216 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3217
3218 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
3219 return -EOPNOTSUPP;
3220
3221 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
3222 return mv88e63xx_get_temp(ds, temp);
3223
3224 return mv88e61xx_get_temp(ds, temp);
3225 }
3226
3227 int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
3228 {
3229 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3230 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3231 int ret;
3232
3233 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3234 return -EOPNOTSUPP;
3235
3236 *temp = 0;
3237
3238 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3239 if (ret < 0)
3240 return ret;
3241
3242 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
3243
3244 return 0;
3245 }
3246
3247 int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
3248 {
3249 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3250 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3251 int ret;
3252
3253 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3254 return -EOPNOTSUPP;
3255
3256 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3257 if (ret < 0)
3258 return ret;
3259 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
3260 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
3261 (ret & 0xe0ff) | (temp << 8));
3262 }
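
/* Editor's worked example (illustrative): the limit field round-trips with
 * mv88e6xxx_get_temp_limit() above.  Requesting 75 C stores
 * clamp(75 / 5 + 5, 0, 0x1f) = 20 in bits 12:8, and reading it back gives
 * (20 * 5) - 25 = 75 C.
 */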
3263
3264 int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
3265 {
3266 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3267 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3268 int ret;
3269
3270 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3271 return -EOPNOTSUPP;
3272
3273 *alarm = false;
3274
3275 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3276 if (ret < 0)
3277 return ret;
3278
3279 *alarm = !!(ret & 0x40);
3280
3281 return 0;
3282 }
3283 #endif /* CONFIG_NET_DSA_HWMON */
3284
3285 static const struct mv88e6xxx_info *
3286 mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
3287 unsigned int num)
3288 {
3289 int i;
3290
3291 for (i = 0; i < num; ++i)
3292 if (table[i].prod_num == prod_num)
3293 return &table[i];
3294
3295 return NULL;
3296 }
3297
3298 const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev,
3299 int sw_addr, void **priv,
3300 const struct mv88e6xxx_info *table,
3301 unsigned int num)
3302 {
3303 const struct mv88e6xxx_info *info;
3304 struct mv88e6xxx_priv_state *ps;
3305 struct mii_bus *bus;
3306 const char *name;
3307 int id, prod_num, rev;
3308
3309 bus = dsa_host_dev_to_mii_bus(host_dev);
3310 if (!bus)
3311 return NULL;
3312
3313 id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
3314 if (id < 0)
3315 return NULL;
3316
3317 prod_num = (id & 0xfff0) >> 4;
3318 rev = id & 0x000f;
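	/* Editor's worked example (illustrative): an ID word of 0x3521
	 * decodes to product number 0x352 (an 88E6352) and revision 1.
	 */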
3319
3320 info = mv88e6xxx_lookup_info(prod_num, table, num);
3321 if (!info)
3322 return NULL;
3323
3324 name = info->name;
3325
3326 ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
3327 if (!ps)
3328 return NULL;
3329
3330 ps->bus = bus;
3331 ps->sw_addr = sw_addr;
3332 ps->info = info;
3333
3334 *priv = ps;
3335
3336 dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
3337 prod_num, name, rev);
3338
3339 return name;
3340 }
3341
3342 static int __init mv88e6xxx_init(void)
3343 {
3344 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3345 register_switch_driver(&mv88e6131_switch_driver);
3346 #endif
3347 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
3348 register_switch_driver(&mv88e6123_switch_driver);
3349 #endif
3350 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
3351 register_switch_driver(&mv88e6352_switch_driver);
3352 #endif
3353 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
3354 register_switch_driver(&mv88e6171_switch_driver);
3355 #endif
3356 return 0;
3357 }
3358 module_init(mv88e6xxx_init);
3359
3360 static void __exit mv88e6xxx_cleanup(void)
3361 {
3362 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
3363 unregister_switch_driver(&mv88e6171_switch_driver);
3364 #endif
3365 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
3366 unregister_switch_driver(&mv88e6352_switch_driver);
3367 #endif
3368 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
3369 unregister_switch_driver(&mv88e6123_switch_driver);
3370 #endif
3371 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3372 unregister_switch_driver(&mv88e6131_switch_driver);
3373 #endif
3374 }
3375 module_exit(mv88e6xxx_cleanup);
3376
3377 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
3378 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
3379 MODULE_LICENSE("GPL");