1 /*
2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
4 *
5 * Copyright (c) 2015 CMC Electronics, Inc.
6 * Added support for VLAN Table Unit operations
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 #include <linux/delay.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_bridge.h>
18 #include <linux/jiffies.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/phy.h>
24 #include <net/dsa.h>
25 #include <net/switchdev.h>
26 #include "mv88e6xxx.h"
27
28 static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
29 {
30 if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
31 dev_err(ps->dev, "SMI lock not held!\n");
32 dump_stack();
33 }
34 }
35
36 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
37 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
38 * will be directly accessible on some {device address,register address}
39 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
40 * will only respond to SMI transactions to that specific address, and
41 * an indirect addressing mechanism needs to be used to access its
42 * registers.
43 */
44 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
45 {
46 int ret;
47 int i;
48
49 for (i = 0; i < 16; i++) {
50 ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
51 if (ret < 0)
52 return ret;
53
54 if ((ret & SMI_CMD_BUSY) == 0)
55 return 0;
56 }
57
58 return -ETIMEDOUT;
59 }
60
61 static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
62 int reg)
63 {
64 int ret;
65
66 if (sw_addr == 0)
67 return mdiobus_read_nested(bus, addr, reg);
68
69 /* Wait for the bus to become free. */
70 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
71 if (ret < 0)
72 return ret;
73
74 /* Transmit the read command. */
75 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
76 SMI_CMD_OP_22_READ | (addr << 5) | reg);
77 if (ret < 0)
78 return ret;
79
80 /* Wait for the read command to complete. */
81 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
82 if (ret < 0)
83 return ret;
84
85 /* Read the data. */
86 ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
87 if (ret < 0)
88 return ret;
89
90 return ret & 0xffff;
91 }
92
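/* Read a switch register over SMI; the caller must hold smi_mutex. */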
93 static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
94 int addr, int reg)
95 {
96 int ret;
97
98 assert_smi_lock(ps);
99
100 ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
101 if (ret < 0)
102 return ret;
103
104 dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
105 addr, reg, ret);
106
107 return ret;
108 }
109
110 int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
111 {
112 int ret;
113
114 mutex_lock(&ps->smi_mutex);
115 ret = _mv88e6xxx_reg_read(ps, addr, reg);
116 mutex_unlock(&ps->smi_mutex);
117
118 return ret;
119 }
120
121 static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
122 int reg, u16 val)
123 {
124 int ret;
125
126 if (sw_addr == 0)
127 return mdiobus_write_nested(bus, addr, reg, val);
128
129 /* Wait for the bus to become free. */
130 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
131 if (ret < 0)
132 return ret;
133
134 /* Transmit the data to write. */
135 ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
136 if (ret < 0)
137 return ret;
138
139 /* Transmit the write command. */
140 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
141 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
142 if (ret < 0)
143 return ret;
144
145 /* Wait for the write command to complete. */
146 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
147 if (ret < 0)
148 return ret;
149
150 return 0;
151 }
152
153 static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
154 int reg, u16 val)
155 {
156 assert_smi_lock(ps);
157
158 dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
159 addr, reg, val);
160
161 return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
162 }
163
164 int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
165 int reg, u16 val)
166 {
167 int ret;
168
169 mutex_lock(&ps->smi_mutex);
170 ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
171 mutex_unlock(&ps->smi_mutex);
172
173 return ret;
174 }
175
176 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
177 {
178 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
179 int err;
180
181 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
182 (addr[0] << 8) | addr[1]);
183 if (err)
184 return err;
185
186 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
187 (addr[2] << 8) | addr[3]);
188 if (err)
189 return err;
190
191 return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
192 (addr[4] << 8) | addr[5]);
193 }
194
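/* Program the switch MAC address one byte at a time through the Global 2
 * Switch MAC register, polling the BUSY bit after each write.
 */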
195 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
196 {
197 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
198 int ret;
199 int i;
200
201 for (i = 0; i < 6; i++) {
202 int j;
203
204 /* Write the MAC address byte. */
205 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
206 GLOBAL2_SWITCH_MAC_BUSY |
207 (i << 8) | addr[i]);
208 if (ret)
209 return ret;
210
211 /* Wait for the write to complete. */
212 for (j = 0; j < 16; j++) {
213 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
214 GLOBAL2_SWITCH_MAC);
215 if (ret < 0)
216 return ret;
217
218 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
219 break;
220 }
221 if (j == 16)
222 return -ETIMEDOUT;
223 }
224
225 return 0;
226 }
227
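/* A negative address denotes an absent PHY: reads return 0xffff (all ones)
 * and writes succeed without touching the hardware.
 */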
228 static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
229 int regnum)
230 {
231 if (addr >= 0)
232 return _mv88e6xxx_reg_read(ps, addr, regnum);
233 return 0xffff;
234 }
235
236 static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
237 int regnum, u16 val)
238 {
239 if (addr >= 0)
240 return _mv88e6xxx_reg_write(ps, addr, regnum, val);
241 return 0;
242 }
243
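/* Stop the PHY Polling Unit so that PHY registers can be accessed directly,
 * waiting up to one second for polling to cease.
 */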
244 static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
245 {
246 int ret;
247 unsigned long timeout;
248
249 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
250 if (ret < 0)
251 return ret;
252
253 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
254 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
255 if (ret)
256 return ret;
257
258 timeout = jiffies + 1 * HZ;
259 while (time_before(jiffies, timeout)) {
260 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
261 if (ret < 0)
262 return ret;
263
264 usleep_range(1000, 2000);
265 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
266 GLOBAL_STATUS_PPU_POLLING)
267 return 0;
268 }
269
270 return -ETIMEDOUT;
271 }
272
273 static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
274 {
275 int ret, err;
276 unsigned long timeout;
277
278 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
279 if (ret < 0)
280 return ret;
281
282 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
283 ret | GLOBAL_CONTROL_PPU_ENABLE);
284 if (err)
285 return err;
286
287 timeout = jiffies + 1 * HZ;
288 while (time_before(jiffies, timeout)) {
289 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
290 if (ret < 0)
291 return ret;
292
293 usleep_range(1000, 2000);
294 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
295 GLOBAL_STATUS_PPU_POLLING)
296 return 0;
297 }
298
299 return -ETIMEDOUT;
300 }
301
302 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
303 {
304 struct mv88e6xxx_priv_state *ps;
305
306 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
307 if (mutex_trylock(&ps->ppu_mutex)) {
308 if (mv88e6xxx_ppu_enable(ps) == 0)
309 ps->ppu_disabled = 0;
310 mutex_unlock(&ps->ppu_mutex);
311 }
312 }
313
314 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
315 {
316 struct mv88e6xxx_priv_state *ps = (void *)_ps;
317
318 schedule_work(&ps->ppu_work);
319 }
320
321 static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
322 {
323 int ret;
324
325 mutex_lock(&ps->ppu_mutex);
326
327 /* If the PHY polling unit is enabled, disable it so that
328 * we can access the PHY registers. If it was already
329 * disabled, cancel the timer that is going to re-enable
330 * it.
331 */
332 if (!ps->ppu_disabled) {
333 ret = mv88e6xxx_ppu_disable(ps);
334 if (ret < 0) {
335 mutex_unlock(&ps->ppu_mutex);
336 return ret;
337 }
338 ps->ppu_disabled = 1;
339 } else {
340 del_timer(&ps->ppu_timer);
341 ret = 0;
342 }
343
344 return ret;
345 }
346
347 static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
348 {
349 /* Schedule a timer to re-enable the PHY polling unit. */
350 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
351 mutex_unlock(&ps->ppu_mutex);
352 }
353
354 void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
355 {
356 mutex_init(&ps->ppu_mutex);
357 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
358 init_timer(&ps->ppu_timer);
359 ps->ppu_timer.data = (unsigned long)ps;
360 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
361 }
362
363 static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
364 int regnum)
365 {
366 int ret;
367
368 ret = mv88e6xxx_ppu_access_get(ps);
369 if (ret >= 0) {
370 ret = _mv88e6xxx_reg_read(ps, addr, regnum);
371 mv88e6xxx_ppu_access_put(ps);
372 }
373
374 return ret;
375 }
376
377 static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
378 int regnum, u16 val)
379 {
380 int ret;
381
382 ret = mv88e6xxx_ppu_access_get(ps);
383 if (ret >= 0) {
384 ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
385 mv88e6xxx_ppu_access_put(ps);
386 }
387
388 return ret;
389 }
390
391 static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
392 {
393 return ps->info->family == MV88E6XXX_FAMILY_6065;
394 }
395
396 static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
397 {
398 return ps->info->family == MV88E6XXX_FAMILY_6095;
399 }
400
401 static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
402 {
403 return ps->info->family == MV88E6XXX_FAMILY_6097;
404 }
405
406 static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
407 {
408 return ps->info->family == MV88E6XXX_FAMILY_6165;
409 }
410
411 static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
412 {
413 return ps->info->family == MV88E6XXX_FAMILY_6185;
414 }
415
416 static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
417 {
418 return ps->info->family == MV88E6XXX_FAMILY_6320;
419 }
420
421 static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
422 {
423 return ps->info->family == MV88E6XXX_FAMILY_6351;
424 }
425
426 static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
427 {
428 return ps->info->family == MV88E6XXX_FAMILY_6352;
429 }
430
431 static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
432 {
433 return ps->info->num_databases;
434 }
435
436 static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
437 {
438 /* Does the device have dedicated FID registers for ATU and VTU ops? */
439 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
440 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
441 return true;
442
443 return false;
444 }
445
446 static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state *ps)
447 {
448 /* Does the device have STU and dedicated SID registers for VTU ops? */
449 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
450 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
451 return true;
452
453 return false;
454 }
455
456 /* We expect the switch to perform auto-negotiation if there is a real
457 * PHY. However, in the case of a fixed-link PHY, we force the port
458 * settings from the fixed-link settings.
459 */
460 void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
461 struct phy_device *phydev)
462 {
463 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
464 u32 reg;
465 int ret;
466
467 if (!phy_is_pseudo_fixed_link(phydev))
468 return;
469
470 mutex_lock(&ps->smi_mutex);
471
472 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
473 if (ret < 0)
474 goto out;
475
476 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
477 PORT_PCS_CTRL_FORCE_LINK |
478 PORT_PCS_CTRL_DUPLEX_FULL |
479 PORT_PCS_CTRL_FORCE_DUPLEX |
480 PORT_PCS_CTRL_UNFORCED);
481
482 reg |= PORT_PCS_CTRL_FORCE_LINK;
483 if (phydev->link)
484 reg |= PORT_PCS_CTRL_LINK_UP;
485
486 if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
487 goto out;
488
489 switch (phydev->speed) {
490 case SPEED_1000:
491 reg |= PORT_PCS_CTRL_1000;
492 break;
493 case SPEED_100:
494 reg |= PORT_PCS_CTRL_100;
495 break;
496 case SPEED_10:
497 reg |= PORT_PCS_CTRL_10;
498 break;
499 default:
500 pr_info("Unknown speed\n");
501 goto out;
502 }
503
504 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
505 if (phydev->duplex == DUPLEX_FULL)
506 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
507
508 if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
509 (port >= ps->info->num_ports - 2)) {
510 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
511 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
512 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
513 reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
514 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
515 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
516 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
517 }
518 _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
519
520 out:
521 mutex_unlock(&ps->smi_mutex);
522 }
523
524 static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
525 {
526 int ret;
527 int i;
528
529 for (i = 0; i < 10; i++) {
530 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
531 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
532 return 0;
533 }
534
535 return -ETIMEDOUT;
536 }
537
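/* Capture the port's hardware counters into the statistics unit. The 6320
 * and 6352 families expect the port number (plus one) in bits 9:5 of the
 * op word.
 */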
538 static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
539 int port)
540 {
541 int ret;
542
543 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
544 port = (port + 1) << 5;
545
546 /* Snapshot the hardware statistics counters for this port. */
547 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
548 GLOBAL_STATS_OP_CAPTURE_PORT |
549 GLOBAL_STATS_OP_HIST_RX_TX | port);
550 if (ret < 0)
551 return ret;
552
553 /* Wait for the snapshotting to complete. */
554 ret = _mv88e6xxx_stats_wait(ps);
555 if (ret < 0)
556 return ret;
557
558 return 0;
559 }
560
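/* Read a captured 32-bit counter as two 16-bit halves; *val is left at
 * zero if any register access fails.
 */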
561 static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
562 int stat, u32 *val)
563 {
564 u32 _val;
565 int ret;
566
567 *val = 0;
568
569 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
570 GLOBAL_STATS_OP_READ_CAPTURED |
571 GLOBAL_STATS_OP_HIST_RX_TX | stat);
572 if (ret < 0)
573 return;
574
575 ret = _mv88e6xxx_stats_wait(ps);
576 if (ret < 0)
577 return;
578
579 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
580 if (ret < 0)
581 return;
582
583 _val = ret << 16;
584
585 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
586 if (ret < 0)
587 return;
588
589 *val = _val | ret;
590 }
591
592 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
593 { "in_good_octets", 8, 0x00, BANK0, },
594 { "in_bad_octets", 4, 0x02, BANK0, },
595 { "in_unicast", 4, 0x04, BANK0, },
596 { "in_broadcasts", 4, 0x06, BANK0, },
597 { "in_multicasts", 4, 0x07, BANK0, },
598 { "in_pause", 4, 0x16, BANK0, },
599 { "in_undersize", 4, 0x18, BANK0, },
600 { "in_fragments", 4, 0x19, BANK0, },
601 { "in_oversize", 4, 0x1a, BANK0, },
602 { "in_jabber", 4, 0x1b, BANK0, },
603 { "in_rx_error", 4, 0x1c, BANK0, },
604 { "in_fcs_error", 4, 0x1d, BANK0, },
605 { "out_octets", 8, 0x0e, BANK0, },
606 { "out_unicast", 4, 0x10, BANK0, },
607 { "out_broadcasts", 4, 0x13, BANK0, },
608 { "out_multicasts", 4, 0x12, BANK0, },
609 { "out_pause", 4, 0x15, BANK0, },
610 { "excessive", 4, 0x11, BANK0, },
611 { "collisions", 4, 0x1e, BANK0, },
612 { "deferred", 4, 0x05, BANK0, },
613 { "single", 4, 0x14, BANK0, },
614 { "multiple", 4, 0x17, BANK0, },
615 { "out_fcs_error", 4, 0x03, BANK0, },
616 { "late", 4, 0x1f, BANK0, },
617 { "hist_64bytes", 4, 0x08, BANK0, },
618 { "hist_65_127bytes", 4, 0x09, BANK0, },
619 { "hist_128_255bytes", 4, 0x0a, BANK0, },
620 { "hist_256_511bytes", 4, 0x0b, BANK0, },
621 { "hist_512_1023bytes", 4, 0x0c, BANK0, },
622 { "hist_1024_max_bytes", 4, 0x0d, BANK0, },
623 { "sw_in_discards", 4, 0x10, PORT, },
624 { "sw_in_filtered", 2, 0x12, PORT, },
625 { "sw_out_filtered", 2, 0x13, PORT, },
626 { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
627 { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
628 { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
629 { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
630 { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
631 { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
632 { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
633 { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
634 { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
635 { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
636 { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
637 { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
638 { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
639 { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
640 { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
641 { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
642 { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
643 { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
644 { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
645 { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
646 { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
647 { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
648 { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
649 { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
650 { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
651 { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
652 };
653
654 static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
655 struct mv88e6xxx_hw_stat *stat)
656 {
657 switch (stat->type) {
658 case BANK0:
659 return true;
660 case BANK1:
661 return mv88e6xxx_6320_family(ps);
662 case PORT:
663 return mv88e6xxx_6095_family(ps) ||
664 mv88e6xxx_6185_family(ps) ||
665 mv88e6xxx_6097_family(ps) ||
666 mv88e6xxx_6165_family(ps) ||
667 mv88e6xxx_6351_family(ps) ||
668 mv88e6xxx_6352_family(ps);
669 }
670 return false;
671 }
672
673 static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
674 struct mv88e6xxx_hw_stat *s,
675 int port)
676 {
677 u32 low;
678 u32 high = 0;
679 int ret;
680 u64 value;
681
682 switch (s->type) {
683 case PORT:
684 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
685 if (ret < 0)
686 return UINT64_MAX;
687
688 low = ret;
689 if (s->sizeof_stat == 4) {
690 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
691 s->reg + 1);
692 if (ret < 0)
693 return UINT64_MAX;
694 high = ret;
695 }
696 break;
697 case BANK0:
698 case BANK1:
699 _mv88e6xxx_stats_read(ps, s->reg, &low);
700 if (s->sizeof_stat == 8)
701 _mv88e6xxx_stats_read(ps, s->reg + 1, &high);
702 }
703 value = (((u64)high) << 16) | low;
704 return value;
705 }
706
707 void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
708 {
709 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
710 struct mv88e6xxx_hw_stat *stat;
711 int i, j;
712
713 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
714 stat = &mv88e6xxx_hw_stats[i];
715 if (mv88e6xxx_has_stat(ps, stat)) {
716 memcpy(data + j * ETH_GSTRING_LEN, stat->string,
717 ETH_GSTRING_LEN);
718 j++;
719 }
720 }
721 }
722
723 int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
724 {
725 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
726 struct mv88e6xxx_hw_stat *stat;
727 int i, j;
728
729 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
730 stat = &mv88e6xxx_hw_stats[i];
731 if (mv88e6xxx_has_stat(ps, stat))
732 j++;
733 }
734 return j;
735 }
736
737 void
738 mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
739 int port, uint64_t *data)
740 {
741 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
742 struct mv88e6xxx_hw_stat *stat;
743 int ret;
744 int i, j;
745
746 mutex_lock(&ps->smi_mutex);
747
748 ret = _mv88e6xxx_stats_snapshot(ps, port);
749 if (ret < 0) {
750 mutex_unlock(&ps->smi_mutex);
751 return;
752 }
753 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
754 stat = &mv88e6xxx_hw_stats[i];
755 if (mv88e6xxx_has_stat(ps, stat)) {
756 data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
757 j++;
758 }
759 }
760
761 mutex_unlock(&ps->smi_mutex);
762 }
763
764 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
765 {
766 return 32 * sizeof(u16);
767 }
768
769 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
770 struct ethtool_regs *regs, void *_p)
771 {
772 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
773 u16 *p = _p;
774 int i;
775
776 regs->version = 0;
777
778 memset(p, 0xff, 32 * sizeof(u16));
779
780 for (i = 0; i < 32; i++) {
781 int ret;
782
783 ret = mv88e6xxx_reg_read(ps, REG_PORT(port), i);
784 if (ret >= 0)
785 p[i] = ret;
786 }
787 }
788
789 static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
790 u16 mask)
791 {
792 unsigned long timeout = jiffies + HZ / 10;
793
794 while (time_before(jiffies, timeout)) {
795 int ret;
796
797 ret = _mv88e6xxx_reg_read(ps, reg, offset);
798 if (ret < 0)
799 return ret;
800 if (!(ret & mask))
801 return 0;
802
803 usleep_range(1000, 2000);
804 }
805 return -ETIMEDOUT;
806 }
807
808 static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
809 int offset, u16 mask)
810 {
811 int ret;
812
813 mutex_lock(&ps->smi_mutex);
814 ret = _mv88e6xxx_wait(ps, reg, offset, mask);
815 mutex_unlock(&ps->smi_mutex);
816
817 return ret;
818 }
819
820 static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
821 {
822 return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
823 GLOBAL2_SMI_OP_BUSY);
824 }
825
826 static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
827 {
828 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
829
830 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
831 GLOBAL2_EEPROM_OP_LOAD);
832 }
833
834 static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
835 {
836 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
837
838 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
839 GLOBAL2_EEPROM_OP_BUSY);
840 }
841
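/* Read one 16-bit word from the EEPROM; accesses are serialized by
 * eeprom_mutex.
 */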
842 static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
843 {
844 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
845 int ret;
846
847 mutex_lock(&ps->eeprom_mutex);
848
849 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
850 GLOBAL2_EEPROM_OP_READ |
851 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
852 if (ret < 0)
853 goto error;
854
855 ret = mv88e6xxx_eeprom_busy_wait(ds);
856 if (ret < 0)
857 goto error;
858
859 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
860 error:
861 mutex_unlock(&ps->eeprom_mutex);
862 return ret;
863 }
864
865 int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom,
866 u8 *data)
867 {
868 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
869 int offset;
870 int len;
871 int ret;
872
873 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
874 return -EOPNOTSUPP;
875
876 offset = eeprom->offset;
877 len = eeprom->len;
878 eeprom->len = 0;
879
880 eeprom->magic = 0xc3ec4951;
881
882 ret = mv88e6xxx_eeprom_load_wait(ds);
883 if (ret < 0)
884 return ret;
885
886 if (offset & 1) {
887 int word;
888
889 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
890 if (word < 0)
891 return word;
892
893 *data++ = (word >> 8) & 0xff;
894
895 offset++;
896 len--;
897 eeprom->len++;
898 }
899
900 while (len >= 2) {
901 int word;
902
903 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
904 if (word < 0)
905 return word;
906
907 *data++ = word & 0xff;
908 *data++ = (word >> 8) & 0xff;
909
910 offset += 2;
911 len -= 2;
912 eeprom->len += 2;
913 }
914
915 if (len) {
916 int word;
917
918 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
919 if (word < 0)
920 return word;
921
922 *data++ = word & 0xff;
923
924 offset++;
925 len--;
926 eeprom->len++;
927 }
928
929 return 0;
930 }
931
932 static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
933 {
934 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
935 int ret;
936
937 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
938 if (ret < 0)
939 return ret;
940
941 if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
942 return -EROFS;
943
944 return 0;
945 }
946
947 static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
948 u16 data)
949 {
950 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
951 int ret;
952
953 mutex_lock(&ps->eeprom_mutex);
954
955 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
956 if (ret < 0)
957 goto error;
958
959 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
960 GLOBAL2_EEPROM_OP_WRITE |
961 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
962 if (ret < 0)
963 goto error;
964
965 ret = mv88e6xxx_eeprom_busy_wait(ds);
966 error:
967 mutex_unlock(&ps->eeprom_mutex);
968 return ret;
969 }
970
971 int mv88e6xxx_set_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom,
972 u8 *data)
973 {
974 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
975 int offset;
976 int ret;
977 int len;
978
979 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
980 return -EOPNOTSUPP;
981
982 if (eeprom->magic != 0xc3ec4951)
983 return -EINVAL;
984
985 ret = mv88e6xxx_eeprom_is_readonly(ds);
986 if (ret)
987 return ret;
988
989 offset = eeprom->offset;
990 len = eeprom->len;
991 eeprom->len = 0;
992
993 ret = mv88e6xxx_eeprom_load_wait(ds);
994 if (ret < 0)
995 return ret;
996
997 if (offset & 1) {
998 int word;
999
1000 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
1001 if (word < 0)
1002 return word;
1003
1004 word = (*data++ << 8) | (word & 0xff);
1005
1006 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1007 if (ret < 0)
1008 return ret;
1009
1010 offset++;
1011 len--;
1012 eeprom->len++;
1013 }
1014
1015 while (len >= 2) {
1016 int word;
1017
1018 word = *data++;
1019 word |= *data++ << 8;
1020
1021 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1022 if (ret < 0)
1023 return ret;
1024
1025 offset += 2;
1026 len -= 2;
1027 eeprom->len += 2;
1028 }
1029
1030 if (len) {
1031 int word;
1032
1033 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
1034 if (word < 0)
1035 return word;
1036
1037 word = (word & 0xff00) | *data++;
1038
1039 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1040 if (ret < 0)
1041 return ret;
1042
1043 offset++;
1044 len--;
1045 eeprom->len++;
1046 }
1047
1048 return 0;
1049 }
1050
1051 static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
1052 {
1053 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
1054 GLOBAL_ATU_OP_BUSY);
1055 }
1056
1057 static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
1058 int addr, int regnum)
1059 {
1060 int ret;
1061
1062 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
1063 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
1064 regnum);
1065 if (ret < 0)
1066 return ret;
1067
1068 ret = _mv88e6xxx_phy_wait(ps);
1069 if (ret < 0)
1070 return ret;
1071
1072 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
1073
1074 return ret;
1075 }
1076
1077 static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
1078 int addr, int regnum, u16 val)
1079 {
1080 int ret;
1081
1082 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
1083 if (ret < 0)
1084 return ret;
1085
1086 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
1087 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
1088 regnum);
1089
1090 return _mv88e6xxx_phy_wait(ps);
1091 }
1092
1093 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
1094 {
1095 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1096 int reg;
1097
1098 mutex_lock(&ps->smi_mutex);
1099
1100 reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
1101 if (reg < 0)
1102 goto out;
1103
1104 e->eee_enabled = !!(reg & 0x0200);
1105 e->tx_lpi_enabled = !!(reg & 0x0100);
1106
1107 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
1108 if (reg < 0)
1109 goto out;
1110
1111 e->eee_active = !!(reg & PORT_STATUS_EEE);
1112 reg = 0;
1113
1114 out:
1115 mutex_unlock(&ps->smi_mutex);
1116 return reg;
1117 }
1118
1119 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
1120 struct phy_device *phydev, struct ethtool_eee *e)
1121 {
1122 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1123 int reg;
1124 int ret;
1125
1126 mutex_lock(&ps->smi_mutex);
1127
1128 ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
1129 if (ret < 0)
1130 goto out;
1131
1132 reg = ret & ~0x0300;
1133 if (e->eee_enabled)
1134 reg |= 0x0200;
1135 if (e->tx_lpi_enabled)
1136 reg |= 0x0100;
1137
1138 ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
1139 out:
1140 mutex_unlock(&ps->smi_mutex);
1141
1142 return ret;
1143 }
1144
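/* Set up the FID for the operation: devices with a dedicated ATU FID
 * register take it there, while 256-database devices split it across ATU
 * Control bits 15:12 and op word bits 3:0. Then issue the ATU operation
 * and wait for it to complete.
 */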
1145 static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
1146 {
1147 int ret;
1148
1149 if (mv88e6xxx_has_fid_reg(ps)) {
1150 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
1151 if (ret < 0)
1152 return ret;
1153 } else if (mv88e6xxx_num_databases(ps) == 256) {
1154 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
1155 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
1156 if (ret < 0)
1157 return ret;
1158
1159 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
1160 (ret & 0xfff) |
1161 ((fid << 8) & 0xf000));
1162 if (ret < 0)
1163 return ret;
1164
1165 /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
1166 cmd |= fid & 0xf;
1167 }
1168
1169 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
1170 if (ret < 0)
1171 return ret;
1172
1173 return _mv88e6xxx_atu_wait(ps);
1174 }
1175
1176 static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
1177 struct mv88e6xxx_atu_entry *entry)
1178 {
1179 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
1180
1181 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1182 unsigned int mask, shift;
1183
1184 if (entry->trunk) {
1185 data |= GLOBAL_ATU_DATA_TRUNK;
1186 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1187 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1188 } else {
1189 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1190 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1191 }
1192
1193 data |= (entry->portv_trunkid << shift) & mask;
1194 }
1195
1196 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
1197 }
1198
1199 static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
1200 struct mv88e6xxx_atu_entry *entry,
1201 bool static_too)
1202 {
1203 int op;
1204 int err;
1205
1206 err = _mv88e6xxx_atu_wait(ps);
1207 if (err)
1208 return err;
1209
1210 err = _mv88e6xxx_atu_data_write(ps, entry);
1211 if (err)
1212 return err;
1213
1214 if (entry->fid) {
1215 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
1216 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
1217 } else {
1218 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
1219 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
1220 }
1221
1222 return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
1223 }
1224
1225 static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
1226 u16 fid, bool static_too)
1227 {
1228 struct mv88e6xxx_atu_entry entry = {
1229 .fid = fid,
1230 .state = 0, /* EntryState bits must be 0 */
1231 };
1232
1233 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
1234 }
1235
1236 static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
1237 int from_port, int to_port, bool static_too)
1238 {
1239 struct mv88e6xxx_atu_entry entry = {
1240 .trunk = false,
1241 .fid = fid,
1242 };
1243
1244 /* EntryState bits must be 0xF */
1245 entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1246
1247 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1248 entry.portv_trunkid = (to_port & 0x0f) << 4;
1249 entry.portv_trunkid |= from_port & 0x0f;
1250
1251 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
1252 }
1253
1254 static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
1255 int port, bool static_too)
1256 {
1257 /* Destination port 0xF means remove the entries */
1258 return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
1259 }
1260
1261 static const char * const mv88e6xxx_port_state_names[] = {
1262 [PORT_CONTROL_STATE_DISABLED] = "Disabled",
1263 [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
1264 [PORT_CONTROL_STATE_LEARNING] = "Learning",
1265 [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
1266 };
1267
1268 static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
1269 u8 state)
1270 {
1271 struct dsa_switch *ds = ps->ds;
1272 int reg, ret = 0;
1273 u8 oldstate;
1274
1275 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
1276 if (reg < 0)
1277 return reg;
1278
1279 oldstate = reg & PORT_CONTROL_STATE_MASK;
1280
1281 if (oldstate != state) {
1282 /* Flush forwarding database if we're moving a port
1283 * from Learning or Forwarding state to Disabled or
1284 * Blocking or Listening state.
1285 */
1286 if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
1287 oldstate == PORT_CONTROL_STATE_FORWARDING)
1288 && (state == PORT_CONTROL_STATE_DISABLED ||
1289 state == PORT_CONTROL_STATE_BLOCKING)) {
1290 ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
1291 if (ret)
1292 return ret;
1293 }
1294
1295 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1296 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
1297 reg);
1298 if (ret)
1299 return ret;
1300
1301 netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
1302 mv88e6xxx_port_state_names[state],
1303 mv88e6xxx_port_state_names[oldstate]);
1304 }
1305
1306 return ret;
1307 }
1308
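/* Compute the port-based VLAN table entry: CPU and DSA ports may send to
 * every port, other ports only to members of their bridge group plus the
 * CPU port and DSA links, and never back out the ingress port.
 */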
1309 static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
1310 int port)
1311 {
1312 struct net_device *bridge = ps->ports[port].bridge_dev;
1313 const u16 mask = (1 << ps->info->num_ports) - 1;
1314 struct dsa_switch *ds = ps->ds;
1315 u16 output_ports = 0;
1316 int reg;
1317 int i;
1318
1319 /* allow CPU port or DSA link(s) to send frames to every port */
1320 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
1321 output_ports = mask;
1322 } else {
1323 for (i = 0; i < ps->info->num_ports; ++i) {
1324 /* allow sending frames to every group member */
1325 if (bridge && ps->ports[i].bridge_dev == bridge)
1326 output_ports |= BIT(i);
1327
1328 /* allow sending frames to CPU port and DSA link(s) */
1329 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
1330 output_ports |= BIT(i);
1331 }
1332 }
1333
1334 /* prevent frames from going back out of the port they came in on */
1335 output_ports &= ~BIT(port);
1336
1337 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
1338 if (reg < 0)
1339 return reg;
1340
1341 reg &= ~mask;
1342 reg |= output_ports & mask;
1343
1344 return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
1345 }
1346
1347 void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
1348 {
1349 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1350 int stp_state;
1351
1352 switch (state) {
1353 case BR_STATE_DISABLED:
1354 stp_state = PORT_CONTROL_STATE_DISABLED;
1355 break;
1356 case BR_STATE_BLOCKING:
1357 case BR_STATE_LISTENING:
1358 stp_state = PORT_CONTROL_STATE_BLOCKING;
1359 break;
1360 case BR_STATE_LEARNING:
1361 stp_state = PORT_CONTROL_STATE_LEARNING;
1362 break;
1363 case BR_STATE_FORWARDING:
1364 default:
1365 stp_state = PORT_CONTROL_STATE_FORWARDING;
1366 break;
1367 }
1368
1369 /* mv88e6xxx_port_stp_state_set may be called with softirqs disabled,
1370 * so we cannot update the port state directly but need to schedule it.
1371 */
1372 ps->ports[port].state = stp_state;
1373 set_bit(port, ps->port_state_update_mask);
1374 schedule_work(&ps->bridge_work);
1375 }
1376
1377 static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
1378 u16 *new, u16 *old)
1379 {
1380 struct dsa_switch *ds = ps->ds;
1381 u16 pvid;
1382 int ret;
1383
1384 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
1385 if (ret < 0)
1386 return ret;
1387
1388 pvid = ret & PORT_DEFAULT_VLAN_MASK;
1389
1390 if (new) {
1391 ret &= ~PORT_DEFAULT_VLAN_MASK;
1392 ret |= *new & PORT_DEFAULT_VLAN_MASK;
1393
1394 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
1395 PORT_DEFAULT_VLAN, ret);
1396 if (ret < 0)
1397 return ret;
1398
1399 netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
1400 pvid);
1401 }
1402
1403 if (old)
1404 *old = pvid;
1405
1406 return 0;
1407 }
1408
1409 static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
1410 int port, u16 *pvid)
1411 {
1412 return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
1413 }
1414
1415 static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
1416 int port, u16 pvid)
1417 {
1418 return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
1419 }
1420
1421 static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
1422 {
1423 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
1424 GLOBAL_VTU_OP_BUSY);
1425 }
1426
1427 static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
1428 {
1429 int ret;
1430
1431 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
1432 if (ret < 0)
1433 return ret;
1434
1435 return _mv88e6xxx_vtu_wait(ps);
1436 }
1437
1438 static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
1439 {
1440 int ret;
1441
1442 ret = _mv88e6xxx_vtu_wait(ps);
1443 if (ret < 0)
1444 return ret;
1445
1446 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
1447 }
1448
1449 static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
1450 struct mv88e6xxx_vtu_stu_entry *entry,
1451 unsigned int nibble_offset)
1452 {
1453 u16 regs[3];
1454 int i;
1455 int ret;
1456
1457 for (i = 0; i < 3; ++i) {
1458 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1459 GLOBAL_VTU_DATA_0_3 + i);
1460 if (ret < 0)
1461 return ret;
1462
1463 regs[i] = ret;
1464 }
1465
1466 for (i = 0; i < ps->info->num_ports; ++i) {
1467 unsigned int shift = (i % 4) * 4 + nibble_offset;
1468 u16 reg = regs[i / 4];
1469
1470 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
1471 }
1472
1473 return 0;
1474 }
1475
1476 static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
1477 struct mv88e6xxx_vtu_stu_entry *entry,
1478 unsigned int nibble_offset)
1479 {
1480 u16 regs[3] = { 0 };
1481 int i;
1482 int ret;
1483
1484 for (i = 0; i < ps->info->num_ports; ++i) {
1485 unsigned int shift = (i % 4) * 4 + nibble_offset;
1486 u8 data = entry->data[i];
1487
1488 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1489 }
1490
1491 for (i = 0; i < 3; ++i) {
1492 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
1493 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1494 if (ret < 0)
1495 return ret;
1496 }
1497
1498 return 0;
1499 }
1500
1501 static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
1502 {
1503 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
1504 vid & GLOBAL_VTU_VID_MASK);
1505 }
1506
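/* Issue a VTU GetNext operation and decode the returned VID, member tags,
 * FID and (where supported) SID into *entry.
 */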
1507 static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
1508 struct mv88e6xxx_vtu_stu_entry *entry)
1509 {
1510 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1511 int ret;
1512
1513 ret = _mv88e6xxx_vtu_wait(ps);
1514 if (ret < 0)
1515 return ret;
1516
1517 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
1518 if (ret < 0)
1519 return ret;
1520
1521 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
1522 if (ret < 0)
1523 return ret;
1524
1525 next.vid = ret & GLOBAL_VTU_VID_MASK;
1526 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1527
1528 if (next.valid) {
1529 ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 0);
1530 if (ret < 0)
1531 return ret;
1532
1533 if (mv88e6xxx_has_fid_reg(ps)) {
1534 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1535 GLOBAL_VTU_FID);
1536 if (ret < 0)
1537 return ret;
1538
1539 next.fid = ret & GLOBAL_VTU_FID_MASK;
1540 } else if (mv88e6xxx_num_databases(ps) == 256) {
1541 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1542 * VTU DBNum[3:0] are located in VTU Operation 3:0
1543 */
1544 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1545 GLOBAL_VTU_OP);
1546 if (ret < 0)
1547 return ret;
1548
1549 next.fid = (ret & 0xf00) >> 4;
1550 next.fid |= ret & 0xf;
1551 }
1552
1553 if (mv88e6xxx_has_stu(ps)) {
1554 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1555 GLOBAL_VTU_SID);
1556 if (ret < 0)
1557 return ret;
1558
1559 next.sid = ret & GLOBAL_VTU_SID_MASK;
1560 }
1561 }
1562
1563 *entry = next;
1564 return 0;
1565 }
1566
1567 int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
1568 struct switchdev_obj_port_vlan *vlan,
1569 int (*cb)(struct switchdev_obj *obj))
1570 {
1571 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1572 struct mv88e6xxx_vtu_stu_entry next;
1573 u16 pvid;
1574 int err;
1575
1576 mutex_lock(&ps->smi_mutex);
1577
1578 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
1579 if (err)
1580 goto unlock;
1581
1582 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
1583 if (err)
1584 goto unlock;
1585
1586 do {
1587 err = _mv88e6xxx_vtu_getnext(ps, &next);
1588 if (err)
1589 break;
1590
1591 if (!next.valid)
1592 break;
1593
1594 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1595 continue;
1596
1597 /* reinit and dump this VLAN obj */
1598 vlan->vid_begin = vlan->vid_end = next.vid;
1599 vlan->flags = 0;
1600
1601 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1602 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1603
1604 if (next.vid == pvid)
1605 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1606
1607 err = cb(&vlan->obj);
1608 if (err)
1609 break;
1610 } while (next.vid < GLOBAL_VTU_VID_MASK);
1611
1612 unlock:
1613 mutex_unlock(&ps->smi_mutex);
1614
1615 return err;
1616 }
1617
1618 static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
1619 struct mv88e6xxx_vtu_stu_entry *entry)
1620 {
1621 u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
1622 u16 reg = 0;
1623 int ret;
1624
1625 ret = _mv88e6xxx_vtu_wait(ps);
1626 if (ret < 0)
1627 return ret;
1628
1629 if (!entry->valid)
1630 goto loadpurge;
1631
1632 /* Write port member tags */
1633 ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
1634 if (ret < 0)
1635 return ret;
1636
1637 if (mv88e6xxx_has_stu(ps)) {
1638 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1639 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1640 if (ret < 0)
1641 return ret;
1642 }
1643
1644 if (mv88e6xxx_has_fid_reg(ps)) {
1645 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1646 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1647 if (ret < 0)
1648 return ret;
1649 } else if (mv88e6xxx_num_databases(ps) == 256) {
1650 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1651 * VTU DBNum[3:0] are located in VTU Operation 3:0
1652 */
1653 op |= (entry->fid & 0xf0) << 8;
1654 op |= entry->fid & 0xf;
1655 }
1656
1657 reg = GLOBAL_VTU_VID_VALID;
1658 loadpurge:
1659 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1660 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1661 if (ret < 0)
1662 return ret;
1663
1664 return _mv88e6xxx_vtu_cmd(ps, op);
1665 }
1666
1667 static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
1668 struct mv88e6xxx_vtu_stu_entry *entry)
1669 {
1670 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1671 int ret;
1672
1673 ret = _mv88e6xxx_vtu_wait(ps);
1674 if (ret < 0)
1675 return ret;
1676
1677 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
1678 sid & GLOBAL_VTU_SID_MASK);
1679 if (ret < 0)
1680 return ret;
1681
1682 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
1683 if (ret < 0)
1684 return ret;
1685
1686 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
1687 if (ret < 0)
1688 return ret;
1689
1690 next.sid = ret & GLOBAL_VTU_SID_MASK;
1691
1692 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
1693 if (ret < 0)
1694 return ret;
1695
1696 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1697
1698 if (next.valid) {
1699 ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 2);
1700 if (ret < 0)
1701 return ret;
1702 }
1703
1704 *entry = next;
1705 return 0;
1706 }
1707
1708 static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
1709 struct mv88e6xxx_vtu_stu_entry *entry)
1710 {
1711 u16 reg = 0;
1712 int ret;
1713
1714 ret = _mv88e6xxx_vtu_wait(ps);
1715 if (ret < 0)
1716 return ret;
1717
1718 if (!entry->valid)
1719 goto loadpurge;
1720
1721 /* Write port states */
1722 ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
1723 if (ret < 0)
1724 return ret;
1725
1726 reg = GLOBAL_VTU_VID_VALID;
1727 loadpurge:
1728 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1729 if (ret < 0)
1730 return ret;
1731
1732 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1733 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1734 if (ret < 0)
1735 return ret;
1736
1737 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1738 }
1739
1740 static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
1741 u16 *new, u16 *old)
1742 {
1743 struct dsa_switch *ds = ps->ds;
1744 u16 upper_mask;
1745 u16 fid;
1746 int ret;
1747
1748 if (mv88e6xxx_num_databases(ps) == 4096)
1749 upper_mask = 0xff;
1750 else if (mv88e6xxx_num_databases(ps) == 256)
1751 upper_mask = 0xf;
1752 else
1753 return -EOPNOTSUPP;
1754
1755 /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
1756 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
1757 if (ret < 0)
1758 return ret;
1759
1760 fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
1761
1762 if (new) {
1763 ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
1764 ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
1765
1766 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
1767 ret);
1768 if (ret < 0)
1769 return ret;
1770 }
1771
1772 /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
1773 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
1774 if (ret < 0)
1775 return ret;
1776
1777 fid |= (ret & upper_mask) << 4;
1778
1779 if (new) {
1780 ret &= ~upper_mask;
1781 ret |= (*new >> 4) & upper_mask;
1782
1783 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
1784 ret);
1785 if (ret < 0)
1786 return ret;
1787
1788 netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
1789 }
1790
1791 if (old)
1792 *old = fid;
1793
1794 return 0;
1795 }
1796
1797 static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
1798 int port, u16 *fid)
1799 {
1800 return _mv88e6xxx_port_fid(ps, port, NULL, fid);
1801 }
1802
1803 static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
1804 int port, u16 fid)
1805 {
1806 return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
1807 }
1808
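/* Pick an unused FID: mark every FID referenced by a port or by a valid
 * VTU entry, take the first free one above zero, and flush its address
 * database.
 */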
1809 static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
1810 {
1811 DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
1812 struct mv88e6xxx_vtu_stu_entry vlan;
1813 int i, err;
1814
1815 bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
1816
1817 /* Set every FID bit used by the (un)bridged ports */
1818 for (i = 0; i < ps->info->num_ports; ++i) {
1819 err = _mv88e6xxx_port_fid_get(ps, i, fid);
1820 if (err)
1821 return err;
1822
1823 set_bit(*fid, fid_bitmap);
1824 }
1825
1826 /* Set every FID bit used by the VLAN entries */
1827 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
1828 if (err)
1829 return err;
1830
1831 do {
1832 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
1833 if (err)
1834 return err;
1835
1836 if (!vlan.valid)
1837 break;
1838
1839 set_bit(vlan.fid, fid_bitmap);
1840 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
1841
1842 /* The reset value 0x000 is used to indicate that multiple address
1843 * databases are not needed. Return the next available positive FID.
1844 */
1845 *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
1846 if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
1847 return -ENOSPC;
1848
1849 /* Clear the database */
1850 return _mv88e6xxx_atu_flush(ps, *fid, true);
1851 }
1852
1853 static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
1854 struct mv88e6xxx_vtu_stu_entry *entry)
1855 {
1856 struct dsa_switch *ds = ps->ds;
1857 struct mv88e6xxx_vtu_stu_entry vlan = {
1858 .valid = true,
1859 .vid = vid,
1860 };
1861 int i, err;
1862
1863 err = _mv88e6xxx_fid_new(ps, &vlan.fid);
1864 if (err)
1865 return err;
1866
1867 /* exclude all ports except the CPU and DSA ports */
1868 for (i = 0; i < ps->info->num_ports; ++i)
1869 vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
1870 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1871 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1872
1873 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
1874 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
1875 struct mv88e6xxx_vtu_stu_entry vstp;
1876
1877 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1878 * implemented, only one STU entry is needed to cover all VTU
1879 * entries. Thus, validate SID 0.
1880 */
1881 vlan.sid = 0;
1882 err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
1883 if (err)
1884 return err;
1885
1886 if (vstp.sid != vlan.sid || !vstp.valid) {
1887 memset(&vstp, 0, sizeof(vstp));
1888 vstp.valid = true;
1889 vstp.sid = vlan.sid;
1890
1891 err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
1892 if (err)
1893 return err;
1894 }
1895 }
1896
1897 *entry = vlan;
1898 return 0;
1899 }
1900
1901 static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
1902 struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
1903 {
1904 int err;
1905
1906 if (!vid)
1907 return -EINVAL;
1908
1909 err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
1910 if (err)
1911 return err;
1912
1913 err = _mv88e6xxx_vtu_getnext(ps, entry);
1914 if (err)
1915 return err;
1916
1917 if (entry->vid != vid || !entry->valid) {
1918 if (!creat)
1919 return -EOPNOTSUPP;
1920 /* -ENOENT would've been more appropriate, but switchdev expects
1921 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
1922 */
1923
1924 err = _mv88e6xxx_vtu_new(ps, vid, entry);
1925 }
1926
1927 return err;
1928 }
1929
1930 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1931 u16 vid_begin, u16 vid_end)
1932 {
1933 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1934 struct mv88e6xxx_vtu_stu_entry vlan;
1935 int i, err;
1936
1937 if (!vid_begin)
1938 return -EOPNOTSUPP;
1939
1940 mutex_lock(&ps->smi_mutex);
1941
1942 err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
1943 if (err)
1944 goto unlock;
1945
1946 do {
1947 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
1948 if (err)
1949 goto unlock;
1950
1951 if (!vlan.valid)
1952 break;
1953
1954 if (vlan.vid > vid_end)
1955 break;
1956
1957 for (i = 0; i < ps->info->num_ports; ++i) {
1958 if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
1959 continue;
1960
1961 if (vlan.data[i] ==
1962 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1963 continue;
1964
1965 if (ps->ports[i].bridge_dev ==
1966 ps->ports[port].bridge_dev)
1967 break; /* same bridge, check next VLAN */
1968
1969 netdev_warn(ds->ports[port],
1970 "hardware VLAN %d already used by %s\n",
1971 vlan.vid,
1972 netdev_name(ps->ports[i].bridge_dev));
1973 err = -EOPNOTSUPP;
1974 goto unlock;
1975 }
1976 } while (vlan.vid < vid_end);
1977
1978 unlock:
1979 mutex_unlock(&ps->smi_mutex);
1980
1981 return err;
1982 }
1983
1984 static const char * const mv88e6xxx_port_8021q_mode_names[] = {
1985 [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
1986 [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
1987 [PORT_CONTROL_2_8021Q_CHECK] = "Check",
1988 [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
1989 };
1990
1991 int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
1992 bool vlan_filtering)
1993 {
1994 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1995 u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
1996 PORT_CONTROL_2_8021Q_DISABLED;
1997 int ret;
1998
1999 mutex_lock(&ps->smi_mutex);
2000
2001 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
2002 if (ret < 0)
2003 goto unlock;
2004
2005 old = ret & PORT_CONTROL_2_8021Q_MASK;
2006
2007 if (new != old) {
2008 ret &= ~PORT_CONTROL_2_8021Q_MASK;
2009 ret |= new & PORT_CONTROL_2_8021Q_MASK;
2010
2011 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
2012 ret);
2013 if (ret < 0)
2014 goto unlock;
2015
2016 netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
2017 mv88e6xxx_port_8021q_mode_names[new],
2018 mv88e6xxx_port_8021q_mode_names[old]);
2019 }
2020
2021 ret = 0;
2022 unlock:
2023 mutex_unlock(&ps->smi_mutex);
2024
2025 return ret;
2026 }
2027
2028 int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
2029 const struct switchdev_obj_port_vlan *vlan,
2030 struct switchdev_trans *trans)
2031 {
2032 int err;
2033
2034 /* If the requested port doesn't belong to the same bridge as the VLAN
2035 * members, do not support it (yet) and fall back to a software VLAN.
2036 */
2037 err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
2038 vlan->vid_end);
2039 if (err)
2040 return err;
2041
2042 /* We don't need any dynamic resource from the kernel (yet),
2043 * so skip the prepare phase.
2044 */
2045 return 0;
2046 }
2047
2048 static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
2049 u16 vid, bool untagged)
2050 {
2051 struct mv88e6xxx_vtu_stu_entry vlan;
2052 int err;
2053
2054 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
2055 if (err)
2056 return err;
2057
2058 vlan.data[port] = untagged ?
2059 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
2060 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
2061
2062 return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
2063 }
2064
2065 void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
2066 const struct switchdev_obj_port_vlan *vlan,
2067 struct switchdev_trans *trans)
2068 {
2069 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2070 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2071 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2072 u16 vid;
2073
2074 mutex_lock(&ps->smi_mutex);
2075
2076 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
2077 if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
2078 netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
2079 vid, untagged ? 'u' : 't');
2080
2081 if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
2082 netdev_err(ds->ports[port], "failed to set PVID %d\n",
2083 vlan->vid_end);
2084
2085 mutex_unlock(&ps->smi_mutex);
2086 }
2087
2088 static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
2089 int port, u16 vid)
2090 {
2091 struct dsa_switch *ds = ps->ds;
2092 struct mv88e6xxx_vtu_stu_entry vlan;
2093 int i, err;
2094
2095 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
2096 if (err)
2097 return err;
2098
2099 /* Tell switchdev if this VLAN is handled in software */
2100 if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
2101 return -EOPNOTSUPP;
2102
2103 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
2104
2105 /* keep the VLAN unless all ports are excluded */
2106 vlan.valid = false;
2107 for (i = 0; i < ps->info->num_ports; ++i) {
2108 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2109 continue;
2110
2111 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
2112 vlan.valid = true;
2113 break;
2114 }
2115 }
2116
2117 err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
2118 if (err)
2119 return err;
2120
2121 return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
2122 }
2123
2124 int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
2125 const struct switchdev_obj_port_vlan *vlan)
2126 {
2127 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2128 u16 pvid, vid;
2129 int err = 0;
2130
2131 mutex_lock(&ps->smi_mutex);
2132
2133 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
2134 if (err)
2135 goto unlock;
2136
2137 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
2138 err = _mv88e6xxx_port_vlan_del(ps, port, vid);
2139 if (err)
2140 goto unlock;
2141
2142 if (vid == pvid) {
2143 err = _mv88e6xxx_port_pvid_set(ps, port, 0);
2144 if (err)
2145 goto unlock;
2146 }
2147 }
2148
2149 unlock:
2150 mutex_unlock(&ps->smi_mutex);
2151
2152 return err;
2153 }
2154
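/* The ATU MAC registers hold the address as three big-endian 16-bit words. */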
2155 static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
2156 const unsigned char *addr)
2157 {
2158 int i, ret;
2159
2160 for (i = 0; i < 3; i++) {
2161 ret = _mv88e6xxx_reg_write(
2162 ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
2163 (addr[i * 2] << 8) | addr[i * 2 + 1]);
2164 if (ret < 0)
2165 return ret;
2166 }
2167
2168 return 0;
2169 }
2170
2171 static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
2172 unsigned char *addr)
2173 {
2174 int i, ret;
2175
2176 for (i = 0; i < 3; i++) {
2177 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
2178 GLOBAL_ATU_MAC_01 + i);
2179 if (ret < 0)
2180 return ret;
2181 addr[i * 2] = ret >> 8;
2182 addr[i * 2 + 1] = ret & 0xff;
2183 }
2184
2185 return 0;
2186 }
2187
2188 static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
2189 struct mv88e6xxx_atu_entry *entry)
2190 {
2191 int ret;
2192
2193 ret = _mv88e6xxx_atu_wait(ps);
2194 if (ret < 0)
2195 return ret;
2196
2197 ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
2198 if (ret < 0)
2199 return ret;
2200
2201 ret = _mv88e6xxx_atu_data_write(ps, entry);
2202 if (ret < 0)
2203 return ret;
2204
2205 return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
2206 }
2207
2208 static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
2209 const unsigned char *addr, u16 vid,
2210 u8 state)
2211 {
2212 struct mv88e6xxx_atu_entry entry = { 0 };
2213 struct mv88e6xxx_vtu_stu_entry vlan;
2214 int err;
2215
2216 /* Null VLAN ID corresponds to the port private database */
2217 if (vid == 0)
2218 err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
2219 else
2220 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
2221 if (err)
2222 return err;
2223
2224 entry.fid = vlan.fid;
2225 entry.state = state;
2226 ether_addr_copy(entry.mac, addr);
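/* A load with the UNUSED state purges the entry, so a destination
 * port vector is only needed for an actual load.
 */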
2227 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2228 entry.trunk = false;
2229 entry.portv_trunkid = BIT(port);
2230 }
2231
2232 return _mv88e6xxx_atu_load(ps, &entry);
2233 }
2234
2235 int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
2236 const struct switchdev_obj_port_fdb *fdb,
2237 struct switchdev_trans *trans)
2238 {
2239 /* We don't need any dynamic resource from the kernel (yet),
2240 * so skip the prepare phase.
2241 */
2242 return 0;
2243 }
2244
2245 void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
2246 const struct switchdev_obj_port_fdb *fdb,
2247 struct switchdev_trans *trans)
2248 {
2249 int state = is_multicast_ether_addr(fdb->addr) ?
2250 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2251 GLOBAL_ATU_DATA_STATE_UC_STATIC;
2252 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2253
2254 mutex_lock(&ps->smi_mutex);
2255 if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
2256 netdev_err(ds->ports[port], "failed to load MAC address\n");
2257 mutex_unlock(&ps->smi_mutex);
2258 }
2259
2260 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
2261 const struct switchdev_obj_port_fdb *fdb)
2262 {
2263 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2264 int ret;
2265
2266 mutex_lock(&ps->smi_mutex);
2267 ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
2268 GLOBAL_ATU_DATA_STATE_UNUSED);
2269 mutex_unlock(&ps->smi_mutex);
2270
2271 return ret;
2272 }
2273
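/* Read the ATU entry following the MAC address currently held in the
 * global ATU MAC registers for the given FID, and decode its state
 * and trunk ID or port vector.
 */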
2274 static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
2275 struct mv88e6xxx_atu_entry *entry)
2276 {
2277 struct mv88e6xxx_atu_entry next = { 0 };
2278 int ret;
2279
2280 next.fid = fid;
2281
2282 ret = _mv88e6xxx_atu_wait(ps);
2283 if (ret < 0)
2284 return ret;
2285
2286 ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
2287 if (ret < 0)
2288 return ret;
2289
2290 ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
2291 if (ret < 0)
2292 return ret;
2293
2294 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
2295 if (ret < 0)
2296 return ret;
2297
2298 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
2299 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2300 unsigned int mask, shift;
2301
2302 if (ret & GLOBAL_ATU_DATA_TRUNK) {
2303 next.trunk = true;
2304 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
2305 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
2306 } else {
2307 next.trunk = false;
2308 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
2309 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
2310 }
2311
2312 next.portv_trunkid = (ret & mask) >> shift;
2313 }
2314
2315 *entry = next;
2316 return 0;
2317 }
2318
2319 static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
2320 u16 fid, u16 vid, int port,
2321 struct switchdev_obj_port_fdb *fdb,
2322 int (*cb)(struct switchdev_obj *obj))
2323 {
2324 struct mv88e6xxx_atu_entry addr = {
2325 .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
2326 };
2327 int err;
2328
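/* Seed the search with the broadcast address so that GetNext wraps
 * around and returns the lowest MAC address in this FID; the walk is
 * complete once it comes back to the broadcast address.
 */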
2329 err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
2330 if (err)
2331 return err;
2332
2333 do {
2334 err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
2335 if (err)
2336 break;
2337
2338 if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
2339 break;
2340
2341 if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
2342 bool is_static = addr.state ==
2343 (is_multicast_ether_addr(addr.mac) ?
2344 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2345 GLOBAL_ATU_DATA_STATE_UC_STATIC);
2346
2347 fdb->vid = vid;
2348 ether_addr_copy(fdb->addr, addr.mac);
2349 fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
2350
2351 err = cb(&fdb->obj);
2352 if (err)
2353 break;
2354 }
2355 } while (!is_broadcast_ether_addr(addr.mac));
2356
2357 return err;
2358 }
2359
2360 int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
2361 struct switchdev_obj_port_fdb *fdb,
2362 int (*cb)(struct switchdev_obj *obj))
2363 {
2364 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2365 struct mv88e6xxx_vtu_stu_entry vlan = {
2366 .vid = GLOBAL_VTU_VID_MASK, /* all ones */
2367 };
2368 u16 fid;
2369 int err;
2370
2371 mutex_lock(&ps->smi_mutex);
2372
2373 /* Dump port's default Filtering Information Database (VLAN ID 0) */
2374 err = _mv88e6xxx_port_fid_get(ps, port, &fid);
2375 if (err)
2376 goto unlock;
2377
2378 err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
2379 if (err)
2380 goto unlock;
2381
2382 /* Dump VLANs' Filtering Information Databases */
2383 err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
2384 if (err)
2385 goto unlock;
2386
2387 do {
2388 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
2389 if (err)
2390 break;
2391
2392 if (!vlan.valid)
2393 break;
2394
2395 err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
2396 fdb, cb);
2397 if (err)
2398 break;
2399 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
2400
2401 unlock:
2402 mutex_unlock(&ps->smi_mutex);
2403
2404 return err;
2405 }
2406
2407 int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2408 struct net_device *bridge)
2409 {
2410 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2411 int i, err = 0;
2412
2413 mutex_lock(&ps->smi_mutex);
2414
2415 /* Assign the bridge and remap each port's VLANTable */
2416 ps->ports[port].bridge_dev = bridge;
2417
2418 for (i = 0; i < ps->info->num_ports; ++i) {
2419 if (ps->ports[i].bridge_dev == bridge) {
2420 err = _mv88e6xxx_port_based_vlan_map(ps, i);
2421 if (err)
2422 break;
2423 }
2424 }
2425
2426 mutex_unlock(&ps->smi_mutex);
2427
2428 return err;
2429 }
2430
2431 void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2432 {
2433 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2434 struct net_device *bridge = ps->ports[port].bridge_dev;
2435 int i;
2436
2437 mutex_lock(&ps->smi_mutex);
2438
2439 /* Unassign the bridge and remap each port's VLANTable */
2440 ps->ports[port].bridge_dev = NULL;
2441
2442 for (i = 0; i < ps->info->num_ports; ++i)
2443 if (i == port || ps->ports[i].bridge_dev == bridge)
2444 if (_mv88e6xxx_port_based_vlan_map(ps, i))
2445 netdev_warn(ds->ports[i], "failed to remap\n");
2446
2447 mutex_unlock(&ps->smi_mutex);
2448 }
2449
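/* Deferred work item: apply the port STP state changes that were
 * queued in port_state_update_mask, under the SMI lock.
 */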
2450 static void mv88e6xxx_bridge_work(struct work_struct *work)
2451 {
2452 struct mv88e6xxx_priv_state *ps;
2453 struct dsa_switch *ds;
2454 int port;
2455
2456 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
2457 ds = ps->ds;
2458
2459 mutex_lock(&ps->smi_mutex);
2460
2461 for (port = 0; port < ps->info->num_ports; ++port)
2462 if (test_and_clear_bit(port, ps->port_state_update_mask) &&
2463 _mv88e6xxx_port_state(ps, port, ps->ports[port].state))
2464 netdev_warn(ds->ports[port],
2465 "failed to update state to %s\n",
2466 mv88e6xxx_port_state_names[ps->ports[port].state]);
2467
2468 mutex_unlock(&ps->smi_mutex);
2469 }
2470
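/* Paged PHY access: register 0x16 selects the page. Always switch
 * back to page 0 before returning, even if the paged access failed,
 * so that later unpaged accesses see the default registers.
 */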
2471 static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
2472 int port, int page, int reg, int val)
2473 {
2474 int ret;
2475
2476 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
2477 if (ret < 0)
2478 goto restore_page_0;
2479
2480 ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
2481 restore_page_0:
2482 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
2483
2484 return ret;
2485 }
2486
2487 static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
2488 int port, int page, int reg)
2489 {
2490 int ret;
2491
2492 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
2493 if (ret < 0)
2494 goto restore_page_0;
2495
2496 ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
2497 restore_page_0:
2498 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
2499
2500 return ret;
2501 }
2502
2503 static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
2504 {
2505 int ret;
2506
2507 ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
2508 MII_BMCR);
2509 if (ret < 0)
2510 return ret;
2511
2512 if (ret & BMCR_PDOWN) {
2513 ret &= ~BMCR_PDOWN;
2514 ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
2515 PAGE_FIBER_SERDES, MII_BMCR,
2516 ret);
2517 }
2518
2519 return ret;
2520 }
2521
2522 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2523 {
2524 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2525 int ret;
2526 u16 reg;
2527
2528 mutex_lock(&ps->smi_mutex);
2529
2530 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2531 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2532 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2533 mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
2534 /* MAC Forcing register: don't force link, speed,
2535 * duplex or flow control state to any particular
2536 * values on physical ports, but force the CPU port
2537 * and all DSA ports to their maximum bandwidth and
2538 * full duplex.
2539 */
2540 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
if (ret < 0)
goto abort;
reg = ret;
2541 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2542 reg &= ~PORT_PCS_CTRL_UNFORCED;
2543 reg |= PORT_PCS_CTRL_FORCE_LINK |
2544 PORT_PCS_CTRL_LINK_UP |
2545 PORT_PCS_CTRL_DUPLEX_FULL |
2546 PORT_PCS_CTRL_FORCE_DUPLEX;
2547 if (mv88e6xxx_6065_family(ps))
2548 reg |= PORT_PCS_CTRL_100;
2549 else
2550 reg |= PORT_PCS_CTRL_1000;
2551 } else {
2552 reg |= PORT_PCS_CTRL_UNFORCED;
2553 }
2554
2555 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2556 PORT_PCS_CTRL, reg);
2557 if (ret)
2558 goto abort;
2559 }
2560
2561 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2562 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2563 * tunneling, determine priority by looking at 802.1p and IP
2564 * priority fields (IP prio has precedence), and set STP state
2565 * to Forwarding.
2566 *
2567 * If this is the CPU link, use DSA or EDSA tagging depending
2568 * on which tagging mode was configured.
2569 *
2570 * If this is a link to another switch, use DSA tagging mode.
2571 *
2572 * If this is the upstream port for this switch, enable
2573 * forwarding of unknown unicasts and multicasts.
2574 */
2575 reg = 0;
2576 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2577 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2578 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
2579 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
2580 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2581 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2582 PORT_CONTROL_STATE_FORWARDING;
2583 if (dsa_is_cpu_port(ds, port)) {
2584 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
2585 reg |= PORT_CONTROL_DSA_TAG;
2586 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2587 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2588 mv88e6xxx_6320_family(ps)) {
2589 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2590 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2591 else
2592 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2593 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2594 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2595 }
2596
2597 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2598 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2599 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
2600 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
2601 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2602 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2603 }
2604 }
2605 if (dsa_is_dsa_port(ds, port)) {
2606 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
2607 reg |= PORT_CONTROL_DSA_TAG;
2608 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2609 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2610 mv88e6xxx_6320_family(ps)) {
2611 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2612 }
2613
2614 if (port == dsa_upstream_port(ds))
2615 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2616 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2617 }
2618 if (reg) {
2619 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2620 PORT_CONTROL, reg);
2621 if (ret)
2622 goto abort;
2623 }
2624
2625 /* If this port is connected to a SerDes, make sure the SerDes is not
2626 * powered down.
2627 */
2628 if (mv88e6xxx_6352_family(ps)) {
2629 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
2630 if (ret < 0)
2631 goto abort;
2632 ret &= PORT_STATUS_CMODE_MASK;
2633 if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
2634 (ret == PORT_STATUS_CMODE_1000BASE_X) ||
2635 (ret == PORT_STATUS_CMODE_SGMII)) {
2636 ret = mv88e6xxx_power_on_serdes(ps);
2637 if (ret < 0)
2638 goto abort;
2639 }
2640 }
2641
2642 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2643 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
2644 * untagged frames on this port, do a destination address lookup on all
2645 * received packets as usual, disable ARP mirroring and don't send a
2646 * copy of all transmitted/received frames on this port to the CPU.
2647 */
2648 reg = 0;
2649 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2650 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2651 mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
2652 mv88e6xxx_6185_family(ps))
2653 reg = PORT_CONTROL_2_MAP_DA;
2654
2655 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2656 mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
2657 reg |= PORT_CONTROL_2_JUMBO_10240;
2658
2659 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
2660 /* Set the upstream port this port should use */
2661 reg |= dsa_upstream_port(ds);
2662 /* enable forwarding of unknown multicast addresses to
2663 * the upstream port
2664 */
2665 if (port == dsa_upstream_port(ds))
2666 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2667 }
2668
2669 reg |= PORT_CONTROL_2_8021Q_DISABLED;
2670
2671 if (reg) {
2672 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2673 PORT_CONTROL_2, reg);
2674 if (ret)
2675 goto abort;
2676 }
2677
2678 /* Port Association Vector: when learning source addresses
2679 * of packets, add the address to the address database using
2680 * a port bitmap that has only the bit for this port set and
2681 * the other bits clear.
2682 */
2683 reg = 1 << port;
2684 /* Disable learning for CPU port */
2685 if (dsa_is_cpu_port(ds, port))
2686 reg = 0;
2687
2688 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
2689 if (ret)
2690 goto abort;
2691
2692 /* Egress rate control 2: disable egress rate control. */
2693 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
2694 0x0000);
2695 if (ret)
2696 goto abort;
2697
2698 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2699 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2700 mv88e6xxx_6320_family(ps)) {
2701 /* Do not limit the period of time that this port can
2702 * be paused for by the remote end or the period of
2703 * time that this port can pause the remote end.
2704 */
2705 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2706 PORT_PAUSE_CTRL, 0x0000);
2707 if (ret)
2708 goto abort;
2709
2710 /* Port ATU control: disable limiting the number of
2711 * address database entries that this port is allowed
2712 * to use.
2713 */
2714 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2715 PORT_ATU_CONTROL, 0x0000);
if (ret)
goto abort;

2716 /* Priority Override: disable DA, SA and VTU priority
2717 * override.
2718 */
2719 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2720 PORT_PRI_OVERRIDE, 0x0000);
2721 if (ret)
2722 goto abort;
2723
2724 /* Port Ethertype: use the Ethertype DSA Ethertype
2725 * value.
2726 */
2727 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2728 PORT_ETH_TYPE, ETH_P_EDSA);
2729 if (ret)
2730 goto abort;
2731 /* Tag Remap: use an identity 802.1p prio -> switch
2732 * prio mapping.
2733 */
2734 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2735 PORT_TAG_REGMAP_0123, 0x3210);
2736 if (ret)
2737 goto abort;
2738
2739 /* Tag Remap 2: use an identity 802.1p prio -> switch
2740 * prio mapping.
2741 */
2742 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2743 PORT_TAG_REGMAP_4567, 0x7654);
2744 if (ret)
2745 goto abort;
2746 }
2747
2748 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2749 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2750 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2751 mv88e6xxx_6320_family(ps)) {
2752 /* Rate Control: disable ingress rate limiting. */
2753 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2754 PORT_RATE_CONTROL, 0x0001);
2755 if (ret)
2756 goto abort;
2757 }
2758
2759 /* Port Control 1: disable trunking, disable sending
2760 * learning messages to this port.
2761 */
2762 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
2763 if (ret)
2764 goto abort;
2765
2766 /* Port based VLAN map: give each port the same default address
2767 * database, and allow bidirectional communication between the
2768 * CPU and DSA port(s), and the other ports.
2769 */
2770 ret = _mv88e6xxx_port_fid_set(ps, port, 0);
2771 if (ret)
2772 goto abort;
2773
2774 ret = _mv88e6xxx_port_based_vlan_map(ps, port);
2775 if (ret)
2776 goto abort;
2777
2778 /* Default VLAN ID and priority: don't set a default VLAN
2779 * ID, and set the default packet priority to zero.
2780 */
2781 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
2782 0x0000);
2783 abort:
2784 mutex_unlock(&ps->smi_mutex);
2785 return ret;
2786 }
2787
2788 int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2789 {
2790 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2791 int ret;
2792 int i;
2793
2794 for (i = 0; i < ps->info->num_ports; i++) {
2795 ret = mv88e6xxx_setup_port(ds, i);
2796 if (ret < 0)
2797 return ret;
2798 }
2799 return 0;
2800 }
2801
2802 int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps)
2803 {
2804 mutex_init(&ps->smi_mutex);
2805
2806 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
2807
2808 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
2809 mutex_init(&ps->eeprom_mutex);
2810
2811 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
2812 mv88e6xxx_ppu_state_init(ps);
2813
2814 return 0;
2815 }
2816
2817 int mv88e6xxx_setup_global(struct dsa_switch *ds)
2818 {
2819 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2820 int err;
2821 int i;
2822
2823 mutex_lock(&ps->smi_mutex);
2824 /* Set the default address aging time to 5 minutes, and
2825 * enable address learn messages to be sent to all message
2826 * ports.
2827 */
2828 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
2829 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2830 if (err)
2831 goto unlock;
2832
2833 /* Configure the IP ToS mapping registers. */
2834 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2835 if (err)
2836 goto unlock;
2837 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2838 if (err)
2839 goto unlock;
2840 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2841 if (err)
2842 goto unlock;
2843 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2844 if (err)
2845 goto unlock;
2846 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2847 if (err)
2848 goto unlock;
2849 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2850 if (err)
2851 goto unlock;
2852 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2853 if (err)
2854 goto unlock;
2855 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2856 if (err)
2857 goto unlock;
2858
2859 /* Configure the IEEE 802.1p priority mapping register. */
2860 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2861 if (err)
2862 goto unlock;
2863
2864 /* Send all frames with destination addresses matching
2865 * 01:80:c2:00:00:0x to the CPU port.
2866 */
2867 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2868 if (err)
2869 goto unlock;
2870
2871 /* Ignore removed tag data on doubly tagged packets, disable
2872 * flow control messages, force flow control priority to the
2873 * highest, and send all special multicast frames to the CPU
2874 * port at the highest priority.
2875 */
2876 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2877 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2878 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2879 if (err)
2880 goto unlock;
2881
2882 /* Program the DSA routing table. */
2883 for (i = 0; i < 32; i++) {
2884 int nexthop = 0x1f;
2885
2886 if (ds->pd->rtable &&
2887 i != ds->index && i < ds->dst->pd->nr_chips)
2888 nexthop = ds->pd->rtable[i] & 0x1f;
2889
2890 err = _mv88e6xxx_reg_write(
2891 ps, REG_GLOBAL2,
2892 GLOBAL2_DEVICE_MAPPING,
2893 GLOBAL2_DEVICE_MAPPING_UPDATE |
2894 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
2895 if (err)
2896 goto unlock;
2897 }
2898
2899 /* Clear all trunk masks. */
2900 for (i = 0; i < 8; i++) {
2901 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2902 0x8000 |
2903 (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2904 ((1 << ps->info->num_ports) - 1));
2905 if (err)
2906 goto unlock;
2907 }
2908
2909 /* Clear all trunk mappings. */
2910 for (i = 0; i < 16; i++) {
2911 err = _mv88e6xxx_reg_write(
2912 ps, REG_GLOBAL2,
2913 GLOBAL2_TRUNK_MAPPING,
2914 GLOBAL2_TRUNK_MAPPING_UPDATE |
2915 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2916 if (err)
2917 goto unlock;
2918 }
2919
2920 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2921 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2922 mv88e6xxx_6320_family(ps)) {
2923 /* Send all frames with destination addresses matching
2924 * 01:80:c2:00:00:2x to the CPU port.
2925 */
2926 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
2927 GLOBAL2_MGMT_EN_2X, 0xffff);
2928 if (err)
2929 goto unlock;
2930
2931 /* Initialise cross-chip port VLAN table to reset
2932 * defaults.
2933 */
2934 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
2935 GLOBAL2_PVT_ADDR, 0x9000);
2936 if (err)
2937 goto unlock;
2938
2939 /* Clear the priority override table. */
2940 for (i = 0; i < 16; i++) {
2941 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
2942 GLOBAL2_PRIO_OVERRIDE,
2943 0x8000 | (i << 8));
2944 if (err)
2945 goto unlock;
2946 }
2947 }
2948
2949 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2950 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2951 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2952 mv88e6xxx_6320_family(ps)) {
2953 /* Disable ingress rate limiting by resetting all
2954 * ingress rate limit registers to their initial
2955 * state.
2956 */
2957 for (i = 0; i < ps->info->num_ports; i++) {
2958 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
2959 GLOBAL2_INGRESS_OP,
2960 0x9000 | (i << 8));
2961 if (err)
2962 goto unlock;
2963 }
2964 }
2965
2966 /* Clear the statistics counters for all ports */
2967 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
2968 GLOBAL_STATS_OP_FLUSH_ALL);
2969 if (err)
2970 goto unlock;
2971
2972 /* Wait for the flush to complete. */
2973 err = _mv88e6xxx_stats_wait(ps);
2974 if (err < 0)
2975 goto unlock;
2976
2977 /* Clear all ATU entries */
2978 err = _mv88e6xxx_atu_flush(ps, 0, true);
2979 if (err < 0)
2980 goto unlock;
2981
2982 /* Clear all the VTU and STU entries */
2983 err = _mv88e6xxx_vtu_stu_flush(ps);
2984 unlock:
2985 mutex_unlock(&ps->smi_mutex);
2986
2987 return err;
2988 }
2989
2990 int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps, bool ppu_active)
2991 {
2992 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2993 struct gpio_desc *gpiod = ps->ds->pd->reset;
2994 unsigned long timeout;
2995 int ret;
2996 int i;
2997
2998 mutex_lock(&ps->smi_mutex);
2999
3000 /* Set all ports to the disabled state. */
3001 for (i = 0; i < ps->info->num_ports; i++) {
3002 ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
3003 if (ret < 0)
3004 goto unlock;
3005
3006 ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
3007 ret & 0xfffc);
3008 if (ret)
3009 goto unlock;
3010 }
3011
3012 /* Wait for transmit queues to drain. */
3013 usleep_range(2000, 4000);
3014
3015 /* If there is a gpio connected to the reset pin, toggle it */
3016 if (gpiod) {
3017 gpiod_set_value_cansleep(gpiod, 1);
3018 usleep_range(10000, 20000);
3019 gpiod_set_value_cansleep(gpiod, 0);
3020 usleep_range(10000, 20000);
3021 }
3022
3023 /* Reset the switch. Keep the PPU active if requested. The PPU
3024 * needs to be active to support indirect phy register access
3025 * through global registers 0x18 and 0x19.
3026 */
3027 if (ppu_active)
3028 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
3029 else
3030 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
3031 if (ret)
3032 goto unlock;
3033
3034 /* Wait up to one second for reset to complete. */
3035 timeout = jiffies + 1 * HZ;
3036 while (time_before(jiffies, timeout)) {
3037 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
3038 if (ret < 0)
3039 goto unlock;
3040
3041 if ((ret & is_reset) == is_reset)
3042 break;
3043 usleep_range(1000, 2000);
3044 }
3045 if (time_after(jiffies, timeout))
3046 ret = -ETIMEDOUT;
3047 else
3048 ret = 0;
3049 unlock:
3050 mutex_unlock(&ps->smi_mutex);
3051
3052 return ret;
3053 }
3054
3055 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
3056 {
3057 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3058 int ret;
3059
3060 mutex_lock(&ps->smi_mutex);
3061 ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
3062 mutex_unlock(&ps->smi_mutex);
3063
3064 return ret;
3065 }
3066
3067 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
3068 int reg, int val)
3069 {
3070 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3071 int ret;
3072
3073 mutex_lock(&ps->smi_mutex);
3074 ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
3075 mutex_unlock(&ps->smi_mutex);
3076
3077 return ret;
3078 }
3079
3080 static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
3081 int port)
3082 {
3083 if (port >= 0 && port < ps->info->num_ports)
3084 return port;
3085 return -EINVAL;
3086 }
3087
3088 int
3089 mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
3090 {
3091 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3092 int addr = mv88e6xxx_port_to_phy_addr(ps, port);
3093 int ret;
3094
3095 if (addr < 0)
3096 return 0xffff;
3097
3098 mutex_lock(&ps->smi_mutex);
3099
3100 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3101 ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
3102 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
3103 ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
3104 else
3105 ret = _mv88e6xxx_phy_read(ps, addr, regnum);
3106
3107 mutex_unlock(&ps->smi_mutex);
3108 return ret;
3109 }
3110
3111 int
3112 mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
3113 {
3114 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3115 int addr = mv88e6xxx_port_to_phy_addr(ps, port);
3116 int ret;
3117
3118 if (addr < 0)
3119 return 0xffff;
3120
3121 mutex_lock(&ps->smi_mutex);
3122
3123 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3124 ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
3125 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
3126 ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
3127 else
3128 ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
3129
3130 mutex_unlock(&ps->smi_mutex);
3131 return ret;
3132 }
3133
3134 #ifdef CONFIG_NET_DSA_HWMON
3135
3136 static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
3137 {
3138 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3139 int ret;
3140 int val;
3141
3142 *temp = 0;
3143
3144 mutex_lock(&ps->smi_mutex);
3145
3146 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
3147 if (ret < 0)
3148 goto error;
3149
3150 /* Enable temperature sensor */
3151 ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
3152 if (ret < 0)
3153 goto error;
3154
3155 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
3156 if (ret < 0)
3157 goto error;
3158
3159 /* Wait for temperature to stabilize */
3160 usleep_range(10000, 12000);
3161
3162 val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
3163 if (val < 0) {
3164 ret = val;
3165 goto error;
3166 }
3167
3168 /* Disable temperature sensor */
3169 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, val & ~(1 << 5));
3170 if (ret < 0)
3171 goto error;
3172
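/* 5-bit reading in 5 degree Celsius steps, offset by -25 degrees */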
3173 *temp = ((val & 0x1f) - 5) * 5;
3174
3175 error:
3176 _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
3177 mutex_unlock(&ps->smi_mutex);
3178 return ret;
3179 }
3180
3181 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
3182 {
3183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3184 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3185 int ret;
3186
3187 *temp = 0;
3188
3189 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
3190 if (ret < 0)
3191 return ret;
3192
3193 *temp = (ret & 0xff) - 25;
3194
3195 return 0;
3196 }
3197
3198 int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
3199 {
3200 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3201
3202 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
3203 return -EOPNOTSUPP;
3204
3205 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
3206 return mv88e63xx_get_temp(ds, temp);
3207
3208 return mv88e61xx_get_temp(ds, temp);
3209 }
3210
3211 int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
3212 {
3213 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3214 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3215 int ret;
3216
3217 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3218 return -EOPNOTSUPP;
3219
3220 *temp = 0;
3221
3222 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3223 if (ret < 0)
3224 return ret;
3225
3226 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
3227
3228 return 0;
3229 }
3230
3231 int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
3232 {
3233 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3234 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3235 int ret;
3236
3237 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3238 return -EOPNOTSUPP;
3239
3240 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3241 if (ret < 0)
3242 return ret;
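/* Encode the limit as round(temp / 5) + 5, clamped to the 5-bit
 * field in bits 12:8 of the limit register, preserving other bits.
 */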
3243 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
3244 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
3245 (ret & 0xe0ff) | (temp << 8));
3246 }
3247
3248 int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
3249 {
3250 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3251 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3252 int ret;
3253
3254 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3255 return -EOPNOTSUPP;
3256
3257 *alarm = false;
3258
3259 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3260 if (ret < 0)
3261 return ret;
3262
3263 *alarm = !!(ret & 0x40);
3264
3265 return 0;
3266 }
3267 #endif /* CONFIG_NET_DSA_HWMON */
3268
3269 static const struct mv88e6xxx_info *
3270 mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
3271 unsigned int num)
3272 {
3273 int i;
3274
3275 for (i = 0; i < num; ++i)
3276 if (table[i].prod_num == prod_num)
3277 return &table[i];
3278
3279 return NULL;
3280 }
3281
3282 const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev,
3283 int sw_addr, void **priv,
3284 const struct mv88e6xxx_info *table,
3285 unsigned int num)
3286 {
3287 const struct mv88e6xxx_info *info;
3288 struct mv88e6xxx_priv_state *ps;
3289 struct mii_bus *bus;
3290 const char *name;
3291 int id, prod_num, rev;
3292
3293 bus = dsa_host_dev_to_mii_bus(host_dev);
3294 if (!bus)
3295 return NULL;
3296
3297 id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
3298 if (id < 0)
3299 return NULL;
3300
3301 prod_num = (id & 0xfff0) >> 4;
3302 rev = id & 0x000f;
3303
3304 info = mv88e6xxx_lookup_info(prod_num, table, num);
3305 if (!info)
3306 return NULL;
3307
3308 name = info->name;
3309
3310 ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
3311 if (!ps)
3312 return NULL;
3313
3314 ps->bus = bus;
3315 ps->sw_addr = sw_addr;
3316 ps->info = info;
3317
3318 *priv = ps;
3319
3320 dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
3321 prod_num, name, rev);
3322
3323 return name;
3324 }
3325
3326 static int __init mv88e6xxx_init(void)
3327 {
3328 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3329 register_switch_driver(&mv88e6131_switch_driver);
3330 #endif
3331 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
3332 register_switch_driver(&mv88e6123_switch_driver);
3333 #endif
3334 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
3335 register_switch_driver(&mv88e6352_switch_driver);
3336 #endif
3337 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
3338 register_switch_driver(&mv88e6171_switch_driver);
3339 #endif
3340 return 0;
3341 }
3342 module_init(mv88e6xxx_init);
3343
3344 static void __exit mv88e6xxx_cleanup(void)
3345 {
3346 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
3347 unregister_switch_driver(&mv88e6171_switch_driver);
3348 #endif
3349 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
3350 unregister_switch_driver(&mv88e6352_switch_driver);
3351 #endif
3352 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
3353 unregister_switch_driver(&mv88e6123_switch_driver);
3354 #endif
3355 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3356 unregister_switch_driver(&mv88e6131_switch_driver);
3357 #endif
3358 }
3359 module_exit(mv88e6xxx_cleanup);
3360
3361 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
3362 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
3363 MODULE_LICENSE("GPL");