/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"
#define DRV_NAME        "thunder-BGX"
#define DRV_VERSION     "1.0"
struct lmac {
        struct bgx              *bgx;
        int                     dmac;
        unsigned char           mac[ETH_ALEN];
        bool                    link_up;
        int                     lmacid; /* ID within BGX */
        int                     lmacid_bd; /* ID on board */
        struct net_device       netdev;
        struct phy_device       *phydev;
        unsigned int            last_duplex;
        unsigned int            last_link;
        unsigned int            last_speed;
        bool                    is_sgmii;
        struct delayed_work     dwork;
        struct workqueue_struct *check_link;
};

struct bgx {
        u8                      bgx_id;
        u8                      qlm_mode;
        struct lmac             lmac[MAX_LMAC_PER_BGX];
        int                     lmac_count;
        int                     lmac_type;
        int                     lane_to_sds;
        int                     use_training;
        void __iomem            *reg_base;
        struct pci_dev          *pdev;
};
static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */
static int bgx_xaui_check_link(struct lmac *lmac);
/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
        { 0, }  /* end of table */
};
MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses. So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver.  The readq()/writeq()
 * functions add explicit ordering operation which in this case are
 * redundant, and only add overhead.
 */
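/* Note on the address math used by the accessors below: the LMAC index is
 * folded into bits 20 and up of the CSR address ((u32)lmac << 20), so each
 * LMAC effectively owns a 1MB-aligned window of the BGX register space and
 * the per-LMAC register offsets are relative to that window.
 */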
/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
        void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

        return readq_relaxed(addr);
}
static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
        void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

        writeq_relaxed(val, addr);
}
static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
        void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

        writeq_relaxed(val | readq_relaxed(addr), addr);
}
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
        int timeout = 100;
        u64 reg_val;

        while (timeout) {
                reg_val = bgx_reg_read(bgx, lmac, reg);
                if (zero && !(reg_val & mask))
                        return 0;
                if (!zero && (reg_val & mask))
                        return 0;
                usleep_range(1000, 2000);
                timeout--;
        }
        return 1;
}
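/* Typical bgx_poll_reg() usage in this file: wait for a self-clearing reset
 * bit to drop (zero == true), e.g.
 *
 *      if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true))
 *              ... reset did not complete within the ~100-200ms budget ...
 *
 * or wait for a status bit to latch high (zero == false). The return value
 * is 0 on success and 1 on timeout.
 */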
/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
        int i;
        unsigned map = 0;

        for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
                if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
                        map |= (1 << i);
        }

        return map;
}
EXPORT_SYMBOL(bgx_get_map);
/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
        struct bgx *bgx;

        bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
        if (bgx)
                return bgx->lmac_count;

        return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);
/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
        struct bgx_link_status *link = (struct bgx_link_status *)status;
        struct bgx *bgx;
        struct lmac *lmac;

        bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
        if (!bgx)
                return;

        lmac = &bgx->lmac[lmacid];
        link->link_up = lmac->link_up;
        link->duplex = lmac->last_duplex;
        link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
        struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

        if (bgx)
                return bgx->lmac[lmacid].mac;

        return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
        struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

        if (!bgx)
                return;

        ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);
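/* The exported helpers above are meant for the NIC PF driver, which
 * addresses a port as (node, bgx_idx, lmacid). An illustrative (hypothetical)
 * caller, where 'nic', 'bgx_idx' and 'lmac' stand in for whatever names the
 * consumer actually uses:
 *
 *      struct bgx_link_status link;
 *
 *      bgx_get_lmac_link_state(nic->node, bgx_idx, lmac, &link);
 *      if (link.link_up)
 *              ... report link.speed / link.duplex ...
 */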
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
        struct bgx *bgx = lmac->bgx;
        u64 cmr_cfg;
        u64 port_cfg = 0;
        u64 misc_ctl = 0;

        cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
        cmr_cfg &= ~CMR_EN;
        bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

        port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
        misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

        if (lmac->link_up) {
                misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
                port_cfg &= ~GMI_PORT_CFG_DUPLEX;
                port_cfg |= (lmac->last_duplex << 2);
        } else {
                misc_ctl |= PCS_MISC_CTL_GMX_ENO;
        }

        switch (lmac->last_speed) {
        case 10:
                port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
                port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
                port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
                misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
                misc_ctl |= 50; /* samp_pt */
                bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
                bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
                break;
        case 100:
                port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
                port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
                port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
                misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
                misc_ctl |= 5; /* samp_pt */
                bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
                bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
                break;
        case 1000:
                port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
                port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
                port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
                misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
                misc_ctl |= 1; /* samp_pt */
                bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
                if (lmac->last_duplex)
                        bgx_reg_write(bgx, lmac->lmacid,
                                      BGX_GMP_GMI_TXX_BURST, 0);
                else
                        bgx_reg_write(bgx, lmac->lmacid,
                                      BGX_GMP_GMI_TXX_BURST, 8192);
                break;
        default:
                break;
        }
        bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
        bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

        port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

        /* renable lmac */
        cmr_cfg |= CMR_EN;
        bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}
static void bgx_lmac_handler(struct net_device *netdev)
{
        struct lmac *lmac = container_of(netdev, struct lmac, netdev);
        struct phy_device *phydev = lmac->phydev;
        int link_changed = 0;

        if (!lmac)
                return;

        if (!phydev->link && lmac->last_link)
                link_changed = -1;

        if (phydev->link &&
            (lmac->last_duplex != phydev->duplex ||
             lmac->last_link != phydev->link ||
             lmac->last_speed != phydev->speed)) {
                link_changed = 1;
        }

        lmac->last_link = phydev->link;
        lmac->last_speed = phydev->speed;
        lmac->last_duplex = phydev->duplex;

        if (!link_changed)
                return;

        if (link_changed > 0)
                lmac->link_up = true;
        else
                lmac->link_up = false;

        if (lmac->is_sgmii)
                bgx_sgmii_change_link_state(lmac);
        else
                bgx_xaui_check_link(lmac);
}
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
        struct bgx *bgx;

        bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
        if (!bgx)
                return 0;

        if (idx > 8)
                lmac = 0;
        return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
        struct bgx *bgx;

        bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
        if (!bgx)
                return 0;

        return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
        u64 offset;

        while (bgx->lmac[lmac].dmac > 0) {
                offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
                        (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
                bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
                bgx->lmac[lmac].dmac--;
        }
}
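/* The CAM offset math above reflects the BGX_CMR_RX_DMACX_CAM layout as used
 * by this driver: one u64 entry per DMAC filter, grouped as
 * MAX_DMAC_PER_LMAC consecutive entries per LMAC, so entry n of LMAC l lives
 * at offset (l * MAX_DMAC_PER_LMAC + n) * sizeof(u64).
 */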
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
        u64 cfg;

        bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
        /* max packet size */
        bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

        /* Disable frame alignment if using preamble */
        cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
        if (cfg & 1)
                bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

        /* Enable lmac */
        bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

        /* PCS reset */
        bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
        if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
                         PCS_MRX_CTL_RESET, true)) {
                dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
                return -1;
        }

        /* power down, reset autoneg, autoneg enable */
        cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
        cfg &= ~PCS_MRX_CTL_PWR_DN;
        cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
        bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

        if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
                         PCS_MRX_STATUS_AN_CPT, false)) {
                dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
                return -1;
        }

        return 0;
}
static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
        u64 cfg;

        /* Reset SPU */
        bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
        if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
                dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
                return -1;
        }

        /* Disable LMAC */
        cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
        cfg &= ~CMR_EN;
        bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

        bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
        /* Set interleaved running disparity for RXAUI */
        if (bgx->lmac_type != BGX_MODE_RXAUI)
                bgx_reg_modify(bgx, lmacid,
                               BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
        else
                bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
                               SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

        /* clear all interrupts */
        cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
        bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
        cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
        bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
        bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

        if (bgx->use_training) {
                bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
                bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
                bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
                /* training enable */
                bgx_reg_modify(bgx, lmacid,
                               BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
        }

        /* Append FCS to each packet */
        bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

        /* Disable forward error correction */
        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
        cfg &= ~SPU_FEC_CTL_FEC_EN;
        bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

        /* Disable autoneg */
        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
        cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
        bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
        if (bgx->lmac_type == BGX_MODE_10G_KR)
                cfg |= (1 << 23);
        else if (bgx->lmac_type == BGX_MODE_40G_KR)
                cfg |= (1 << 24);
        else
                cfg &= ~((1 << 23) | (1 << 24));
        cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
        bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

        cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
        cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
        bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

        /* Enable lmac */
        bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
        cfg &= ~SPU_CTL_LOW_POWER;
        bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

        cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
        cfg &= ~SMU_TX_CTL_UNI_EN;
        cfg |= SMU_TX_CTL_DIC_EN;
        bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

        /* take lmac_count into account */
        bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
        /* max packet size */
        bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

        return 0;
}
static int bgx_xaui_check_link(struct lmac *lmac)
{
        struct bgx *bgx = lmac->bgx;
        int lmacid = lmac->lmacid;
        int lmac_type = bgx->lmac_type;
        u64 cfg;

        bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
        if (bgx->use_training) {
                cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
                if (!(cfg & (1ull << 13))) {
                        cfg = (1ull << 13) | (1ull << 14);
                        bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
                        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
                        cfg |= (1ull << 0);
                        bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
                        return -1;
                }
        }

        /* wait for PCS to come out of reset */
        if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
                dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
                return -1;
        }

        if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
            (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
                if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
                                 SPU_BR_STATUS_BLK_LOCK, false)) {
                        dev_err(&bgx->pdev->dev,
                                "SPU_BR_STATUS_BLK_LOCK not completed\n");
                        return -1;
                }
        } else {
                if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
                                 SPU_BX_STATUS_RX_ALIGN, false)) {
                        dev_err(&bgx->pdev->dev,
                                "SPU_BX_STATUS_RX_ALIGN not completed\n");
                        return -1;
                }
        }

        /* Clear rcvflt bit (latching high) and read it back */
        bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
        if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
                dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
                if (bgx->use_training) {
                        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
                        if (!(cfg & (1ull << 13))) {
                                cfg = (1ull << 13) | (1ull << 14);
                                bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
                                cfg = bgx_reg_read(bgx, lmacid,
                                                   BGX_SPUX_BR_PMD_CRTL);
                                cfg |= (1ull << 0);
                                bgx_reg_write(bgx, lmacid,
                                              BGX_SPUX_BR_PMD_CRTL, cfg);
                        }
                }
                return -1;
        }

        /* Wait for MAC RX to be ready */
        if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
                         SMU_RX_CTL_STATUS, true)) {
                dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
                return -1;
        }

        /* Wait for BGX RX to be idle */
        if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
                dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
                return -1;
        }

        /* Wait for BGX TX to be idle */
        if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
                dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
                return -1;
        }

        if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
                dev_err(&bgx->pdev->dev, "Receive fault\n");
                return -1;
        }

        /* Receive link is latching low. Force it high and verify it */
        bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
        if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
                         SPU_STATUS1_RCV_LNK, false)) {
                dev_err(&bgx->pdev->dev, "SPU receive link down\n");
                return -1;
        }

        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
        cfg &= ~SPU_MISC_CTL_RX_DIS;
        bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
        return 0;
}
static void bgx_poll_for_link(struct work_struct *work)
{
        struct lmac *lmac;
        u64 link;

        lmac = container_of(work, struct lmac, dwork.work);

        /* Receive link is latching low. Force it high and verify it */
        bgx_reg_modify(lmac->bgx, lmac->lmacid,
                       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
        bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
                     SPU_STATUS1_RCV_LNK, false);

        link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
        if (link & SPU_STATUS1_RCV_LNK) {
                lmac->link_up = true;
                if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
                        lmac->last_speed = 40000;
                else
                        lmac->last_speed = 10000;
                lmac->last_duplex = 1;
        } else {
                lmac->link_up = false;
        }

        if (lmac->last_link != lmac->link_up) {
                lmac->last_link = lmac->link_up;
                if (lmac->link_up)
                        bgx_xaui_check_link(lmac);
        }

        queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
        struct lmac *lmac;
        u64 cfg;

        lmac = &bgx->lmac[lmacid];
        lmac->bgx = bgx;

        if (bgx->lmac_type == BGX_MODE_SGMII) {
                lmac->is_sgmii = 1;
                if (bgx_lmac_sgmii_init(bgx, lmacid))
                        return -1;
        } else {
                lmac->is_sgmii = 0;
                if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
                        return -1;
        }

        if (lmac->is_sgmii) {
                cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
                cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
                bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
                bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
        } else {
                cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
                cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
                bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
                bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
        }

        /* Enable lmac */
        bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
                       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

        /* Restore default cfg, incase low level firmware changed it */
        bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

        if ((bgx->lmac_type != BGX_MODE_XFI) &&
            (bgx->lmac_type != BGX_MODE_XLAUI) &&
            (bgx->lmac_type != BGX_MODE_40G_KR) &&
            (bgx->lmac_type != BGX_MODE_10G_KR)) {
                if (!lmac->phydev)
                        return -ENODEV;

                lmac->phydev->dev_flags = 0;

                if (phy_connect_direct(&lmac->netdev, lmac->phydev,
                                       bgx_lmac_handler,
                                       PHY_INTERFACE_MODE_SGMII))
                        return -ENODEV;

                phy_start_aneg(lmac->phydev);
        } else {
                lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
                                                   WQ_MEM_RECLAIM, 1);
                if (!lmac->check_link)
                        return -ENOMEM;
                INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
                queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
        }

        return 0;
}
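/* Link supervision thus takes one of two forms, chosen above: SGMII-class
 * LMACs hand the job to phylib (bgx_lmac_handler() runs on PHY state
 * changes), while the XFI/XLAUI/10G_KR/40G_KR types have no PHY driver
 * attached and are instead polled every 2 seconds by bgx_poll_for_link()
 * on the per-LMAC "check_link" workqueue.
 */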
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
        struct lmac *lmac;
        u64 cmrx_cfg;

        lmac = &bgx->lmac[lmacid];
        if (lmac->check_link) {
                /* Destroy work queue */
                cancel_delayed_work(&lmac->dwork);
                flush_workqueue(lmac->check_link);
                destroy_workqueue(lmac->check_link);
        }

        cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
        cmrx_cfg &= ~(1 << 15);
        bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
        bgx_flush_dmac_addrs(bgx, lmacid);

        if (lmac->phydev)
                phy_disconnect(lmac->phydev);

        lmac->phydev = NULL;
}
static void bgx_set_num_ports(struct bgx *bgx)
{
        u64 lmac_count;

        switch (bgx->qlm_mode) {
        case QLM_MODE_SGMII:
                bgx->lmac_count = 4;
                bgx->lmac_type = BGX_MODE_SGMII;
                bgx->lane_to_sds = 0;
                break;
        case QLM_MODE_XAUI_1X4:
                bgx->lmac_count = 1;
                bgx->lmac_type = BGX_MODE_XAUI;
                bgx->lane_to_sds = 0xE4;
                break;
        case QLM_MODE_RXAUI_2X2:
                bgx->lmac_count = 2;
                bgx->lmac_type = BGX_MODE_RXAUI;
                bgx->lane_to_sds = 0xE4;
                break;
        case QLM_MODE_XFI_4X1:
                bgx->lmac_count = 4;
                bgx->lmac_type = BGX_MODE_XFI;
                bgx->lane_to_sds = 0;
                break;
        case QLM_MODE_XLAUI_1X4:
                bgx->lmac_count = 1;
                bgx->lmac_type = BGX_MODE_XLAUI;
                bgx->lane_to_sds = 0xE4;
                break;
        case QLM_MODE_10G_KR_4X1:
                bgx->lmac_count = 4;
                bgx->lmac_type = BGX_MODE_10G_KR;
                bgx->lane_to_sds = 0;
                bgx->use_training = 1;
                break;
        case QLM_MODE_40G_KR4_1X4:
                bgx->lmac_count = 1;
                bgx->lmac_type = BGX_MODE_40G_KR;
                bgx->lane_to_sds = 0xE4;
                bgx->use_training = 1;
                break;
        default:
                bgx->lmac_count = 0;
                break;
        }

        /* Check if low level firmware has programmed LMAC count
         * based on board type, if yes consider that otherwise
         * the default static values
         */
        lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
        if (lmac_count != 4)
                bgx->lmac_count = lmac_count;
}
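/* A lane_to_sds value of 0xE4 (binary 11 10 01 00) is read as four 2-bit
 * fields, presumably mapping LMAC lanes 3..0 straight onto SerDes lanes
 * 3..0; single-lane modes start at 0 and add the LMAC index instead (see
 * bgx_init_hw() below).
 */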
static void bgx_init_hw(struct bgx *bgx)
{
        int i;

        bgx_set_num_ports(bgx);

        bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
        if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
                dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

        /* Set lmac type and lane2serdes mapping */
        for (i = 0; i < bgx->lmac_count; i++) {
                if (bgx->lmac_type == BGX_MODE_RXAUI) {
                        if (i)
                                bgx->lane_to_sds = 0x0e;
                        else
                                bgx->lane_to_sds = 0x04;
                        bgx_reg_write(bgx, i, BGX_CMRX_CFG,
                                      (bgx->lmac_type << 8) | bgx->lane_to_sds);
                        continue;
                }
                bgx_reg_write(bgx, i, BGX_CMRX_CFG,
                              (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
                bgx->lmac[i].lmacid_bd = lmac_count;
                lmac_count++;
        }

        bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
        bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

        /* Set the backpressure AND mask */
        for (i = 0; i < bgx->lmac_count; i++)
                bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
                               ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
                               (i * MAX_BGX_CHANS_PER_LMAC));

        /* Disable all MAC filtering */
        for (i = 0; i < RX_DMAC_COUNT; i++)
                bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

        /* Disable MAC steering (NCSI traffic) */
        for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
                bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
static void bgx_get_qlm_mode(struct bgx *bgx)
{
        struct device *dev = &bgx->pdev->dev;
        int lmac_type;
        int train_en;

        /* Read LMAC0 type to figure out QLM mode
         * This is configured by low level firmware
         */
        lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
        lmac_type = (lmac_type >> 8) & 0x07;

        train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
                                SPU_PMD_CRTL_TRAIN_EN;

        switch (lmac_type) {
        case BGX_MODE_SGMII:
                bgx->qlm_mode = QLM_MODE_SGMII;
                dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
                break;
        case BGX_MODE_XAUI:
                bgx->qlm_mode = QLM_MODE_XAUI_1X4;
                dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
                break;
        case BGX_MODE_RXAUI:
                bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
                dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
                break;
        case BGX_MODE_XFI:
                if (!train_en) {
                        bgx->qlm_mode = QLM_MODE_XFI_4X1;
                        dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
                } else {
                        bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
                        dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
                }
                break;
        case BGX_MODE_XLAUI:
                if (!train_en) {
                        bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
                        dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
                } else {
                        bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
                        dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
                }
                break;
        default:
                bgx->qlm_mode = QLM_MODE_SGMII;
                dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
        }
}
static void bgx_init_of(struct bgx *bgx, struct device_node *np)
{
        struct device_node *np_child;
        u8 lmac = 0;

        for_each_child_of_node(np, np_child) {
                struct device_node *phy_np;
                const char *mac;

                phy_np = of_parse_phandle(np_child, "phy-handle", 0);
                if (phy_np)
                        bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);

                mac = of_get_mac_address(np_child);
                if (mac)
                        ether_addr_copy(bgx->lmac[lmac].mac, mac);

                SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
                bgx->lmac[lmac].lmacid = lmac;
                lmac++;
                if (lmac == MAX_LMAC_PER_BGX)
                        break;
        }
}
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int err;
        struct device *dev = &pdev->dev;
        struct bgx *bgx = NULL;
        struct device_node *np;
        char bgx_sel[5];
        u8 lmac;

        bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
        if (!bgx)
                return -ENOMEM;
        bgx->pdev = pdev;

        pci_set_drvdata(pdev, bgx);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        /* MAP configuration registers */
        bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!bgx->reg_base) {
                dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
                err = -ENOMEM;
                goto err_release_regions;
        }
        bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
        bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;

        bgx_vnic[bgx->bgx_id] = bgx;
        bgx_get_qlm_mode(bgx);

        snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
        np = of_find_node_by_name(NULL, bgx_sel);
        if (np)
                bgx_init_of(bgx, np);

        bgx_init_hw(bgx);

        /* Enable all LMACs */
        for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
                err = bgx_lmac_enable(bgx, lmac);
                if (err) {
                        dev_err(dev, "BGX%d failed to enable lmac%d\n",
                                bgx->bgx_id, lmac);
                        goto err_enable;
                }
        }

        return 0;

err_enable:
        bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
static void bgx_remove(struct pci_dev *pdev)
{
        struct bgx *bgx = pci_get_drvdata(pdev);
        u8 lmac;

        /* Disable all LMACs */
        for (lmac = 0; lmac < bgx->lmac_count; lmac++)
                bgx_lmac_disable(bgx, lmac);

        bgx_vnic[bgx->bgx_id] = NULL;
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
static struct pci_driver bgx_driver = {
        .name = DRV_NAME,
        .id_table = bgx_id_table,
        .probe = bgx_probe,
        .remove = bgx_remove,
};
static int __init bgx_init_module(void)
{
        pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

        return pci_register_driver(&bgx_driver);
}
static void __exit bgx_cleanup_module(void)
{
        pci_unregister_driver(&bgx_driver);
}
module_init(bgx_init_module);
module_exit(bgx_cleanup_module);