2 * AMD 10Gb Ethernet PHY driver
4 * This file is available to you under your choice of the following two
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
25 * License 2: Modified BSD
27 * Copyright (c) 2014 Advanced Micro Devices, Inc.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions are met:
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in the
36 * documentation and/or other materials provided with the distribution.
37 * * Neither the name of Advanced Micro Devices, Inc. nor the
38 * names of its contributors may be used to endorse or promote products
39 * derived from this software without specific prior written permission.
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
42 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 #include <linux/kernel.h>
54 #include <linux/device.h>
55 #include <linux/platform_device.h>
56 #include <linux/string.h>
57 #include <linux/errno.h>
58 #include <linux/unistd.h>
59 #include <linux/slab.h>
60 #include <linux/interrupt.h>
61 #include <linux/init.h>
62 #include <linux/delay.h>
63 #include <linux/netdevice.h>
64 #include <linux/etherdevice.h>
65 #include <linux/skbuff.h>
67 #include <linux/module.h>
68 #include <linux/mii.h>
69 #include <linux/ethtool.h>
70 #include <linux/phy.h>
71 #include <linux/mdio.h>
74 #include <linux/of_platform.h>
75 #include <linux/of_device.h>
76 #include <linux/uaccess.h>
77 #include <linux/bitops.h>
79 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
80 MODULE_LICENSE("Dual BSD/GPL");
81 MODULE_VERSION("1.0.0-a");
82 MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define XGBE_PHY_ID	0x000162d0
#define XGBE_PHY_MASK	0xfffffff0

/* Device-tree property selecting the low-speed rate (1G vs 2.5G) */
#define XGBE_PHY_SPEEDSET_PROPERTY	"amd,speed-set"

/* Auto-negotiation interrupt status bits (MDIO_AN_INT) */
#define XGBE_AN_INT_CMPLT		0x01
#define XGBE_AN_INC_LINK		0x02
#define XGBE_AN_PG_RCV			0x04

/* Extended Next Page message fields */
#define XNP_MCF_NULL_MESSAGE		0x001
#define XNP_ACK_PROCESSED		BIT(12)
#define XNP_MP_FORMATTED		BIT(13)
#define XNP_NP_EXCHANGE			BIT(15)

/* Number of 50-75us polls to wait for a SerDes rate change */
#define XGBE_PHY_RATECHANGE_COUNT	500

/* Vendor/clause-45 registers that may not be in <linux/mdio.h> yet;
 * guard each definition so a newer header wins.
 */
#ifndef MDIO_PMA_10GBR_PMD_CTRL
#define MDIO_PMA_10GBR_PMD_CTRL		0x0096
#endif

#ifndef MDIO_PMA_10GBR_FEC_CTRL
#define MDIO_PMA_10GBR_FEC_CTRL		0x00ab
#endif

#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP			0x0016
#endif

#ifndef MDIO_AN_INTMASK
#define MDIO_AN_INTMASK			0x8001
#endif

#ifndef MDIO_AN_INT
#define MDIO_AN_INT			0x8002
#endif

#ifndef MDIO_AN_KR_CTRL
#define MDIO_AN_KR_CTRL			0x8003
#endif

#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G		(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif

#ifndef MDIO_KR_CTRL_PDETECT
#define MDIO_KR_CTRL_PDETECT		0x01
#endif

/* SerDes integration register offsets */
#define SIR0_KR_RT_1			0x002c
#define SIR0_STATUS			0x0040
#define SIR1_SPEED			0x0000

/* SerDes integration register entry bit positions and sizes */
#define SIR0_KR_RT_1_RESET_INDEX	11
#define SIR0_KR_RT_1_RESET_WIDTH	1
#define SIR0_STATUS_RX_READY_INDEX	0
#define SIR0_STATUS_RX_READY_WIDTH	1
#define SIR0_STATUS_TX_READY_INDEX	8
#define SIR0_STATUS_TX_READY_WIDTH	1
#define SIR1_SPEED_DATARATE_INDEX	4
#define SIR1_SPEED_DATARATE_WIDTH	2
#define SIR1_SPEED_PI_SPD_SEL_INDEX	12
#define SIR1_SPEED_PI_SPD_SEL_WIDTH	4
#define SIR1_SPEED_PLLSEL_INDEX		3
#define SIR1_SPEED_PLLSEL_WIDTH		1
#define SIR1_SPEED_RATECHANGE_INDEX	6
#define SIR1_SPEED_RATECHANGE_WIDTH	1
#define SIR1_SPEED_TXAMP_INDEX		8
#define SIR1_SPEED_TXAMP_WIDTH		4
#define SIR1_SPEED_WORDMODE_INDEX	0
#define SIR1_SPEED_WORDMODE_WIDTH	3

/* Per-speed SerDes configuration values */
#define SPEED_10000_CDR			0x7
#define SPEED_10000_PLL			0x1
#define SPEED_10000_RATE		0x0
#define SPEED_10000_TXAMP		0xa
#define SPEED_10000_WORD		0x7

#define SPEED_2500_CDR			0x2
#define SPEED_2500_PLL			0x0
#define SPEED_2500_RATE			0x1
#define SPEED_2500_TXAMP		0xf
#define SPEED_2500_WORD			0x1

#define SPEED_1000_CDR			0x2
#define SPEED_1000_PLL			0x0
#define SPEED_1000_RATE			0x3
#define SPEED_1000_TXAMP		0xf
#define SPEED_1000_WORD			0x1

/* SerDes RxTx register offsets */
#define RXTX_REG20			0x0050
#define RXTX_REG114			0x01c8

/* SerDes RxTx register entry bit positions and sizes */
#define RXTX_REG20_BLWC_ENA_INDEX	2
#define RXTX_REG20_BLWC_ENA_WIDTH	1
#define RXTX_REG114_PQ_REG_INDEX	9
#define RXTX_REG114_PQ_REG_WIDTH	7

#define RXTX_10000_BLWC			0
#define RXTX_10000_PQ			0x1e

#define RXTX_2500_BLWC			1
#define RXTX_2500_PQ			0xa

#define RXTX_1000_BLWC			1
#define RXTX_1000_PQ			0xa
/* Bit setting and getting macros
 * The get macro will extract the current bit field value from within
 * the variable
 *
 * The set macro will clear the current bit field value within the
 * variable and then set the bit field of the variable to the
 * specified value
 */
#define GET_BITS(_var, _index, _width)					\
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

/* do { } while (0) so the macro behaves as a single statement */
#define SET_BITS(_var, _index, _width, _val)				\
do {									\
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
} while (0)

/* Field-name helpers: build the _INDEX/_WIDTH names by token pasting */
#define XSIR_GET_BITS(_var, _prefix, _field)				\
	GET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH)

#define XSIR_SET_BITS(_var, _prefix, _field, _val)			\
	SET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH, (_val))
/* Macros for reading or writing SerDes integration registers
 * The ioread macros will get bit fields or full values using the
 * register definitions formed using the input names
 *
 * The iowrite macros will set bit fields or full values using the
 * register definitions formed using the input names
 */
#define XSIR0_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir0_regs + _reg)

#define XSIR0_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR0_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR0_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir0_regs + _reg)

/* read-modify-write of a single field */
#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR0_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR0_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

#define XSIR1_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir1_regs + _reg)

#define XSIR1_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR1_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR1_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir1_regs + _reg)

#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR1_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR1_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

/* Macros for reading or writing SerDes RxTx registers
 * The ioread macros will get bit fields or full values using the
 * register definitions formed using the input names
 *
 * The iowrite macros will set bit fields or full values using the
 * register definitions formed using the input names
 */
#define XRXTX_IOREAD(_priv, _reg)					\
	ioread16((_priv)->rxtx_regs + _reg)

#define XRXTX_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XRXTX_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XRXTX_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->rxtx_regs + _reg)

#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XRXTX_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XRXTX_IOWRITE((_priv), _reg, reg_val);				\
} while (0)
/* Auto-negotiation state machine states.  Ordering matters: the state
 * machine compares against AMD_XGBE_AN_COMPLETE with >= to decide when
 * to stop looping, so every terminal state must follow it.
 */
enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,
	AMD_XGBE_AN_START,
	AMD_XGBE_AN_EVENT,
	AMD_XGBE_AN_PAGE_RECEIVED,
	AMD_XGBE_AN_INCOMPAT_LINK,
	AMD_XGBE_AN_COMPLETE,
	AMD_XGBE_AN_NO_LINK,
	AMD_XGBE_AN_EXIT,
	AMD_XGBE_AN_ERROR,
};
/* Per-mode (KR/KX) receive-side progress through page exchange */
enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_READY = 0,
	AMD_XGBE_RX_BPA,	/* expecting Base Page Ability */
	AMD_XGBE_RX_XNP,	/* expecting Extended Next Page */
	AMD_XGBE_RX_COMPLETE,
};
/* Current PCS operating mode: 10GBase-KR or 1G/2.5G Base-KX */
enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,
	AMD_XGBE_MODE_KX,
};
/* Which pair of speeds this part supports (from "amd,speed-set") */
enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000,
	AMD_XGBE_PHY_SPEEDSET_2500_10000,
};
324 struct amd_xgbe_phy_priv
{
325 struct platform_device
*pdev
;
328 struct phy_device
*phydev
;
330 /* SerDes related mmio resources */
331 struct resource
*rxtx_res
;
332 struct resource
*sir0_res
;
333 struct resource
*sir1_res
;
335 /* SerDes related mmio registers */
336 void __iomem
*rxtx_regs
; /* SerDes Rx/Tx CSRs */
337 void __iomem
*sir0_regs
; /* SerDes integration registers (1/2) */
338 void __iomem
*sir1_regs
; /* SerDes integration registers (2/2) */
340 /* Maintain link status for re-starting auto-negotiation */
342 unsigned int speed_set
;
344 /* Auto-negotiation state machine support */
345 struct mutex an_mutex
;
346 enum amd_xgbe_phy_an an_result
;
347 enum amd_xgbe_phy_an an_state
;
348 enum amd_xgbe_phy_rx kr_state
;
349 enum amd_xgbe_phy_rx kx_state
;
350 struct work_struct an_work
;
351 struct workqueue_struct
*an_workqueue
;
352 unsigned int parallel_detect
;
355 static int amd_xgbe_an_enable_kr_training(struct phy_device
*phydev
)
359 ret
= phy_read_mmd(phydev
, MDIO_MMD_PMAPMD
, MDIO_PMA_10GBR_PMD_CTRL
);
364 phy_write_mmd(phydev
, MDIO_MMD_PMAPMD
, MDIO_PMA_10GBR_PMD_CTRL
, ret
);
369 static int amd_xgbe_an_disable_kr_training(struct phy_device
*phydev
)
373 ret
= phy_read_mmd(phydev
, MDIO_MMD_PMAPMD
, MDIO_PMA_10GBR_PMD_CTRL
);
378 phy_write_mmd(phydev
, MDIO_MMD_PMAPMD
, MDIO_PMA_10GBR_PMD_CTRL
, ret
);
383 static int amd_xgbe_phy_pcs_power_cycle(struct phy_device
*phydev
)
387 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
);
391 ret
|= MDIO_CTRL1_LPOWER
;
392 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
, ret
);
394 usleep_range(75, 100);
396 ret
&= ~MDIO_CTRL1_LPOWER
;
397 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
, ret
);
402 static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device
*phydev
)
404 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
406 /* Assert Rx and Tx ratechange */
407 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, RATECHANGE
, 1);
410 static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device
*phydev
)
412 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
416 /* Release Rx and Tx ratechange */
417 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, RATECHANGE
, 0);
419 /* Wait for Rx and Tx ready */
420 wait
= XGBE_PHY_RATECHANGE_COUNT
;
422 usleep_range(50, 75);
424 status
= XSIR0_IOREAD(priv
, SIR0_STATUS
);
425 if (XSIR_GET_BITS(status
, SIR0_STATUS
, RX_READY
) &&
426 XSIR_GET_BITS(status
, SIR0_STATUS
, TX_READY
))
430 netdev_dbg(phydev
->attached_dev
, "SerDes rx/tx not ready (%#hx)\n",
434 static int amd_xgbe_phy_xgmii_mode(struct phy_device
*phydev
)
436 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
439 /* Enable KR training */
440 ret
= amd_xgbe_an_enable_kr_training(phydev
);
444 /* Set PCS to KR/10G speed */
445 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL2
);
449 ret
&= ~MDIO_PCS_CTRL2_TYPE
;
450 ret
|= MDIO_PCS_CTRL2_10GBR
;
451 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL2
, ret
);
453 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
);
457 ret
&= ~MDIO_CTRL1_SPEEDSEL
;
458 ret
|= MDIO_CTRL1_SPEED10G
;
459 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
, ret
);
461 ret
= amd_xgbe_phy_pcs_power_cycle(phydev
);
465 /* Set SerDes to 10G speed */
466 amd_xgbe_phy_serdes_start_ratechange(phydev
);
468 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, DATARATE
, SPEED_10000_RATE
);
469 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, WORDMODE
, SPEED_10000_WORD
);
470 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, TXAMP
, SPEED_10000_TXAMP
);
471 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, PLLSEL
, SPEED_10000_PLL
);
472 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, PI_SPD_SEL
, SPEED_10000_CDR
);
474 XRXTX_IOWRITE_BITS(priv
, RXTX_REG20
, BLWC_ENA
, RXTX_10000_BLWC
);
475 XRXTX_IOWRITE_BITS(priv
, RXTX_REG114
, PQ_REG
, RXTX_10000_PQ
);
477 amd_xgbe_phy_serdes_complete_ratechange(phydev
);
482 static int amd_xgbe_phy_gmii_2500_mode(struct phy_device
*phydev
)
484 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
487 /* Disable KR training */
488 ret
= amd_xgbe_an_disable_kr_training(phydev
);
492 /* Set PCS to KX/1G speed */
493 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL2
);
497 ret
&= ~MDIO_PCS_CTRL2_TYPE
;
498 ret
|= MDIO_PCS_CTRL2_10GBX
;
499 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL2
, ret
);
501 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
);
505 ret
&= ~MDIO_CTRL1_SPEEDSEL
;
506 ret
|= MDIO_CTRL1_SPEED1G
;
507 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
, ret
);
509 ret
= amd_xgbe_phy_pcs_power_cycle(phydev
);
513 /* Set SerDes to 2.5G speed */
514 amd_xgbe_phy_serdes_start_ratechange(phydev
);
516 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, DATARATE
, SPEED_2500_RATE
);
517 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, WORDMODE
, SPEED_2500_WORD
);
518 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, TXAMP
, SPEED_2500_TXAMP
);
519 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, PLLSEL
, SPEED_2500_PLL
);
520 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, PI_SPD_SEL
, SPEED_2500_CDR
);
522 XRXTX_IOWRITE_BITS(priv
, RXTX_REG20
, BLWC_ENA
, RXTX_2500_BLWC
);
523 XRXTX_IOWRITE_BITS(priv
, RXTX_REG114
, PQ_REG
, RXTX_2500_PQ
);
525 amd_xgbe_phy_serdes_complete_ratechange(phydev
);
530 static int amd_xgbe_phy_gmii_mode(struct phy_device
*phydev
)
532 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
535 /* Disable KR training */
536 ret
= amd_xgbe_an_disable_kr_training(phydev
);
540 /* Set PCS to KX/1G speed */
541 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL2
);
545 ret
&= ~MDIO_PCS_CTRL2_TYPE
;
546 ret
|= MDIO_PCS_CTRL2_10GBX
;
547 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL2
, ret
);
549 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
);
553 ret
&= ~MDIO_CTRL1_SPEEDSEL
;
554 ret
|= MDIO_CTRL1_SPEED1G
;
555 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
, ret
);
557 ret
= amd_xgbe_phy_pcs_power_cycle(phydev
);
561 /* Set SerDes to 1G speed */
562 amd_xgbe_phy_serdes_start_ratechange(phydev
);
564 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, DATARATE
, SPEED_1000_RATE
);
565 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, WORDMODE
, SPEED_1000_WORD
);
566 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, TXAMP
, SPEED_1000_TXAMP
);
567 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, PLLSEL
, SPEED_1000_PLL
);
568 XSIR1_IOWRITE_BITS(priv
, SIR1_SPEED
, PI_SPD_SEL
, SPEED_1000_CDR
);
570 XRXTX_IOWRITE_BITS(priv
, RXTX_REG20
, BLWC_ENA
, RXTX_1000_BLWC
);
571 XRXTX_IOWRITE_BITS(priv
, RXTX_REG114
, PQ_REG
, RXTX_1000_PQ
);
573 amd_xgbe_phy_serdes_complete_ratechange(phydev
);
578 static int amd_xgbe_phy_cur_mode(struct phy_device
*phydev
,
579 enum amd_xgbe_phy_mode
*mode
)
583 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL2
);
587 if ((ret
& MDIO_PCS_CTRL2_TYPE
) == MDIO_PCS_CTRL2_10GBR
)
588 *mode
= AMD_XGBE_MODE_KR
;
590 *mode
= AMD_XGBE_MODE_KX
;
595 static bool amd_xgbe_phy_in_kr_mode(struct phy_device
*phydev
)
597 enum amd_xgbe_phy_mode mode
;
599 if (amd_xgbe_phy_cur_mode(phydev
, &mode
))
602 return (mode
== AMD_XGBE_MODE_KR
);
605 static int amd_xgbe_phy_switch_mode(struct phy_device
*phydev
)
607 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
610 /* If we are in KR switch to KX, and vice-versa */
611 if (amd_xgbe_phy_in_kr_mode(phydev
)) {
612 if (priv
->speed_set
== AMD_XGBE_PHY_SPEEDSET_1000_10000
)
613 ret
= amd_xgbe_phy_gmii_mode(phydev
);
615 ret
= amd_xgbe_phy_gmii_2500_mode(phydev
);
617 ret
= amd_xgbe_phy_xgmii_mode(phydev
);
623 static int amd_xgbe_phy_set_mode(struct phy_device
*phydev
,
624 enum amd_xgbe_phy_mode mode
)
626 enum amd_xgbe_phy_mode cur_mode
;
629 ret
= amd_xgbe_phy_cur_mode(phydev
, &cur_mode
);
633 if (mode
!= cur_mode
)
634 ret
= amd_xgbe_phy_switch_mode(phydev
);
639 static enum amd_xgbe_phy_an
amd_xgbe_an_tx_training(struct phy_device
*phydev
,
640 enum amd_xgbe_phy_rx
*state
)
642 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
643 int ad_reg
, lp_reg
, ret
;
645 *state
= AMD_XGBE_RX_COMPLETE
;
647 /* If we're not in KR mode then we're done */
648 if (!amd_xgbe_phy_in_kr_mode(phydev
))
649 return AMD_XGBE_AN_EVENT
;
651 /* Enable/Disable FEC */
652 ad_reg
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
+ 2);
654 return AMD_XGBE_AN_ERROR
;
656 lp_reg
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_LPA
+ 2);
658 return AMD_XGBE_AN_ERROR
;
660 ret
= phy_read_mmd(phydev
, MDIO_MMD_PMAPMD
, MDIO_PMA_10GBR_FEC_CTRL
);
662 return AMD_XGBE_AN_ERROR
;
664 if ((ad_reg
& 0xc000) && (lp_reg
& 0xc000))
669 phy_write_mmd(phydev
, MDIO_MMD_PMAPMD
, MDIO_PMA_10GBR_FEC_CTRL
, ret
);
671 /* Start KR training */
672 ret
= phy_read_mmd(phydev
, MDIO_MMD_PMAPMD
, MDIO_PMA_10GBR_PMD_CTRL
);
674 return AMD_XGBE_AN_ERROR
;
676 XSIR0_IOWRITE_BITS(priv
, SIR0_KR_RT_1
, RESET
, 1);
679 phy_write_mmd(phydev
, MDIO_MMD_PMAPMD
, MDIO_PMA_10GBR_PMD_CTRL
, ret
);
681 XSIR0_IOWRITE_BITS(priv
, SIR0_KR_RT_1
, RESET
, 0);
683 return AMD_XGBE_AN_EVENT
;
686 static enum amd_xgbe_phy_an
amd_xgbe_an_tx_xnp(struct phy_device
*phydev
,
687 enum amd_xgbe_phy_rx
*state
)
691 *state
= AMD_XGBE_RX_XNP
;
693 msg
= XNP_MCF_NULL_MESSAGE
;
694 msg
|= XNP_MP_FORMATTED
;
696 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_XNP
+ 2, 0);
697 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_XNP
+ 1, 0);
698 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_XNP
, msg
);
700 return AMD_XGBE_AN_EVENT
;
703 static enum amd_xgbe_phy_an
amd_xgbe_an_rx_bpa(struct phy_device
*phydev
,
704 enum amd_xgbe_phy_rx
*state
)
706 unsigned int link_support
;
707 int ret
, ad_reg
, lp_reg
;
709 /* Read Base Ability register 2 first */
710 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_LPA
+ 1);
712 return AMD_XGBE_AN_ERROR
;
714 /* Check for a supported mode, otherwise restart in a different one */
715 link_support
= amd_xgbe_phy_in_kr_mode(phydev
) ? 0x80 : 0x20;
716 if (!(ret
& link_support
))
717 return AMD_XGBE_AN_INCOMPAT_LINK
;
719 /* Check Extended Next Page support */
720 ad_reg
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
);
722 return AMD_XGBE_AN_ERROR
;
724 lp_reg
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_LPA
);
726 return AMD_XGBE_AN_ERROR
;
728 return ((ad_reg
& XNP_NP_EXCHANGE
) || (lp_reg
& XNP_NP_EXCHANGE
)) ?
729 amd_xgbe_an_tx_xnp(phydev
, state
) :
730 amd_xgbe_an_tx_training(phydev
, state
);
733 static enum amd_xgbe_phy_an
amd_xgbe_an_rx_xnp(struct phy_device
*phydev
,
734 enum amd_xgbe_phy_rx
*state
)
738 /* Check Extended Next Page support */
739 ad_reg
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
);
741 return AMD_XGBE_AN_ERROR
;
743 lp_reg
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_LPA
);
745 return AMD_XGBE_AN_ERROR
;
747 return ((ad_reg
& XNP_NP_EXCHANGE
) || (lp_reg
& XNP_NP_EXCHANGE
)) ?
748 amd_xgbe_an_tx_xnp(phydev
, state
) :
749 amd_xgbe_an_tx_training(phydev
, state
);
752 static enum amd_xgbe_phy_an
amd_xgbe_an_start(struct phy_device
*phydev
)
754 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
757 /* Be sure we aren't looping trying to negotiate */
758 if (amd_xgbe_phy_in_kr_mode(phydev
)) {
759 if (priv
->kr_state
!= AMD_XGBE_RX_READY
)
760 return AMD_XGBE_AN_NO_LINK
;
761 priv
->kr_state
= AMD_XGBE_RX_BPA
;
763 if (priv
->kx_state
!= AMD_XGBE_RX_READY
)
764 return AMD_XGBE_AN_NO_LINK
;
765 priv
->kx_state
= AMD_XGBE_RX_BPA
;
768 /* Set up Advertisement register 3 first */
769 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
+ 2);
771 return AMD_XGBE_AN_ERROR
;
773 if (phydev
->supported
& SUPPORTED_10000baseR_FEC
)
778 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
+ 2, ret
);
780 /* Set up Advertisement register 2 next */
781 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
+ 1);
783 return AMD_XGBE_AN_ERROR
;
785 if (phydev
->supported
& SUPPORTED_10000baseKR_Full
)
790 if ((phydev
->supported
& SUPPORTED_1000baseKX_Full
) ||
791 (phydev
->supported
& SUPPORTED_2500baseX_Full
))
796 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
+ 1, ret
);
798 /* Set up Advertisement register 1 last */
799 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
);
801 return AMD_XGBE_AN_ERROR
;
803 if (phydev
->supported
& SUPPORTED_Pause
)
808 if (phydev
->supported
& SUPPORTED_Asym_Pause
)
813 /* We don't intend to perform XNP */
814 ret
&= ~XNP_NP_EXCHANGE
;
816 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
, ret
);
818 /* Enable and start auto-negotiation */
819 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_INT
, 0);
821 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_KR_CTRL
);
823 return AMD_XGBE_AN_ERROR
;
825 ret
|= MDIO_KR_CTRL_PDETECT
;
826 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_KR_CTRL
, ret
);
828 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_CTRL1
);
830 return AMD_XGBE_AN_ERROR
;
832 ret
|= MDIO_AN_CTRL1_ENABLE
;
833 ret
|= MDIO_AN_CTRL1_RESTART
;
834 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_CTRL1
, ret
);
836 return AMD_XGBE_AN_EVENT
;
839 static enum amd_xgbe_phy_an
amd_xgbe_an_event(struct phy_device
*phydev
)
841 enum amd_xgbe_phy_an new_state
;
844 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_INT
);
846 return AMD_XGBE_AN_ERROR
;
848 new_state
= AMD_XGBE_AN_EVENT
;
849 if (ret
& XGBE_AN_PG_RCV
)
850 new_state
= AMD_XGBE_AN_PAGE_RECEIVED
;
851 else if (ret
& XGBE_AN_INC_LINK
)
852 new_state
= AMD_XGBE_AN_INCOMPAT_LINK
;
853 else if (ret
& XGBE_AN_INT_CMPLT
)
854 new_state
= AMD_XGBE_AN_COMPLETE
;
856 if (new_state
!= AMD_XGBE_AN_EVENT
)
857 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_INT
, 0);
862 static enum amd_xgbe_phy_an
amd_xgbe_an_page_received(struct phy_device
*phydev
)
864 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
865 enum amd_xgbe_phy_rx
*state
;
868 state
= amd_xgbe_phy_in_kr_mode(phydev
) ? &priv
->kr_state
872 case AMD_XGBE_RX_BPA
:
873 ret
= amd_xgbe_an_rx_bpa(phydev
, state
);
876 case AMD_XGBE_RX_XNP
:
877 ret
= amd_xgbe_an_rx_xnp(phydev
, state
);
881 ret
= AMD_XGBE_AN_ERROR
;
887 static enum amd_xgbe_phy_an
amd_xgbe_an_incompat_link(struct phy_device
*phydev
)
891 ret
= amd_xgbe_phy_switch_mode(phydev
);
893 return AMD_XGBE_AN_ERROR
;
895 return AMD_XGBE_AN_START
;
898 static void amd_xgbe_an_state_machine(struct work_struct
*work
)
900 struct amd_xgbe_phy_priv
*priv
= container_of(work
,
901 struct amd_xgbe_phy_priv
,
903 struct phy_device
*phydev
= priv
->phydev
;
904 enum amd_xgbe_phy_an cur_state
;
906 unsigned int an_supported
= 0;
908 /* Start in KX mode */
909 if (amd_xgbe_phy_set_mode(phydev
, AMD_XGBE_MODE_KX
))
910 priv
->an_state
= AMD_XGBE_AN_ERROR
;
913 mutex_lock(&priv
->an_mutex
);
915 cur_state
= priv
->an_state
;
917 switch (priv
->an_state
) {
918 case AMD_XGBE_AN_START
:
920 priv
->parallel_detect
= 0;
921 priv
->an_state
= amd_xgbe_an_start(phydev
);
924 case AMD_XGBE_AN_EVENT
:
925 priv
->an_state
= amd_xgbe_an_event(phydev
);
928 case AMD_XGBE_AN_PAGE_RECEIVED
:
929 priv
->an_state
= amd_xgbe_an_page_received(phydev
);
933 case AMD_XGBE_AN_INCOMPAT_LINK
:
934 priv
->an_state
= amd_xgbe_an_incompat_link(phydev
);
937 case AMD_XGBE_AN_COMPLETE
:
938 priv
->parallel_detect
= an_supported
? 0 : 1;
939 netdev_info(phydev
->attached_dev
, "%s successful\n",
940 an_supported
? "Auto negotiation"
941 : "Parallel detection");
944 case AMD_XGBE_AN_NO_LINK
:
945 case AMD_XGBE_AN_EXIT
:
949 priv
->an_state
= AMD_XGBE_AN_ERROR
;
952 if (priv
->an_state
== AMD_XGBE_AN_ERROR
) {
953 netdev_err(phydev
->attached_dev
,
954 "error during auto-negotiation, state=%u\n",
959 sleep
= (priv
->an_state
== AMD_XGBE_AN_EVENT
) ? 1 : 0;
961 mutex_unlock(&priv
->an_mutex
);
964 usleep_range(20, 50);
968 priv
->an_result
= priv
->an_state
;
969 priv
->an_state
= AMD_XGBE_AN_READY
;
971 mutex_unlock(&priv
->an_mutex
);
974 static int amd_xgbe_phy_soft_reset(struct phy_device
*phydev
)
978 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
);
982 ret
|= MDIO_CTRL1_RESET
;
983 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
, ret
);
988 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
);
991 } while ((ret
& MDIO_CTRL1_RESET
) && --count
);
993 if (ret
& MDIO_CTRL1_RESET
)
996 /* Make sure the XPCS and SerDes are in compatible states */
997 return amd_xgbe_phy_xgmii_mode(phydev
);
1000 static int amd_xgbe_phy_config_init(struct phy_device
*phydev
)
1002 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
1004 /* Initialize supported features */
1005 phydev
->supported
= SUPPORTED_Autoneg
;
1006 phydev
->supported
|= SUPPORTED_Pause
| SUPPORTED_Asym_Pause
;
1007 phydev
->supported
|= SUPPORTED_Backplane
;
1008 phydev
->supported
|= SUPPORTED_10000baseKR_Full
|
1009 SUPPORTED_10000baseR_FEC
;
1010 switch (priv
->speed_set
) {
1011 case AMD_XGBE_PHY_SPEEDSET_1000_10000
:
1012 phydev
->supported
|= SUPPORTED_1000baseKX_Full
;
1014 case AMD_XGBE_PHY_SPEEDSET_2500_10000
:
1015 phydev
->supported
|= SUPPORTED_2500baseX_Full
;
1018 phydev
->advertising
= phydev
->supported
;
1020 /* Turn off and clear interrupts */
1021 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_INTMASK
, 0);
1022 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_INT
, 0);
1027 static int amd_xgbe_phy_setup_forced(struct phy_device
*phydev
)
1031 /* Disable auto-negotiation */
1032 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_CTRL1
);
1036 ret
&= ~MDIO_AN_CTRL1_ENABLE
;
1037 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_CTRL1
, ret
);
1039 /* Validate/Set specified speed */
1040 switch (phydev
->speed
) {
1042 ret
= amd_xgbe_phy_xgmii_mode(phydev
);
1046 ret
= amd_xgbe_phy_gmii_2500_mode(phydev
);
1050 ret
= amd_xgbe_phy_gmii_mode(phydev
);
1060 /* Validate duplex mode */
1061 if (phydev
->duplex
!= DUPLEX_FULL
)
1065 phydev
->asym_pause
= 0;
1070 static int amd_xgbe_phy_config_aneg(struct phy_device
*phydev
)
1072 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
1073 u32 mmd_mask
= phydev
->c45_ids
.devices_in_package
;
1075 if (phydev
->autoneg
!= AUTONEG_ENABLE
)
1076 return amd_xgbe_phy_setup_forced(phydev
);
1078 /* Make sure we have the AN MMD present */
1079 if (!(mmd_mask
& MDIO_DEVS_AN
))
1082 /* Start/Restart the auto-negotiation state machine */
1083 mutex_lock(&priv
->an_mutex
);
1084 priv
->an_result
= AMD_XGBE_AN_READY
;
1085 priv
->an_state
= AMD_XGBE_AN_START
;
1086 priv
->kr_state
= AMD_XGBE_RX_READY
;
1087 priv
->kx_state
= AMD_XGBE_RX_READY
;
1088 mutex_unlock(&priv
->an_mutex
);
1090 queue_work(priv
->an_workqueue
, &priv
->an_work
);
1095 static int amd_xgbe_phy_aneg_done(struct phy_device
*phydev
)
1097 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
1098 enum amd_xgbe_phy_an state
;
1100 mutex_lock(&priv
->an_mutex
);
1101 state
= priv
->an_result
;
1102 mutex_unlock(&priv
->an_mutex
);
1104 return (state
== AMD_XGBE_AN_COMPLETE
);
1107 static int amd_xgbe_phy_update_link(struct phy_device
*phydev
)
1109 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
1110 enum amd_xgbe_phy_an state
;
1111 unsigned int check_again
, autoneg
;
1114 /* If we're doing auto-negotiation don't report link down */
1115 mutex_lock(&priv
->an_mutex
);
1116 state
= priv
->an_state
;
1117 mutex_unlock(&priv
->an_mutex
);
1119 if (state
!= AMD_XGBE_AN_READY
) {
1124 /* Since the device can be in the wrong mode when a link is
1125 * (re-)established (cable connected after the interface is
1126 * up, etc.), the link status may report no link. If there
1127 * is no link, try switching modes and checking the status
1128 * again if auto negotiation is enabled.
1130 check_again
= (phydev
->autoneg
== AUTONEG_ENABLE
) ? 1 : 0;
1132 /* Link status is latched low, so read once to clear
1133 * and then read again to get current state
1135 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_STAT1
);
1139 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_STAT1
);
1143 phydev
->link
= (ret
& MDIO_STAT1_LSTATUS
) ? 1 : 0;
1145 if (!phydev
->link
) {
1147 ret
= amd_xgbe_phy_switch_mode(phydev
);
1155 autoneg
= (phydev
->link
&& !priv
->link
) ? 1 : 0;
1156 priv
->link
= phydev
->link
;
1158 /* Link is (back) up, re-start auto-negotiation */
1159 ret
= amd_xgbe_phy_config_aneg(phydev
);
1167 static int amd_xgbe_phy_read_status(struct phy_device
*phydev
)
1169 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
1170 u32 mmd_mask
= phydev
->c45_ids
.devices_in_package
;
1171 int ret
, ad_ret
, lp_ret
;
1173 ret
= amd_xgbe_phy_update_link(phydev
);
1177 if ((phydev
->autoneg
== AUTONEG_ENABLE
) &&
1178 !priv
->parallel_detect
) {
1179 if (!(mmd_mask
& MDIO_DEVS_AN
))
1182 if (!amd_xgbe_phy_aneg_done(phydev
))
1185 /* Compare Advertisement and Link Partner register 1 */
1186 ad_ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_ADVERTISE
);
1189 lp_ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_LPA
);
1194 phydev
->pause
= (ad_ret
& 0x400) ? 1 : 0;
1195 phydev
->asym_pause
= (ad_ret
& 0x800) ? 1 : 0;
1197 /* Compare Advertisement and Link Partner register 2 */
1198 ad_ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
,
1199 MDIO_AN_ADVERTISE
+ 1);
1202 lp_ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_LPA
+ 1);
1207 if (ad_ret
& 0x80) {
1208 phydev
->speed
= SPEED_10000
;
1209 ret
= amd_xgbe_phy_set_mode(phydev
, AMD_XGBE_MODE_KR
);
1213 switch (priv
->speed_set
) {
1214 case AMD_XGBE_PHY_SPEEDSET_1000_10000
:
1215 phydev
->speed
= SPEED_1000
;
1218 case AMD_XGBE_PHY_SPEEDSET_2500_10000
:
1219 phydev
->speed
= SPEED_2500
;
1223 ret
= amd_xgbe_phy_set_mode(phydev
, AMD_XGBE_MODE_KX
);
1228 phydev
->duplex
= DUPLEX_FULL
;
1230 if (amd_xgbe_phy_in_kr_mode(phydev
)) {
1231 phydev
->speed
= SPEED_10000
;
1233 switch (priv
->speed_set
) {
1234 case AMD_XGBE_PHY_SPEEDSET_1000_10000
:
1235 phydev
->speed
= SPEED_1000
;
1238 case AMD_XGBE_PHY_SPEEDSET_2500_10000
:
1239 phydev
->speed
= SPEED_2500
;
1243 phydev
->duplex
= DUPLEX_FULL
;
1245 phydev
->asym_pause
= 0;
1251 static int amd_xgbe_phy_suspend(struct phy_device
*phydev
)
1255 mutex_lock(&phydev
->lock
);
1257 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
);
1261 ret
|= MDIO_CTRL1_LPOWER
;
1262 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
, ret
);
1267 mutex_unlock(&phydev
->lock
);
1272 static int amd_xgbe_phy_resume(struct phy_device
*phydev
)
1276 mutex_lock(&phydev
->lock
);
1278 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
);
1282 ret
&= ~MDIO_CTRL1_LPOWER
;
1283 phy_write_mmd(phydev
, MDIO_MMD_PCS
, MDIO_CTRL1
, ret
);
1288 mutex_unlock(&phydev
->lock
);
1293 static int amd_xgbe_phy_probe(struct phy_device
*phydev
)
1295 struct amd_xgbe_phy_priv
*priv
;
1296 struct platform_device
*pdev
;
1299 const __be32
*property
;
1300 unsigned int speed_set
;
1303 if (!phydev
->dev
.of_node
)
1306 pdev
= of_find_device_by_node(phydev
->dev
.of_node
);
1311 wq_name
= kasprintf(GFP_KERNEL
, "%s-amd-xgbe-phy", phydev
->bus
->name
);
1317 priv
= devm_kzalloc(dev
, sizeof(*priv
), GFP_KERNEL
);
1325 priv
->phydev
= phydev
;
1327 /* Get the device mmio areas */
1328 priv
->rxtx_res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1329 priv
->rxtx_regs
= devm_ioremap_resource(dev
, priv
->rxtx_res
);
1330 if (IS_ERR(priv
->rxtx_regs
)) {
1331 dev_err(dev
, "rxtx ioremap failed\n");
1332 ret
= PTR_ERR(priv
->rxtx_regs
);
1336 priv
->sir0_res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
1337 priv
->sir0_regs
= devm_ioremap_resource(dev
, priv
->sir0_res
);
1338 if (IS_ERR(priv
->sir0_regs
)) {
1339 dev_err(dev
, "sir0 ioremap failed\n");
1340 ret
= PTR_ERR(priv
->sir0_regs
);
1344 priv
->sir1_res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 2);
1345 priv
->sir1_regs
= devm_ioremap_resource(dev
, priv
->sir1_res
);
1346 if (IS_ERR(priv
->sir1_regs
)) {
1347 dev_err(dev
, "sir1 ioremap failed\n");
1348 ret
= PTR_ERR(priv
->sir1_regs
);
1352 /* Get the device speed set property */
1354 property
= of_get_property(dev
->of_node
, XGBE_PHY_SPEEDSET_PROPERTY
,
1357 speed_set
= be32_to_cpu(*property
);
1359 switch (speed_set
) {
1361 priv
->speed_set
= AMD_XGBE_PHY_SPEEDSET_1000_10000
;
1364 priv
->speed_set
= AMD_XGBE_PHY_SPEEDSET_2500_10000
;
1367 dev_err(dev
, "invalid amd,speed-set property\n");
1374 mutex_init(&priv
->an_mutex
);
1375 INIT_WORK(&priv
->an_work
, amd_xgbe_an_state_machine
);
1376 priv
->an_workqueue
= create_singlethread_workqueue(wq_name
);
1377 if (!priv
->an_workqueue
) {
1382 phydev
->priv
= priv
;
1390 devm_iounmap(dev
, priv
->sir1_regs
);
1391 devm_release_mem_region(dev
, priv
->sir1_res
->start
,
1392 resource_size(priv
->sir1_res
));
1395 devm_iounmap(dev
, priv
->sir0_regs
);
1396 devm_release_mem_region(dev
, priv
->sir0_res
->start
,
1397 resource_size(priv
->sir0_res
));
1400 devm_iounmap(dev
, priv
->rxtx_regs
);
1401 devm_release_mem_region(dev
, priv
->rxtx_res
->start
,
1402 resource_size(priv
->rxtx_res
));
1405 devm_kfree(dev
, priv
);
1416 static void amd_xgbe_phy_remove(struct phy_device
*phydev
)
1418 struct amd_xgbe_phy_priv
*priv
= phydev
->priv
;
1419 struct device
*dev
= priv
->dev
;
1421 /* Stop any in process auto-negotiation */
1422 mutex_lock(&priv
->an_mutex
);
1423 priv
->an_state
= AMD_XGBE_AN_EXIT
;
1424 mutex_unlock(&priv
->an_mutex
);
1426 flush_workqueue(priv
->an_workqueue
);
1427 destroy_workqueue(priv
->an_workqueue
);
1429 /* Release resources */
1430 devm_iounmap(dev
, priv
->sir1_regs
);
1431 devm_release_mem_region(dev
, priv
->sir1_res
->start
,
1432 resource_size(priv
->sir1_res
));
1434 devm_iounmap(dev
, priv
->sir0_regs
);
1435 devm_release_mem_region(dev
, priv
->sir0_res
->start
,
1436 resource_size(priv
->sir0_res
));
1438 devm_iounmap(dev
, priv
->rxtx_regs
);
1439 devm_release_mem_region(dev
, priv
->rxtx_res
->start
,
1440 resource_size(priv
->rxtx_res
));
1442 devm_kfree(dev
, priv
);
1445 static int amd_xgbe_match_phy_device(struct phy_device
*phydev
)
1447 return phydev
->c45_ids
.device_ids
[MDIO_MMD_PCS
] == XGBE_PHY_ID
;
1450 static struct phy_driver amd_xgbe_phy_driver
[] = {
1452 .phy_id
= XGBE_PHY_ID
,
1453 .phy_id_mask
= XGBE_PHY_MASK
,
1454 .name
= "AMD XGBE PHY",
1456 .probe
= amd_xgbe_phy_probe
,
1457 .remove
= amd_xgbe_phy_remove
,
1458 .soft_reset
= amd_xgbe_phy_soft_reset
,
1459 .config_init
= amd_xgbe_phy_config_init
,
1460 .suspend
= amd_xgbe_phy_suspend
,
1461 .resume
= amd_xgbe_phy_resume
,
1462 .config_aneg
= amd_xgbe_phy_config_aneg
,
1463 .aneg_done
= amd_xgbe_phy_aneg_done
,
1464 .read_status
= amd_xgbe_phy_read_status
,
1465 .match_phy_device
= amd_xgbe_match_phy_device
,
1467 .owner
= THIS_MODULE
,
1472 module_phy_driver(amd_xgbe_phy_driver
);
1474 static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids
[] = {
1475 { XGBE_PHY_ID
, XGBE_PHY_MASK
},
1478 MODULE_DEVICE_TABLE(mdio
, amd_xgbe_phy_ids
);