2 * AMD 10Gb Ethernet driver
4 * This file is available to you under your choice of the following two
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
24 * This file incorporates work covered by the following copyright and
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
57 * License 2: Modified BSD
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
84 * This file incorporates work covered by the following copyright and
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
117 #include <linux/phy.h>
118 #include <linux/clk.h>
119 #include <linux/bitrev.h>
120 #include <linux/crc32.h>
123 #include "xgbe-common.h"
126 static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data
*pdata
,
132 DBGPR("-->xgbe_usec_to_riwt\n");
134 rate
= clk_get_rate(pdata
->sysclock
);
137 * Convert the input usec value to the watchdog timer value. Each
138 * watchdog timer value is equivalent to 256 clock cycles.
139 * Calculate the required value as:
140 * ( usec * ( system_clock_mhz / 10^6 ) / 256
142 ret
= (usec
* (rate
/ 1000000)) / 256;
144 DBGPR("<--xgbe_usec_to_riwt\n");
149 static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data
*pdata
,
155 DBGPR("-->xgbe_riwt_to_usec\n");
157 rate
= clk_get_rate(pdata
->sysclock
);
160 * Convert the input watchdog timer value to the usec value. Each
161 * watchdog timer value is equivalent to 256 clock cycles.
162 * Calculate the required value as:
163 * ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
165 ret
= (riwt
* 256) / (rate
/ 1000000);
167 DBGPR("<--xgbe_riwt_to_usec\n");
172 static int xgbe_config_pblx8(struct xgbe_prv_data
*pdata
)
174 struct xgbe_channel
*channel
;
177 channel
= pdata
->channel
;
178 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++)
179 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_CR
, PBLX8
,
185 static int xgbe_get_tx_pbl_val(struct xgbe_prv_data
*pdata
)
187 return XGMAC_DMA_IOREAD_BITS(pdata
->channel
, DMA_CH_TCR
, PBL
);
190 static int xgbe_config_tx_pbl_val(struct xgbe_prv_data
*pdata
)
192 struct xgbe_channel
*channel
;
195 channel
= pdata
->channel
;
196 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
197 if (!channel
->tx_ring
)
200 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_TCR
, PBL
,
207 static int xgbe_get_rx_pbl_val(struct xgbe_prv_data
*pdata
)
209 return XGMAC_DMA_IOREAD_BITS(pdata
->channel
, DMA_CH_RCR
, PBL
);
212 static int xgbe_config_rx_pbl_val(struct xgbe_prv_data
*pdata
)
214 struct xgbe_channel
*channel
;
217 channel
= pdata
->channel
;
218 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
219 if (!channel
->rx_ring
)
222 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_RCR
, PBL
,
229 static int xgbe_config_osp_mode(struct xgbe_prv_data
*pdata
)
231 struct xgbe_channel
*channel
;
234 channel
= pdata
->channel
;
235 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
236 if (!channel
->tx_ring
)
239 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_TCR
, OSP
,
246 static int xgbe_config_rsf_mode(struct xgbe_prv_data
*pdata
, unsigned int val
)
250 for (i
= 0; i
< pdata
->hw_feat
.rx_q_cnt
; i
++)
251 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_RQOMR
, RSF
, val
);
256 static int xgbe_config_tsf_mode(struct xgbe_prv_data
*pdata
, unsigned int val
)
260 for (i
= 0; i
< pdata
->hw_feat
.tx_q_cnt
; i
++)
261 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_TQOMR
, TSF
, val
);
266 static int xgbe_config_rx_threshold(struct xgbe_prv_data
*pdata
,
271 for (i
= 0; i
< pdata
->hw_feat
.rx_q_cnt
; i
++)
272 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_RQOMR
, RTC
, val
);
277 static int xgbe_config_tx_threshold(struct xgbe_prv_data
*pdata
,
282 for (i
= 0; i
< pdata
->hw_feat
.tx_q_cnt
; i
++)
283 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_TQOMR
, TTC
, val
);
288 static int xgbe_config_rx_coalesce(struct xgbe_prv_data
*pdata
)
290 struct xgbe_channel
*channel
;
293 channel
= pdata
->channel
;
294 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
295 if (!channel
->rx_ring
)
298 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_RIWT
, RWT
,
/* Tx coalescing is implemented in software (IC bit + hrtimer in the xmit
 * path), so there is no hardware register to program here.
 */
static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}
310 static void xgbe_config_rx_buffer_size(struct xgbe_prv_data
*pdata
)
312 struct xgbe_channel
*channel
;
315 channel
= pdata
->channel
;
316 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
317 if (!channel
->rx_ring
)
320 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_RCR
, RBSZ
,
325 static void xgbe_config_tso_mode(struct xgbe_prv_data
*pdata
)
327 struct xgbe_channel
*channel
;
330 channel
= pdata
->channel
;
331 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
332 if (!channel
->tx_ring
)
335 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_TCR
, TSE
, 1);
339 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data
*pdata
)
341 unsigned int max_q_count
, q_count
;
342 unsigned int reg
, reg_val
;
345 /* Clear MTL flow control */
346 for (i
= 0; i
< pdata
->hw_feat
.rx_q_cnt
; i
++)
347 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_RQOMR
, EHFC
, 0);
349 /* Clear MAC flow control */
350 max_q_count
= XGMAC_MAX_FLOW_CONTROL_QUEUES
;
351 q_count
= min_t(unsigned int, pdata
->hw_feat
.rx_q_cnt
, max_q_count
);
353 for (i
= 0; i
< q_count
; i
++) {
354 reg_val
= XGMAC_IOREAD(pdata
, reg
);
355 XGMAC_SET_BITS(reg_val
, MAC_Q0TFCR
, TFE
, 0);
356 XGMAC_IOWRITE(pdata
, reg
, reg_val
);
358 reg
+= MAC_QTFCR_INC
;
364 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data
*pdata
)
366 unsigned int max_q_count
, q_count
;
367 unsigned int reg
, reg_val
;
370 /* Set MTL flow control */
371 for (i
= 0; i
< pdata
->hw_feat
.rx_q_cnt
; i
++)
372 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_RQOMR
, EHFC
, 1);
374 /* Set MAC flow control */
375 max_q_count
= XGMAC_MAX_FLOW_CONTROL_QUEUES
;
376 q_count
= min_t(unsigned int, pdata
->hw_feat
.rx_q_cnt
, max_q_count
);
378 for (i
= 0; i
< q_count
; i
++) {
379 reg_val
= XGMAC_IOREAD(pdata
, reg
);
381 /* Enable transmit flow control */
382 XGMAC_SET_BITS(reg_val
, MAC_Q0TFCR
, TFE
, 1);
384 XGMAC_SET_BITS(reg_val
, MAC_Q0TFCR
, PT
, 0xffff);
386 XGMAC_IOWRITE(pdata
, reg
, reg_val
);
388 reg
+= MAC_QTFCR_INC
;
394 static int xgbe_disable_rx_flow_control(struct xgbe_prv_data
*pdata
)
396 XGMAC_IOWRITE_BITS(pdata
, MAC_RFCR
, RFE
, 0);
401 static int xgbe_enable_rx_flow_control(struct xgbe_prv_data
*pdata
)
403 XGMAC_IOWRITE_BITS(pdata
, MAC_RFCR
, RFE
, 1);
408 static int xgbe_config_tx_flow_control(struct xgbe_prv_data
*pdata
)
411 xgbe_enable_tx_flow_control(pdata
);
413 xgbe_disable_tx_flow_control(pdata
);
418 static int xgbe_config_rx_flow_control(struct xgbe_prv_data
*pdata
)
421 xgbe_enable_rx_flow_control(pdata
);
423 xgbe_disable_rx_flow_control(pdata
);
/* Apply both Tx and Rx flow control configuration. */
static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);
}
434 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data
*pdata
)
436 struct xgbe_channel
*channel
;
437 unsigned int dma_ch_isr
, dma_ch_ier
;
440 channel
= pdata
->channel
;
441 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
442 /* Clear all the interrupts which are set */
443 dma_ch_isr
= XGMAC_DMA_IOREAD(channel
, DMA_CH_SR
);
444 XGMAC_DMA_IOWRITE(channel
, DMA_CH_SR
, dma_ch_isr
);
446 /* Clear all interrupt enable bits */
449 /* Enable following interrupts
450 * NIE - Normal Interrupt Summary Enable
451 * AIE - Abnormal Interrupt Summary Enable
452 * FBEE - Fatal Bus Error Enable
454 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, NIE
, 1);
455 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, AIE
, 1);
456 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, FBEE
, 1);
458 if (channel
->tx_ring
) {
459 /* Enable the following Tx interrupts
460 * TIE - Transmit Interrupt Enable (unless polling)
462 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TIE
, 1);
464 if (channel
->rx_ring
) {
465 /* Enable following Rx interrupts
466 * RBUE - Receive Buffer Unavailable Enable
467 * RIE - Receive Interrupt Enable
469 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RBUE
, 1);
470 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RIE
, 1);
473 XGMAC_DMA_IOWRITE(channel
, DMA_CH_IER
, dma_ch_ier
);
477 static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data
*pdata
)
479 unsigned int mtl_q_isr
;
480 unsigned int q_count
, i
;
482 q_count
= max(pdata
->hw_feat
.tx_q_cnt
, pdata
->hw_feat
.rx_q_cnt
);
483 for (i
= 0; i
< q_count
; i
++) {
484 /* Clear all the interrupts which are set */
485 mtl_q_isr
= XGMAC_MTL_IOREAD(pdata
, i
, MTL_Q_ISR
);
486 XGMAC_MTL_IOWRITE(pdata
, i
, MTL_Q_ISR
, mtl_q_isr
);
488 /* No MTL interrupts to be enabled */
489 XGMAC_MTL_IOWRITE(pdata
, i
, MTL_Q_IER
, 0);
493 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data
*pdata
)
495 /* No MAC interrupts to be enabled */
496 XGMAC_IOWRITE(pdata
, MAC_IER
, 0);
498 /* Enable all counter interrupts */
499 XGMAC_IOWRITE_BITS(pdata
, MMC_RIER
, ALL_INTERRUPTS
, 0xff);
500 XGMAC_IOWRITE_BITS(pdata
, MMC_TIER
, ALL_INTERRUPTS
, 0xff);
503 static int xgbe_set_gmii_speed(struct xgbe_prv_data
*pdata
)
505 XGMAC_IOWRITE_BITS(pdata
, MAC_TCR
, SS
, 0x3);
510 static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data
*pdata
)
512 XGMAC_IOWRITE_BITS(pdata
, MAC_TCR
, SS
, 0x2);
517 static int xgbe_set_xgmii_speed(struct xgbe_prv_data
*pdata
)
519 XGMAC_IOWRITE_BITS(pdata
, MAC_TCR
, SS
, 0);
524 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data
*pdata
,
527 unsigned int val
= enable
? 1 : 0;
529 if (XGMAC_IOREAD_BITS(pdata
, MAC_PFR
, PR
) == val
)
532 DBGPR(" %s promiscuous mode\n", enable
? "entering" : "leaving");
533 XGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, PR
, val
);
538 static int xgbe_set_all_multicast_mode(struct xgbe_prv_data
*pdata
,
541 unsigned int val
= enable
? 1 : 0;
543 if (XGMAC_IOREAD_BITS(pdata
, MAC_PFR
, PM
) == val
)
546 DBGPR(" %s allmulti mode\n", enable
? "entering" : "leaving");
547 XGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, PM
, val
);
552 static void xgbe_set_mac_reg(struct xgbe_prv_data
*pdata
,
553 struct netdev_hw_addr
*ha
, unsigned int *mac_reg
)
555 unsigned int mac_addr_hi
, mac_addr_lo
;
562 mac_addr
= (u8
*)&mac_addr_lo
;
563 mac_addr
[0] = ha
->addr
[0];
564 mac_addr
[1] = ha
->addr
[1];
565 mac_addr
[2] = ha
->addr
[2];
566 mac_addr
[3] = ha
->addr
[3];
567 mac_addr
= (u8
*)&mac_addr_hi
;
568 mac_addr
[0] = ha
->addr
[4];
569 mac_addr
[1] = ha
->addr
[5];
571 DBGPR(" adding mac address %pM at 0x%04x\n", ha
->addr
,
574 XGMAC_SET_BITS(mac_addr_hi
, MAC_MACA1HR
, AE
, 1);
577 XGMAC_IOWRITE(pdata
, *mac_reg
, mac_addr_hi
);
578 *mac_reg
+= MAC_MACA_INC
;
579 XGMAC_IOWRITE(pdata
, *mac_reg
, mac_addr_lo
);
580 *mac_reg
+= MAC_MACA_INC
;
583 static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data
*pdata
)
585 struct net_device
*netdev
= pdata
->netdev
;
586 struct netdev_hw_addr
*ha
;
587 unsigned int mac_reg
;
588 unsigned int addn_macs
;
590 mac_reg
= MAC_MACA1HR
;
591 addn_macs
= pdata
->hw_feat
.addn_mac
;
593 if (netdev_uc_count(netdev
) > addn_macs
) {
594 xgbe_set_promiscuous_mode(pdata
, 1);
596 netdev_for_each_uc_addr(ha
, netdev
) {
597 xgbe_set_mac_reg(pdata
, ha
, &mac_reg
);
601 if (netdev_mc_count(netdev
) > addn_macs
) {
602 xgbe_set_all_multicast_mode(pdata
, 1);
604 netdev_for_each_mc_addr(ha
, netdev
) {
605 xgbe_set_mac_reg(pdata
, ha
, &mac_reg
);
611 /* Clear remaining additional MAC address entries */
613 xgbe_set_mac_reg(pdata
, NULL
, &mac_reg
);
616 static void xgbe_set_mac_hash_table(struct xgbe_prv_data
*pdata
)
618 struct net_device
*netdev
= pdata
->netdev
;
619 struct netdev_hw_addr
*ha
;
620 unsigned int hash_reg
;
621 unsigned int hash_table_shift
, hash_table_count
;
622 u32 hash_table
[XGBE_MAC_HASH_TABLE_SIZE
];
626 hash_table_shift
= 26 - (pdata
->hw_feat
.hash_table_size
>> 7);
627 hash_table_count
= pdata
->hw_feat
.hash_table_size
/ 32;
628 memset(hash_table
, 0, sizeof(hash_table
));
630 /* Build the MAC Hash Table register values */
631 netdev_for_each_uc_addr(ha
, netdev
) {
632 crc
= bitrev32(~crc32_le(~0, ha
->addr
, ETH_ALEN
));
633 crc
>>= hash_table_shift
;
634 hash_table
[crc
>> 5] |= (1 << (crc
& 0x1f));
637 netdev_for_each_mc_addr(ha
, netdev
) {
638 crc
= bitrev32(~crc32_le(~0, ha
->addr
, ETH_ALEN
));
639 crc
>>= hash_table_shift
;
640 hash_table
[crc
>> 5] |= (1 << (crc
& 0x1f));
643 /* Set the MAC Hash Table registers */
645 for (i
= 0; i
< hash_table_count
; i
++) {
646 XGMAC_IOWRITE(pdata
, hash_reg
, hash_table
[i
]);
647 hash_reg
+= MAC_HTR_INC
;
651 static int xgbe_add_mac_addresses(struct xgbe_prv_data
*pdata
)
653 if (pdata
->hw_feat
.hash_table_size
)
654 xgbe_set_mac_hash_table(pdata
);
656 xgbe_set_mac_addn_addrs(pdata
);
661 static int xgbe_set_mac_address(struct xgbe_prv_data
*pdata
, u8
*addr
)
663 unsigned int mac_addr_hi
, mac_addr_lo
;
665 mac_addr_hi
= (addr
[5] << 8) | (addr
[4] << 0);
666 mac_addr_lo
= (addr
[3] << 24) | (addr
[2] << 16) |
667 (addr
[1] << 8) | (addr
[0] << 0);
669 XGMAC_IOWRITE(pdata
, MAC_MACA0HR
, mac_addr_hi
);
670 XGMAC_IOWRITE(pdata
, MAC_MACA0LR
, mac_addr_lo
);
675 static int xgbe_read_mmd_regs(struct xgbe_prv_data
*pdata
, int prtad
,
678 unsigned int mmd_address
;
681 if (mmd_reg
& MII_ADDR_C45
)
682 mmd_address
= mmd_reg
& ~MII_ADDR_C45
;
684 mmd_address
= (pdata
->mdio_mmd
<< 16) | (mmd_reg
& 0xffff);
686 /* The PCS registers are accessed using mmio. The underlying APB3
687 * management interface uses indirect addressing to access the MMD
688 * register sets. This requires accessing of the PCS register in two
689 * phases, an address phase and a data phase.
691 * The mmio interface is based on 32-bit offsets and values. All
692 * register offsets must therefore be adjusted by left shifting the
693 * offset 2 bits and reading 32 bits of data.
695 mutex_lock(&pdata
->xpcs_mutex
);
696 XPCS_IOWRITE(pdata
, PCS_MMD_SELECT
<< 2, mmd_address
>> 8);
697 mmd_data
= XPCS_IOREAD(pdata
, (mmd_address
& 0xff) << 2);
698 mutex_unlock(&pdata
->xpcs_mutex
);
703 static void xgbe_write_mmd_regs(struct xgbe_prv_data
*pdata
, int prtad
,
704 int mmd_reg
, int mmd_data
)
706 unsigned int mmd_address
;
708 if (mmd_reg
& MII_ADDR_C45
)
709 mmd_address
= mmd_reg
& ~MII_ADDR_C45
;
711 mmd_address
= (pdata
->mdio_mmd
<< 16) | (mmd_reg
& 0xffff);
713 /* The PCS registers are accessed using mmio. The underlying APB3
714 * management interface uses indirect addressing to access the MMD
715 * register sets. This requires accessing of the PCS register in two
716 * phases, an address phase and a data phase.
718 * The mmio interface is based on 32-bit offsets and values. All
719 * register offsets must therefore be adjusted by left shifting the
720 * offset 2 bits and reading 32 bits of data.
722 mutex_lock(&pdata
->xpcs_mutex
);
723 XPCS_IOWRITE(pdata
, PCS_MMD_SELECT
<< 2, mmd_address
>> 8);
724 XPCS_IOWRITE(pdata
, (mmd_address
& 0xff) << 2, mmd_data
);
725 mutex_unlock(&pdata
->xpcs_mutex
);
728 static int xgbe_tx_complete(struct xgbe_ring_desc
*rdesc
)
730 return !XGMAC_GET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, OWN
);
733 static int xgbe_disable_rx_csum(struct xgbe_prv_data
*pdata
)
735 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, IPC
, 0);
740 static int xgbe_enable_rx_csum(struct xgbe_prv_data
*pdata
)
742 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, IPC
, 1);
747 static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data
*pdata
)
749 /* Put the VLAN tag in the Rx descriptor */
750 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, EVLRXS
, 1);
752 /* Don't check the VLAN type */
753 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, DOVLTC
, 1);
755 /* Check only C-TAG (0x8100) packets */
756 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, ERSVLM
, 0);
758 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
759 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, ESVL
, 0);
761 /* Enable VLAN tag stripping */
762 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, EVLS
, 0x3);
767 static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data
*pdata
)
769 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, EVLS
, 0);
774 static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data
*pdata
)
776 /* Enable VLAN filtering */
777 XGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, VTFE
, 1);
779 /* Enable VLAN Hash Table filtering */
780 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, VTHM
, 1);
782 /* Disable VLAN tag inverse matching */
783 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, VTIM
, 0);
785 /* Only filter on the lower 12-bits of the VLAN tag */
786 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, ETV
, 1);
788 /* In order for the VLAN Hash Table filtering to be effective,
789 * the VLAN tag identifier in the VLAN Tag Register must not
790 * be zero. Set the VLAN tag identifier to "1" to enable the
791 * VLAN Hash Table filtering. This implies that a VLAN tag of
792 * 1 will always pass filtering.
794 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANTR
, VL
, 1);
799 static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data
*pdata
)
801 /* Disable VLAN filtering */
802 XGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, VTFE
, 0);
808 #define CRCPOLY_LE 0xedb88320
810 static u32
xgbe_vid_crc32_le(__le16 vid_le
)
812 u32 poly
= CRCPOLY_LE
;
815 unsigned char *data
= (unsigned char *)&vid_le
;
816 unsigned char data_byte
= 0;
819 bits
= get_bitmask_order(VLAN_VID_MASK
);
820 for (i
= 0; i
< bits
; i
++) {
822 data_byte
= data
[i
/ 8];
824 temp
= ((crc
& 1) ^ data_byte
) & 1;
835 static int xgbe_update_vlan_hash_table(struct xgbe_prv_data
*pdata
)
840 u16 vlan_hash_table
= 0;
842 /* Generate the VLAN Hash Table value */
843 for_each_set_bit(vid
, pdata
->active_vlans
, VLAN_N_VID
) {
844 /* Get the CRC32 value of the VLAN ID */
845 vid_le
= cpu_to_le16(vid
);
846 crc
= bitrev32(~xgbe_vid_crc32_le(vid_le
)) >> 28;
848 vlan_hash_table
|= (1 << crc
);
851 /* Set the VLAN Hash Table filtering register */
852 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANHTR
, VLHT
, vlan_hash_table
);
857 static void xgbe_tx_desc_reset(struct xgbe_ring_data
*rdata
)
859 struct xgbe_ring_desc
*rdesc
= rdata
->rdesc
;
861 /* Reset the Tx descriptor
862 * Set buffer 1 (lo) address to zero
863 * Set buffer 1 (hi) address to zero
864 * Reset all other control bits (IC, TTSE, B2L & B1L)
865 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
873 static void xgbe_tx_desc_init(struct xgbe_channel
*channel
)
875 struct xgbe_ring
*ring
= channel
->tx_ring
;
876 struct xgbe_ring_data
*rdata
;
877 struct xgbe_ring_desc
*rdesc
;
879 int start_index
= ring
->cur
;
881 DBGPR("-->tx_desc_init\n");
883 /* Initialze all descriptors */
884 for (i
= 0; i
< ring
->rdesc_count
; i
++) {
885 rdata
= XGBE_GET_DESC_DATA(ring
, i
);
886 rdesc
= rdata
->rdesc
;
888 /* Initialize Tx descriptor
889 * Set buffer 1 (lo) address to zero
890 * Set buffer 1 (hi) address to zero
891 * Reset all other control bits (IC, TTSE, B2L & B1L)
892 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
901 /* Make sure everything is written to the descriptor(s) before
902 * telling the device about them
906 /* Update the total number of Tx descriptors */
907 XGMAC_DMA_IOWRITE(channel
, DMA_CH_TDRLR
, ring
->rdesc_count
- 1);
909 /* Update the starting address of descriptor ring */
910 rdata
= XGBE_GET_DESC_DATA(ring
, start_index
);
911 XGMAC_DMA_IOWRITE(channel
, DMA_CH_TDLR_HI
,
912 upper_32_bits(rdata
->rdesc_dma
));
913 XGMAC_DMA_IOWRITE(channel
, DMA_CH_TDLR_LO
,
914 lower_32_bits(rdata
->rdesc_dma
));
916 DBGPR("<--tx_desc_init\n");
919 static void xgbe_rx_desc_reset(struct xgbe_ring_data
*rdata
)
921 struct xgbe_ring_desc
*rdesc
= rdata
->rdesc
;
923 /* Reset the Rx descriptor
924 * Set buffer 1 (lo) address to dma address (lo)
925 * Set buffer 1 (hi) address to dma address (hi)
926 * Set buffer 2 (lo) address to zero
927 * Set buffer 2 (hi) address to zero and set control bits
930 rdesc
->desc0
= cpu_to_le32(lower_32_bits(rdata
->skb_dma
));
931 rdesc
->desc1
= cpu_to_le32(upper_32_bits(rdata
->skb_dma
));
935 if (rdata
->interrupt
)
936 XGMAC_SET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, INTE
, 1);
938 /* Since the Rx DMA engine is likely running, make sure everything
939 * is written to the descriptor(s) before setting the OWN bit
944 XGMAC_SET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, OWN
, 1);
946 /* Make sure ownership is written to the descriptor */
950 static void xgbe_rx_desc_init(struct xgbe_channel
*channel
)
952 struct xgbe_prv_data
*pdata
= channel
->pdata
;
953 struct xgbe_ring
*ring
= channel
->rx_ring
;
954 struct xgbe_ring_data
*rdata
;
955 struct xgbe_ring_desc
*rdesc
;
956 unsigned int start_index
= ring
->cur
;
957 unsigned int rx_coalesce
, rx_frames
;
960 DBGPR("-->rx_desc_init\n");
962 rx_coalesce
= (pdata
->rx_riwt
|| pdata
->rx_frames
) ? 1 : 0;
963 rx_frames
= pdata
->rx_frames
;
965 /* Initialize all descriptors */
966 for (i
= 0; i
< ring
->rdesc_count
; i
++) {
967 rdata
= XGBE_GET_DESC_DATA(ring
, i
);
968 rdesc
= rdata
->rdesc
;
970 /* Initialize Rx descriptor
971 * Set buffer 1 (lo) address to dma address (lo)
972 * Set buffer 1 (hi) address to dma address (hi)
973 * Set buffer 2 (lo) address to zero
974 * Set buffer 2 (hi) address to zero and set control
975 * bits OWN and INTE appropriateley
977 rdesc
->desc0
= cpu_to_le32(lower_32_bits(rdata
->skb_dma
));
978 rdesc
->desc1
= cpu_to_le32(upper_32_bits(rdata
->skb_dma
));
981 XGMAC_SET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, OWN
, 1);
982 XGMAC_SET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, INTE
, 1);
983 rdata
->interrupt
= 1;
984 if (rx_coalesce
&& (!rx_frames
|| ((i
+ 1) % rx_frames
))) {
985 /* Clear interrupt on completion bit */
986 XGMAC_SET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, INTE
,
988 rdata
->interrupt
= 0;
992 /* Make sure everything is written to the descriptors before
993 * telling the device about them
997 /* Update the total number of Rx descriptors */
998 XGMAC_DMA_IOWRITE(channel
, DMA_CH_RDRLR
, ring
->rdesc_count
- 1);
1000 /* Update the starting address of descriptor ring */
1001 rdata
= XGBE_GET_DESC_DATA(ring
, start_index
);
1002 XGMAC_DMA_IOWRITE(channel
, DMA_CH_RDLR_HI
,
1003 upper_32_bits(rdata
->rdesc_dma
));
1004 XGMAC_DMA_IOWRITE(channel
, DMA_CH_RDLR_LO
,
1005 lower_32_bits(rdata
->rdesc_dma
));
1007 /* Update the Rx Descriptor Tail Pointer */
1008 rdata
= XGBE_GET_DESC_DATA(ring
, start_index
+ ring
->rdesc_count
- 1);
1009 XGMAC_DMA_IOWRITE(channel
, DMA_CH_RDTR_LO
,
1010 lower_32_bits(rdata
->rdesc_dma
));
1012 DBGPR("<--rx_desc_init\n");
1015 static void xgbe_pre_xmit(struct xgbe_channel
*channel
)
1017 struct xgbe_prv_data
*pdata
= channel
->pdata
;
1018 struct xgbe_ring
*ring
= channel
->tx_ring
;
1019 struct xgbe_ring_data
*rdata
;
1020 struct xgbe_ring_desc
*rdesc
;
1021 struct xgbe_packet_data
*packet
= &ring
->packet_data
;
1022 unsigned int csum
, tso
, vlan
;
1023 unsigned int tso_context
, vlan_context
;
1024 unsigned int tx_coalesce
, tx_frames
;
1025 int start_index
= ring
->cur
;
1028 DBGPR("-->xgbe_pre_xmit\n");
1030 csum
= XGMAC_GET_BITS(packet
->attributes
, TX_PACKET_ATTRIBUTES
,
1032 tso
= XGMAC_GET_BITS(packet
->attributes
, TX_PACKET_ATTRIBUTES
,
1034 vlan
= XGMAC_GET_BITS(packet
->attributes
, TX_PACKET_ATTRIBUTES
,
1037 if (tso
&& (packet
->mss
!= ring
->tx
.cur_mss
))
1042 if (vlan
&& (packet
->vlan_ctag
!= ring
->tx
.cur_vlan_ctag
))
1047 tx_coalesce
= (pdata
->tx_usecs
|| pdata
->tx_frames
) ? 1 : 0;
1048 tx_frames
= pdata
->tx_frames
;
1049 if (tx_coalesce
&& !channel
->tx_timer_active
)
1050 ring
->coalesce_count
= 0;
1052 rdata
= XGBE_GET_DESC_DATA(ring
, ring
->cur
);
1053 rdesc
= rdata
->rdesc
;
1055 /* Create a context descriptor if this is a TSO packet */
1056 if (tso_context
|| vlan_context
) {
1058 DBGPR(" TSO context descriptor, mss=%u\n",
1061 /* Set the MSS size */
1062 XGMAC_SET_BITS_LE(rdesc
->desc2
, TX_CONTEXT_DESC2
,
1065 /* Mark it as a CONTEXT descriptor */
1066 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_CONTEXT_DESC3
,
1069 /* Indicate this descriptor contains the MSS */
1070 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_CONTEXT_DESC3
,
1073 ring
->tx
.cur_mss
= packet
->mss
;
1077 DBGPR(" VLAN context descriptor, ctag=%u\n",
1080 /* Mark it as a CONTEXT descriptor */
1081 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_CONTEXT_DESC3
,
1084 /* Set the VLAN tag */
1085 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_CONTEXT_DESC3
,
1086 VT
, packet
->vlan_ctag
);
1088 /* Indicate this descriptor contains the VLAN tag */
1089 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_CONTEXT_DESC3
,
1092 ring
->tx
.cur_vlan_ctag
= packet
->vlan_ctag
;
1096 rdata
= XGBE_GET_DESC_DATA(ring
, ring
->cur
);
1097 rdesc
= rdata
->rdesc
;
1100 /* Update buffer address (for TSO this is the header) */
1101 rdesc
->desc0
= cpu_to_le32(lower_32_bits(rdata
->skb_dma
));
1102 rdesc
->desc1
= cpu_to_le32(upper_32_bits(rdata
->skb_dma
));
1104 /* Update the buffer length */
1105 XGMAC_SET_BITS_LE(rdesc
->desc2
, TX_NORMAL_DESC2
, HL_B1L
,
1106 rdata
->skb_dma_len
);
1108 /* VLAN tag insertion check */
1110 XGMAC_SET_BITS_LE(rdesc
->desc2
, TX_NORMAL_DESC2
, VTIR
,
1111 TX_NORMAL_DESC2_VLAN_INSERT
);
1113 /* Set IC bit based on Tx coalescing settings */
1114 XGMAC_SET_BITS_LE(rdesc
->desc2
, TX_NORMAL_DESC2
, IC
, 1);
1115 if (tx_coalesce
&& (!tx_frames
||
1116 (++ring
->coalesce_count
% tx_frames
)))
1118 XGMAC_SET_BITS_LE(rdesc
->desc2
, TX_NORMAL_DESC2
, IC
, 0);
1120 /* Mark it as First Descriptor */
1121 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, FD
, 1);
1123 /* Mark it as a NORMAL descriptor */
1124 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, CTXT
, 0);
1126 /* Set OWN bit if not the first descriptor */
1127 if (ring
->cur
!= start_index
)
1128 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, OWN
, 1);
1132 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, TSE
, 1);
1133 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, TCPPL
,
1134 packet
->tcp_payload_len
);
1135 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, TCPHDRLEN
,
1136 packet
->tcp_header_len
/ 4);
1138 /* Enable CRC and Pad Insertion */
1139 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, CPC
, 0);
1141 /* Enable HW CSUM */
1143 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
,
1146 /* Set the total length to be transmitted */
1147 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, FL
,
1151 for (i
= ring
->cur
- start_index
+ 1; i
< packet
->rdesc_count
; i
++) {
1153 rdata
= XGBE_GET_DESC_DATA(ring
, ring
->cur
);
1154 rdesc
= rdata
->rdesc
;
1156 /* Update buffer address */
1157 rdesc
->desc0
= cpu_to_le32(lower_32_bits(rdata
->skb_dma
));
1158 rdesc
->desc1
= cpu_to_le32(upper_32_bits(rdata
->skb_dma
));
1160 /* Update the buffer length */
1161 XGMAC_SET_BITS_LE(rdesc
->desc2
, TX_NORMAL_DESC2
, HL_B1L
,
1162 rdata
->skb_dma_len
);
1164 /* Set IC bit based on Tx coalescing settings */
1165 XGMAC_SET_BITS_LE(rdesc
->desc2
, TX_NORMAL_DESC2
, IC
, 1);
1166 if (tx_coalesce
&& (!tx_frames
||
1167 (++ring
->coalesce_count
% tx_frames
)))
1169 XGMAC_SET_BITS_LE(rdesc
->desc2
, TX_NORMAL_DESC2
, IC
, 0);
1172 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, OWN
, 1);
1174 /* Mark it as NORMAL descriptor */
1175 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, CTXT
, 0);
1177 /* Enable HW CSUM */
1179 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
,
1183 /* Set LAST bit for the last descriptor */
1184 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, LD
, 1);
1186 /* In case the Tx DMA engine is running, make sure everything
1187 * is written to the descriptor(s) before setting the OWN bit
1188 * for the first descriptor
1192 /* Set OWN bit for the first descriptor */
1193 rdata
= XGBE_GET_DESC_DATA(ring
, start_index
);
1194 rdesc
= rdata
->rdesc
;
1195 XGMAC_SET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, OWN
, 1);
1197 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
1198 xgbe_dump_tx_desc(ring
, start_index
, packet
->rdesc_count
, 1);
1201 /* Make sure ownership is written to the descriptor */
1204 /* Issue a poll command to Tx DMA by writing address
1205 * of next immediate free descriptor */
1207 rdata
= XGBE_GET_DESC_DATA(ring
, ring
->cur
);
1208 XGMAC_DMA_IOWRITE(channel
, DMA_CH_TDTR_LO
,
1209 lower_32_bits(rdata
->rdesc_dma
));
1211 /* Start the Tx coalescing timer */
1212 if (tx_coalesce
&& !channel
->tx_timer_active
) {
1213 channel
->tx_timer_active
= 1;
1214 hrtimer_start(&channel
->tx_timer
,
1215 ktime_set(0, pdata
->tx_usecs
* NSEC_PER_USEC
),
1219 DBGPR(" %s: descriptors %u to %u written\n",
1220 channel
->name
, start_index
& (ring
->rdesc_count
- 1),
1221 (ring
->cur
- 1) & (ring
->rdesc_count
- 1));
1223 DBGPR("<--xgbe_pre_xmit\n");
1226 static int xgbe_dev_read(struct xgbe_channel
*channel
)
1228 struct xgbe_ring
*ring
= channel
->rx_ring
;
1229 struct xgbe_ring_data
*rdata
;
1230 struct xgbe_ring_desc
*rdesc
;
1231 struct xgbe_packet_data
*packet
= &ring
->packet_data
;
1232 struct net_device
*netdev
= channel
->pdata
->netdev
;
1233 unsigned int err
, etlt
;
1235 DBGPR("-->xgbe_dev_read: cur = %d\n", ring
->cur
);
1237 rdata
= XGBE_GET_DESC_DATA(ring
, ring
->cur
);
1238 rdesc
= rdata
->rdesc
;
1240 /* Check for data availability */
1241 if (XGMAC_GET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, OWN
))
1244 #ifdef XGMAC_ENABLE_RX_DESC_DUMP
1245 xgbe_dump_rx_desc(ring
, rdesc
, ring
->cur
);
1248 /* Get the packet length */
1249 rdata
->len
= XGMAC_GET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, PL
);
1251 if (!XGMAC_GET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, LD
)) {
1252 /* Not all the data has been transferred for this packet */
1253 XGMAC_SET_BITS(packet
->attributes
, RX_PACKET_ATTRIBUTES
,
1258 /* This is the last of the data for this packet */
1259 XGMAC_SET_BITS(packet
->attributes
, RX_PACKET_ATTRIBUTES
,
1262 /* Set checksum done indicator as appropriate */
1263 if (channel
->pdata
->netdev
->features
& NETIF_F_RXCSUM
)
1264 XGMAC_SET_BITS(packet
->attributes
, RX_PACKET_ATTRIBUTES
,
1267 /* Check for errors (only valid in last descriptor) */
1268 err
= XGMAC_GET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, ES
);
1269 etlt
= XGMAC_GET_BITS_LE(rdesc
->desc3
, RX_NORMAL_DESC3
, ETLT
);
1270 DBGPR(" err=%u, etlt=%#x\n", err
, etlt
);
1272 if (!err
|| (err
&& !etlt
)) {
1273 if ((etlt
== 0x09) &&
1274 (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
)) {
1275 XGMAC_SET_BITS(packet
->attributes
, RX_PACKET_ATTRIBUTES
,
1277 packet
->vlan_ctag
= XGMAC_GET_BITS_LE(rdesc
->desc0
,
1280 DBGPR(" vlan-ctag=0x%04x\n", packet
->vlan_ctag
);
1283 if ((etlt
== 0x05) || (etlt
== 0x06))
1284 XGMAC_SET_BITS(packet
->attributes
, RX_PACKET_ATTRIBUTES
,
1287 XGMAC_SET_BITS(packet
->errors
, RX_PACKET_ERRORS
,
1291 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel
->name
,
1292 ring
->cur
& (ring
->rdesc_count
- 1), ring
->cur
);
1297 static int xgbe_is_context_desc(struct xgbe_ring_desc
*rdesc
)
1299 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1300 return XGMAC_GET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, CTXT
);
1303 static int xgbe_is_last_desc(struct xgbe_ring_desc
*rdesc
)
1305 /* Rx and Tx share LD bit, so check TDES3.LD bit */
1306 return XGMAC_GET_BITS_LE(rdesc
->desc3
, TX_NORMAL_DESC3
, LD
);
1309 static int xgbe_enable_int(struct xgbe_channel
*channel
,
1310 enum xgbe_int int_id
)
1312 unsigned int dma_ch_ier
;
1314 dma_ch_ier
= XGMAC_DMA_IOREAD(channel
, DMA_CH_IER
);
1317 case XGMAC_INT_DMA_CH_SR_TI
:
1318 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TIE
, 1);
1320 case XGMAC_INT_DMA_CH_SR_TPS
:
1321 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TXSE
, 1);
1323 case XGMAC_INT_DMA_CH_SR_TBU
:
1324 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TBUE
, 1);
1326 case XGMAC_INT_DMA_CH_SR_RI
:
1327 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RIE
, 1);
1329 case XGMAC_INT_DMA_CH_SR_RBU
:
1330 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RBUE
, 1);
1332 case XGMAC_INT_DMA_CH_SR_RPS
:
1333 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RSE
, 1);
1335 case XGMAC_INT_DMA_CH_SR_TI_RI
:
1336 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TIE
, 1);
1337 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RIE
, 1);
1339 case XGMAC_INT_DMA_CH_SR_FBE
:
1340 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, FBEE
, 1);
1342 case XGMAC_INT_DMA_ALL
:
1343 dma_ch_ier
|= channel
->saved_ier
;
1349 XGMAC_DMA_IOWRITE(channel
, DMA_CH_IER
, dma_ch_ier
);
1354 static int xgbe_disable_int(struct xgbe_channel
*channel
,
1355 enum xgbe_int int_id
)
1357 unsigned int dma_ch_ier
;
1359 dma_ch_ier
= XGMAC_DMA_IOREAD(channel
, DMA_CH_IER
);
1362 case XGMAC_INT_DMA_CH_SR_TI
:
1363 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TIE
, 0);
1365 case XGMAC_INT_DMA_CH_SR_TPS
:
1366 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TXSE
, 0);
1368 case XGMAC_INT_DMA_CH_SR_TBU
:
1369 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TBUE
, 0);
1371 case XGMAC_INT_DMA_CH_SR_RI
:
1372 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RIE
, 0);
1374 case XGMAC_INT_DMA_CH_SR_RBU
:
1375 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RBUE
, 0);
1377 case XGMAC_INT_DMA_CH_SR_RPS
:
1378 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RSE
, 0);
1380 case XGMAC_INT_DMA_CH_SR_TI_RI
:
1381 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, TIE
, 0);
1382 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, RIE
, 0);
1384 case XGMAC_INT_DMA_CH_SR_FBE
:
1385 XGMAC_SET_BITS(dma_ch_ier
, DMA_CH_IER
, FBEE
, 0);
1387 case XGMAC_INT_DMA_ALL
:
1388 channel
->saved_ier
= dma_ch_ier
& XGBE_DMA_INTERRUPT_MASK
;
1389 dma_ch_ier
&= ~XGBE_DMA_INTERRUPT_MASK
;
1395 XGMAC_DMA_IOWRITE(channel
, DMA_CH_IER
, dma_ch_ier
);
1400 static int xgbe_exit(struct xgbe_prv_data
*pdata
)
1402 unsigned int count
= 2000;
1404 DBGPR("-->xgbe_exit\n");
1406 /* Issue a software reset */
1407 XGMAC_IOWRITE_BITS(pdata
, DMA_MR
, SWR
, 1);
1408 usleep_range(10, 15);
1410 /* Poll Until Poll Condition */
1411 while (count
-- && XGMAC_IOREAD_BITS(pdata
, DMA_MR
, SWR
))
1412 usleep_range(500, 600);
1417 DBGPR("<--xgbe_exit\n");
1422 static int xgbe_flush_tx_queues(struct xgbe_prv_data
*pdata
)
1424 unsigned int i
, count
;
1426 for (i
= 0; i
< pdata
->hw_feat
.tx_q_cnt
; i
++)
1427 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_TQOMR
, FTQ
, 1);
1429 /* Poll Until Poll Condition */
1430 for (i
= 0; i
< pdata
->hw_feat
.tx_q_cnt
; i
++) {
1432 while (count
-- && XGMAC_MTL_IOREAD_BITS(pdata
, i
,
1434 usleep_range(500, 600);
1443 static void xgbe_config_dma_bus(struct xgbe_prv_data
*pdata
)
1445 /* Set enhanced addressing mode */
1446 XGMAC_IOWRITE_BITS(pdata
, DMA_SBMR
, EAME
, 1);
1448 /* Set the System Bus mode */
1449 XGMAC_IOWRITE_BITS(pdata
, DMA_SBMR
, UNDEF
, 1);
1450 XGMAC_IOWRITE_BITS(pdata
, DMA_SBMR
, BLEN_256
, 1);
1453 static void xgbe_config_dma_cache(struct xgbe_prv_data
*pdata
)
1455 unsigned int arcache
, awcache
;
1458 XGMAC_SET_BITS(arcache
, DMA_AXIARCR
, DRC
, pdata
->arcache
);
1459 XGMAC_SET_BITS(arcache
, DMA_AXIARCR
, DRD
, pdata
->axdomain
);
1460 XGMAC_SET_BITS(arcache
, DMA_AXIARCR
, TEC
, pdata
->arcache
);
1461 XGMAC_SET_BITS(arcache
, DMA_AXIARCR
, TED
, pdata
->axdomain
);
1462 XGMAC_SET_BITS(arcache
, DMA_AXIARCR
, THC
, pdata
->arcache
);
1463 XGMAC_SET_BITS(arcache
, DMA_AXIARCR
, THD
, pdata
->axdomain
);
1464 XGMAC_IOWRITE(pdata
, DMA_AXIARCR
, arcache
);
1467 XGMAC_SET_BITS(awcache
, DMA_AXIAWCR
, DWC
, pdata
->awcache
);
1468 XGMAC_SET_BITS(awcache
, DMA_AXIAWCR
, DWD
, pdata
->axdomain
);
1469 XGMAC_SET_BITS(awcache
, DMA_AXIAWCR
, RPC
, pdata
->awcache
);
1470 XGMAC_SET_BITS(awcache
, DMA_AXIAWCR
, RPD
, pdata
->axdomain
);
1471 XGMAC_SET_BITS(awcache
, DMA_AXIAWCR
, RHC
, pdata
->awcache
);
1472 XGMAC_SET_BITS(awcache
, DMA_AXIAWCR
, RHD
, pdata
->axdomain
);
1473 XGMAC_SET_BITS(awcache
, DMA_AXIAWCR
, TDC
, pdata
->awcache
);
1474 XGMAC_SET_BITS(awcache
, DMA_AXIAWCR
, TDD
, pdata
->axdomain
);
1475 XGMAC_IOWRITE(pdata
, DMA_AXIAWCR
, awcache
);
1478 static void xgbe_config_mtl_mode(struct xgbe_prv_data
*pdata
)
1482 /* Set Tx to weighted round robin scheduling algorithm (when
1483 * traffic class is using ETS algorithm)
1485 XGMAC_IOWRITE_BITS(pdata
, MTL_OMR
, ETSALG
, MTL_ETSALG_WRR
);
1487 /* Set Tx traffic classes to strict priority algorithm */
1488 for (i
= 0; i
< XGBE_TC_CNT
; i
++)
1489 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_TC_ETSCR
, TSA
, MTL_TSA_SP
);
1491 /* Set Rx to strict priority algorithm */
1492 XGMAC_IOWRITE_BITS(pdata
, MTL_OMR
, RAA
, MTL_RAA_SP
);
1495 static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size
,
1496 unsigned char queue_count
)
1498 unsigned int q_fifo_size
= 0;
1499 enum xgbe_mtl_fifo_size p_fifo
= XGMAC_MTL_FIFO_SIZE_256
;
1501 /* Calculate Tx/Rx fifo share per queue */
1502 switch (fifo_size
) {
1504 q_fifo_size
= XGBE_FIFO_SIZE_B(128);
1507 q_fifo_size
= XGBE_FIFO_SIZE_B(256);
1510 q_fifo_size
= XGBE_FIFO_SIZE_B(512);
1513 q_fifo_size
= XGBE_FIFO_SIZE_KB(1);
1516 q_fifo_size
= XGBE_FIFO_SIZE_KB(2);
1519 q_fifo_size
= XGBE_FIFO_SIZE_KB(4);
1522 q_fifo_size
= XGBE_FIFO_SIZE_KB(8);
1525 q_fifo_size
= XGBE_FIFO_SIZE_KB(16);
1528 q_fifo_size
= XGBE_FIFO_SIZE_KB(32);
1531 q_fifo_size
= XGBE_FIFO_SIZE_KB(64);
1534 q_fifo_size
= XGBE_FIFO_SIZE_KB(128);
1537 q_fifo_size
= XGBE_FIFO_SIZE_KB(256);
1540 q_fifo_size
= q_fifo_size
/ queue_count
;
1542 /* Set the queue fifo size programmable value */
1543 if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(256))
1544 p_fifo
= XGMAC_MTL_FIFO_SIZE_256K
;
1545 else if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(128))
1546 p_fifo
= XGMAC_MTL_FIFO_SIZE_128K
;
1547 else if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(64))
1548 p_fifo
= XGMAC_MTL_FIFO_SIZE_64K
;
1549 else if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(32))
1550 p_fifo
= XGMAC_MTL_FIFO_SIZE_32K
;
1551 else if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(16))
1552 p_fifo
= XGMAC_MTL_FIFO_SIZE_16K
;
1553 else if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(8))
1554 p_fifo
= XGMAC_MTL_FIFO_SIZE_8K
;
1555 else if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(4))
1556 p_fifo
= XGMAC_MTL_FIFO_SIZE_4K
;
1557 else if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(2))
1558 p_fifo
= XGMAC_MTL_FIFO_SIZE_2K
;
1559 else if (q_fifo_size
>= XGBE_FIFO_SIZE_KB(1))
1560 p_fifo
= XGMAC_MTL_FIFO_SIZE_1K
;
1561 else if (q_fifo_size
>= XGBE_FIFO_SIZE_B(512))
1562 p_fifo
= XGMAC_MTL_FIFO_SIZE_512
;
1563 else if (q_fifo_size
>= XGBE_FIFO_SIZE_B(256))
1564 p_fifo
= XGMAC_MTL_FIFO_SIZE_256
;
1569 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data
*pdata
)
1571 enum xgbe_mtl_fifo_size fifo_size
;
1574 fifo_size
= xgbe_calculate_per_queue_fifo(pdata
->hw_feat
.tx_fifo_size
,
1575 pdata
->hw_feat
.tx_q_cnt
);
1577 for (i
= 0; i
< pdata
->hw_feat
.tx_q_cnt
; i
++)
1578 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_TQOMR
, TQS
, fifo_size
);
1580 netdev_notice(pdata
->netdev
, "%d Tx queues, %d byte fifo per queue\n",
1581 pdata
->hw_feat
.tx_q_cnt
, ((fifo_size
+ 1) * 256));
1584 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data
*pdata
)
1586 enum xgbe_mtl_fifo_size fifo_size
;
1589 fifo_size
= xgbe_calculate_per_queue_fifo(pdata
->hw_feat
.rx_fifo_size
,
1590 pdata
->hw_feat
.rx_q_cnt
);
1592 for (i
= 0; i
< pdata
->hw_feat
.rx_q_cnt
; i
++)
1593 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_RQOMR
, RQS
, fifo_size
);
1595 netdev_notice(pdata
->netdev
, "%d Rx queues, %d byte fifo per queue\n",
1596 pdata
->hw_feat
.rx_q_cnt
, ((fifo_size
+ 1) * 256));
1599 static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data
*pdata
)
1601 unsigned int i
, reg
, reg_val
;
1602 unsigned int q_count
= pdata
->hw_feat
.rx_q_cnt
;
1604 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
1607 for (i
= 0; i
< q_count
;) {
1608 reg_val
|= (0x80 << ((i
++ % MTL_RQDCM_Q_PER_REG
) << 3));
1610 if ((i
% MTL_RQDCM_Q_PER_REG
) && (i
!= q_count
))
1613 XGMAC_IOWRITE(pdata
, reg
, reg_val
);
1615 reg
+= MTL_RQDCM_INC
;
1620 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data
*pdata
)
1624 for (i
= 0; i
< pdata
->hw_feat
.rx_q_cnt
; i
++) {
1625 /* Activate flow control when less than 4k left in fifo */
1626 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_RQOMR
, RFA
, 2);
1628 /* De-activate flow control when more than 6k left in fifo */
1629 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_RQOMR
, RFD
, 4);
1633 static void xgbe_config_mac_address(struct xgbe_prv_data
*pdata
)
1635 xgbe_set_mac_address(pdata
, pdata
->netdev
->dev_addr
);
1637 /* Filtering is done using perfect filtering and hash filtering */
1638 if (pdata
->hw_feat
.hash_table_size
) {
1639 XGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, HPF
, 1);
1640 XGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, HUC
, 1);
1641 XGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, HMC
, 1);
1645 static void xgbe_config_jumbo_enable(struct xgbe_prv_data
*pdata
)
1649 val
= (pdata
->netdev
->mtu
> XGMAC_STD_PACKET_MTU
) ? 1 : 0;
1651 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, JE
, val
);
1654 static void xgbe_config_checksum_offload(struct xgbe_prv_data
*pdata
)
1656 if (pdata
->netdev
->features
& NETIF_F_RXCSUM
)
1657 xgbe_enable_rx_csum(pdata
);
1659 xgbe_disable_rx_csum(pdata
);
1662 static void xgbe_config_vlan_support(struct xgbe_prv_data
*pdata
)
1664 /* Indicate that VLAN Tx CTAGs come from context descriptors */
1665 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANIR
, CSVL
, 0);
1666 XGMAC_IOWRITE_BITS(pdata
, MAC_VLANIR
, VLTI
, 1);
1668 /* Set the current VLAN Hash Table register value */
1669 xgbe_update_vlan_hash_table(pdata
);
1671 if (pdata
->netdev
->features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1672 xgbe_enable_rx_vlan_filtering(pdata
);
1674 xgbe_disable_rx_vlan_filtering(pdata
);
1676 if (pdata
->netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
)
1677 xgbe_enable_rx_vlan_stripping(pdata
);
1679 xgbe_disable_rx_vlan_stripping(pdata
);
1682 static void xgbe_tx_mmc_int(struct xgbe_prv_data
*pdata
)
1684 struct xgbe_mmc_stats
*stats
= &pdata
->mmc_stats
;
1685 unsigned int mmc_isr
= XGMAC_IOREAD(pdata
, MMC_TISR
);
1687 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXOCTETCOUNT_GB
))
1688 stats
->txoctetcount_gb
+=
1689 XGMAC_IOREAD(pdata
, MMC_TXOCTETCOUNT_GB_LO
);
1691 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXFRAMECOUNT_GB
))
1692 stats
->txframecount_gb
+=
1693 XGMAC_IOREAD(pdata
, MMC_TXFRAMECOUNT_GB_LO
);
1695 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXBROADCASTFRAMES_G
))
1696 stats
->txbroadcastframes_g
+=
1697 XGMAC_IOREAD(pdata
, MMC_TXBROADCASTFRAMES_G_LO
);
1699 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXMULTICASTFRAMES_G
))
1700 stats
->txmulticastframes_g
+=
1701 XGMAC_IOREAD(pdata
, MMC_TXMULTICASTFRAMES_G_LO
);
1703 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TX64OCTETS_GB
))
1704 stats
->tx64octets_gb
+=
1705 XGMAC_IOREAD(pdata
, MMC_TX64OCTETS_GB_LO
);
1707 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TX65TO127OCTETS_GB
))
1708 stats
->tx65to127octets_gb
+=
1709 XGMAC_IOREAD(pdata
, MMC_TX65TO127OCTETS_GB_LO
);
1711 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TX128TO255OCTETS_GB
))
1712 stats
->tx128to255octets_gb
+=
1713 XGMAC_IOREAD(pdata
, MMC_TX128TO255OCTETS_GB_LO
);
1715 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TX256TO511OCTETS_GB
))
1716 stats
->tx256to511octets_gb
+=
1717 XGMAC_IOREAD(pdata
, MMC_TX256TO511OCTETS_GB_LO
);
1719 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TX512TO1023OCTETS_GB
))
1720 stats
->tx512to1023octets_gb
+=
1721 XGMAC_IOREAD(pdata
, MMC_TX512TO1023OCTETS_GB_LO
);
1723 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TX1024TOMAXOCTETS_GB
))
1724 stats
->tx1024tomaxoctets_gb
+=
1725 XGMAC_IOREAD(pdata
, MMC_TX1024TOMAXOCTETS_GB_LO
);
1727 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXUNICASTFRAMES_GB
))
1728 stats
->txunicastframes_gb
+=
1729 XGMAC_IOREAD(pdata
, MMC_TXUNICASTFRAMES_GB_LO
);
1731 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXMULTICASTFRAMES_GB
))
1732 stats
->txmulticastframes_gb
+=
1733 XGMAC_IOREAD(pdata
, MMC_TXMULTICASTFRAMES_GB_LO
);
1735 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXBROADCASTFRAMES_GB
))
1736 stats
->txbroadcastframes_g
+=
1737 XGMAC_IOREAD(pdata
, MMC_TXBROADCASTFRAMES_GB_LO
);
1739 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXUNDERFLOWERROR
))
1740 stats
->txunderflowerror
+=
1741 XGMAC_IOREAD(pdata
, MMC_TXUNDERFLOWERROR_LO
);
1743 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXOCTETCOUNT_G
))
1744 stats
->txoctetcount_g
+=
1745 XGMAC_IOREAD(pdata
, MMC_TXOCTETCOUNT_G_LO
);
1747 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXFRAMECOUNT_G
))
1748 stats
->txframecount_g
+=
1749 XGMAC_IOREAD(pdata
, MMC_TXFRAMECOUNT_G_LO
);
1751 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXPAUSEFRAMES
))
1752 stats
->txpauseframes
+=
1753 XGMAC_IOREAD(pdata
, MMC_TXPAUSEFRAMES_LO
);
1755 if (XGMAC_GET_BITS(mmc_isr
, MMC_TISR
, TXVLANFRAMES_G
))
1756 stats
->txvlanframes_g
+=
1757 XGMAC_IOREAD(pdata
, MMC_TXVLANFRAMES_G_LO
);
1760 static void xgbe_rx_mmc_int(struct xgbe_prv_data
*pdata
)
1762 struct xgbe_mmc_stats
*stats
= &pdata
->mmc_stats
;
1763 unsigned int mmc_isr
= XGMAC_IOREAD(pdata
, MMC_RISR
);
1765 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXFRAMECOUNT_GB
))
1766 stats
->rxframecount_gb
+=
1767 XGMAC_IOREAD(pdata
, MMC_RXFRAMECOUNT_GB_LO
);
1769 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXOCTETCOUNT_GB
))
1770 stats
->rxoctetcount_gb
+=
1771 XGMAC_IOREAD(pdata
, MMC_RXOCTETCOUNT_GB_LO
);
1773 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXOCTETCOUNT_G
))
1774 stats
->rxoctetcount_g
+=
1775 XGMAC_IOREAD(pdata
, MMC_RXOCTETCOUNT_G_LO
);
1777 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXBROADCASTFRAMES_G
))
1778 stats
->rxbroadcastframes_g
+=
1779 XGMAC_IOREAD(pdata
, MMC_RXBROADCASTFRAMES_G_LO
);
1781 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXMULTICASTFRAMES_G
))
1782 stats
->rxmulticastframes_g
+=
1783 XGMAC_IOREAD(pdata
, MMC_RXMULTICASTFRAMES_G_LO
);
1785 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXCRCERROR
))
1786 stats
->rxcrcerror
+=
1787 XGMAC_IOREAD(pdata
, MMC_RXCRCERROR_LO
);
1789 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXRUNTERROR
))
1790 stats
->rxrunterror
+=
1791 XGMAC_IOREAD(pdata
, MMC_RXRUNTERROR
);
1793 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXJABBERERROR
))
1794 stats
->rxjabbererror
+=
1795 XGMAC_IOREAD(pdata
, MMC_RXJABBERERROR
);
1797 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXUNDERSIZE_G
))
1798 stats
->rxundersize_g
+=
1799 XGMAC_IOREAD(pdata
, MMC_RXUNDERSIZE_G
);
1801 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXOVERSIZE_G
))
1802 stats
->rxoversize_g
+=
1803 XGMAC_IOREAD(pdata
, MMC_RXOVERSIZE_G
);
1805 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RX64OCTETS_GB
))
1806 stats
->rx64octets_gb
+=
1807 XGMAC_IOREAD(pdata
, MMC_RX64OCTETS_GB_LO
);
1809 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RX65TO127OCTETS_GB
))
1810 stats
->rx65to127octets_gb
+=
1811 XGMAC_IOREAD(pdata
, MMC_RX65TO127OCTETS_GB_LO
);
1813 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RX128TO255OCTETS_GB
))
1814 stats
->rx128to255octets_gb
+=
1815 XGMAC_IOREAD(pdata
, MMC_RX128TO255OCTETS_GB_LO
);
1817 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RX256TO511OCTETS_GB
))
1818 stats
->rx256to511octets_gb
+=
1819 XGMAC_IOREAD(pdata
, MMC_RX256TO511OCTETS_GB_LO
);
1821 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RX512TO1023OCTETS_GB
))
1822 stats
->rx512to1023octets_gb
+=
1823 XGMAC_IOREAD(pdata
, MMC_RX512TO1023OCTETS_GB_LO
);
1825 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RX1024TOMAXOCTETS_GB
))
1826 stats
->rx1024tomaxoctets_gb
+=
1827 XGMAC_IOREAD(pdata
, MMC_RX1024TOMAXOCTETS_GB_LO
);
1829 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXUNICASTFRAMES_G
))
1830 stats
->rxunicastframes_g
+=
1831 XGMAC_IOREAD(pdata
, MMC_RXUNICASTFRAMES_G_LO
);
1833 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXLENGTHERROR
))
1834 stats
->rxlengtherror
+=
1835 XGMAC_IOREAD(pdata
, MMC_RXLENGTHERROR_LO
);
1837 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXOUTOFRANGETYPE
))
1838 stats
->rxoutofrangetype
+=
1839 XGMAC_IOREAD(pdata
, MMC_RXOUTOFRANGETYPE_LO
);
1841 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXPAUSEFRAMES
))
1842 stats
->rxpauseframes
+=
1843 XGMAC_IOREAD(pdata
, MMC_RXPAUSEFRAMES_LO
);
1845 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXFIFOOVERFLOW
))
1846 stats
->rxfifooverflow
+=
1847 XGMAC_IOREAD(pdata
, MMC_RXFIFOOVERFLOW_LO
);
1849 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXVLANFRAMES_GB
))
1850 stats
->rxvlanframes_gb
+=
1851 XGMAC_IOREAD(pdata
, MMC_RXVLANFRAMES_GB_LO
);
1853 if (XGMAC_GET_BITS(mmc_isr
, MMC_RISR
, RXWATCHDOGERROR
))
1854 stats
->rxwatchdogerror
+=
1855 XGMAC_IOREAD(pdata
, MMC_RXWATCHDOGERROR
);
1858 static void xgbe_read_mmc_stats(struct xgbe_prv_data
*pdata
)
1860 struct xgbe_mmc_stats
*stats
= &pdata
->mmc_stats
;
1862 /* Freeze counters */
1863 XGMAC_IOWRITE_BITS(pdata
, MMC_CR
, MCF
, 1);
1865 stats
->txoctetcount_gb
+=
1866 XGMAC_IOREAD(pdata
, MMC_TXOCTETCOUNT_GB_LO
);
1868 stats
->txframecount_gb
+=
1869 XGMAC_IOREAD(pdata
, MMC_TXFRAMECOUNT_GB_LO
);
1871 stats
->txbroadcastframes_g
+=
1872 XGMAC_IOREAD(pdata
, MMC_TXBROADCASTFRAMES_G_LO
);
1874 stats
->txmulticastframes_g
+=
1875 XGMAC_IOREAD(pdata
, MMC_TXMULTICASTFRAMES_G_LO
);
1877 stats
->tx64octets_gb
+=
1878 XGMAC_IOREAD(pdata
, MMC_TX64OCTETS_GB_LO
);
1880 stats
->tx65to127octets_gb
+=
1881 XGMAC_IOREAD(pdata
, MMC_TX65TO127OCTETS_GB_LO
);
1883 stats
->tx128to255octets_gb
+=
1884 XGMAC_IOREAD(pdata
, MMC_TX128TO255OCTETS_GB_LO
);
1886 stats
->tx256to511octets_gb
+=
1887 XGMAC_IOREAD(pdata
, MMC_TX256TO511OCTETS_GB_LO
);
1889 stats
->tx512to1023octets_gb
+=
1890 XGMAC_IOREAD(pdata
, MMC_TX512TO1023OCTETS_GB_LO
);
1892 stats
->tx1024tomaxoctets_gb
+=
1893 XGMAC_IOREAD(pdata
, MMC_TX1024TOMAXOCTETS_GB_LO
);
1895 stats
->txunicastframes_gb
+=
1896 XGMAC_IOREAD(pdata
, MMC_TXUNICASTFRAMES_GB_LO
);
1898 stats
->txmulticastframes_gb
+=
1899 XGMAC_IOREAD(pdata
, MMC_TXMULTICASTFRAMES_GB_LO
);
1901 stats
->txbroadcastframes_g
+=
1902 XGMAC_IOREAD(pdata
, MMC_TXBROADCASTFRAMES_GB_LO
);
1904 stats
->txunderflowerror
+=
1905 XGMAC_IOREAD(pdata
, MMC_TXUNDERFLOWERROR_LO
);
1907 stats
->txoctetcount_g
+=
1908 XGMAC_IOREAD(pdata
, MMC_TXOCTETCOUNT_G_LO
);
1910 stats
->txframecount_g
+=
1911 XGMAC_IOREAD(pdata
, MMC_TXFRAMECOUNT_G_LO
);
1913 stats
->txpauseframes
+=
1914 XGMAC_IOREAD(pdata
, MMC_TXPAUSEFRAMES_LO
);
1916 stats
->txvlanframes_g
+=
1917 XGMAC_IOREAD(pdata
, MMC_TXVLANFRAMES_G_LO
);
1919 stats
->rxframecount_gb
+=
1920 XGMAC_IOREAD(pdata
, MMC_RXFRAMECOUNT_GB_LO
);
1922 stats
->rxoctetcount_gb
+=
1923 XGMAC_IOREAD(pdata
, MMC_RXOCTETCOUNT_GB_LO
);
1925 stats
->rxoctetcount_g
+=
1926 XGMAC_IOREAD(pdata
, MMC_RXOCTETCOUNT_G_LO
);
1928 stats
->rxbroadcastframes_g
+=
1929 XGMAC_IOREAD(pdata
, MMC_RXBROADCASTFRAMES_G_LO
);
1931 stats
->rxmulticastframes_g
+=
1932 XGMAC_IOREAD(pdata
, MMC_RXMULTICASTFRAMES_G_LO
);
1934 stats
->rxcrcerror
+=
1935 XGMAC_IOREAD(pdata
, MMC_RXCRCERROR_LO
);
1937 stats
->rxrunterror
+=
1938 XGMAC_IOREAD(pdata
, MMC_RXRUNTERROR
);
1940 stats
->rxjabbererror
+=
1941 XGMAC_IOREAD(pdata
, MMC_RXJABBERERROR
);
1943 stats
->rxundersize_g
+=
1944 XGMAC_IOREAD(pdata
, MMC_RXUNDERSIZE_G
);
1946 stats
->rxoversize_g
+=
1947 XGMAC_IOREAD(pdata
, MMC_RXOVERSIZE_G
);
1949 stats
->rx64octets_gb
+=
1950 XGMAC_IOREAD(pdata
, MMC_RX64OCTETS_GB_LO
);
1952 stats
->rx65to127octets_gb
+=
1953 XGMAC_IOREAD(pdata
, MMC_RX65TO127OCTETS_GB_LO
);
1955 stats
->rx128to255octets_gb
+=
1956 XGMAC_IOREAD(pdata
, MMC_RX128TO255OCTETS_GB_LO
);
1958 stats
->rx256to511octets_gb
+=
1959 XGMAC_IOREAD(pdata
, MMC_RX256TO511OCTETS_GB_LO
);
1961 stats
->rx512to1023octets_gb
+=
1962 XGMAC_IOREAD(pdata
, MMC_RX512TO1023OCTETS_GB_LO
);
1964 stats
->rx1024tomaxoctets_gb
+=
1965 XGMAC_IOREAD(pdata
, MMC_RX1024TOMAXOCTETS_GB_LO
);
1967 stats
->rxunicastframes_g
+=
1968 XGMAC_IOREAD(pdata
, MMC_RXUNICASTFRAMES_G_LO
);
1970 stats
->rxlengtherror
+=
1971 XGMAC_IOREAD(pdata
, MMC_RXLENGTHERROR_LO
);
1973 stats
->rxoutofrangetype
+=
1974 XGMAC_IOREAD(pdata
, MMC_RXOUTOFRANGETYPE_LO
);
1976 stats
->rxpauseframes
+=
1977 XGMAC_IOREAD(pdata
, MMC_RXPAUSEFRAMES_LO
);
1979 stats
->rxfifooverflow
+=
1980 XGMAC_IOREAD(pdata
, MMC_RXFIFOOVERFLOW_LO
);
1982 stats
->rxvlanframes_gb
+=
1983 XGMAC_IOREAD(pdata
, MMC_RXVLANFRAMES_GB_LO
);
1985 stats
->rxwatchdogerror
+=
1986 XGMAC_IOREAD(pdata
, MMC_RXWATCHDOGERROR
);
1988 /* Un-freeze counters */
1989 XGMAC_IOWRITE_BITS(pdata
, MMC_CR
, MCF
, 0);
1992 static void xgbe_config_mmc(struct xgbe_prv_data
*pdata
)
1994 /* Set counters to reset on read */
1995 XGMAC_IOWRITE_BITS(pdata
, MMC_CR
, ROR
, 1);
1997 /* Reset the counters */
1998 XGMAC_IOWRITE_BITS(pdata
, MMC_CR
, CR
, 1);
2001 static void xgbe_enable_tx(struct xgbe_prv_data
*pdata
)
2003 struct xgbe_channel
*channel
;
2006 /* Enable each Tx DMA channel */
2007 channel
= pdata
->channel
;
2008 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
2009 if (!channel
->tx_ring
)
2012 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_TCR
, ST
, 1);
2015 /* Enable each Tx queue */
2016 for (i
= 0; i
< pdata
->hw_feat
.tx_q_cnt
; i
++)
2017 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_TQOMR
, TXQEN
,
2021 XGMAC_IOWRITE_BITS(pdata
, MAC_TCR
, TE
, 1);
2024 static void xgbe_disable_tx(struct xgbe_prv_data
*pdata
)
2026 struct xgbe_channel
*channel
;
2029 /* Disable MAC Tx */
2030 XGMAC_IOWRITE_BITS(pdata
, MAC_TCR
, TE
, 0);
2032 /* Disable each Tx queue */
2033 for (i
= 0; i
< pdata
->hw_feat
.tx_q_cnt
; i
++)
2034 XGMAC_MTL_IOWRITE_BITS(pdata
, i
, MTL_Q_TQOMR
, TXQEN
, 0);
2036 /* Disable each Tx DMA channel */
2037 channel
= pdata
->channel
;
2038 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
2039 if (!channel
->tx_ring
)
2042 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_TCR
, ST
, 0);
2046 static void xgbe_enable_rx(struct xgbe_prv_data
*pdata
)
2048 struct xgbe_channel
*channel
;
2049 unsigned int reg_val
, i
;
2051 /* Enable each Rx DMA channel */
2052 channel
= pdata
->channel
;
2053 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
2054 if (!channel
->rx_ring
)
2057 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_RCR
, SR
, 1);
2060 /* Enable each Rx queue */
2062 for (i
= 0; i
< pdata
->hw_feat
.rx_q_cnt
; i
++)
2063 reg_val
|= (0x02 << (i
<< 1));
2064 XGMAC_IOWRITE(pdata
, MAC_RQC0R
, reg_val
);
2067 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, DCRCC
, 1);
2068 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, CST
, 1);
2069 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, ACS
, 1);
2070 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, RE
, 1);
2073 static void xgbe_disable_rx(struct xgbe_prv_data
*pdata
)
2075 struct xgbe_channel
*channel
;
2078 /* Disable MAC Rx */
2079 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, DCRCC
, 0);
2080 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, CST
, 0);
2081 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, ACS
, 0);
2082 XGMAC_IOWRITE_BITS(pdata
, MAC_RCR
, RE
, 0);
2084 /* Disable each Rx queue */
2085 XGMAC_IOWRITE(pdata
, MAC_RQC0R
, 0);
2087 /* Disable each Rx DMA channel */
2088 channel
= pdata
->channel
;
2089 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
2090 if (!channel
->rx_ring
)
2093 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_RCR
, SR
, 0);
2097 static void xgbe_powerup_tx(struct xgbe_prv_data
*pdata
)
2099 struct xgbe_channel
*channel
;
2102 /* Enable each Tx DMA channel */
2103 channel
= pdata
->channel
;
2104 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
2105 if (!channel
->tx_ring
)
2108 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_TCR
, ST
, 1);
2112 XGMAC_IOWRITE_BITS(pdata
, MAC_TCR
, TE
, 1);
2115 static void xgbe_powerdown_tx(struct xgbe_prv_data
*pdata
)
2117 struct xgbe_channel
*channel
;
2120 /* Disable MAC Tx */
2121 XGMAC_IOWRITE_BITS(pdata
, MAC_TCR
, TE
, 0);
2123 /* Disable each Tx DMA channel */
2124 channel
= pdata
->channel
;
2125 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
2126 if (!channel
->tx_ring
)
2129 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_TCR
, ST
, 0);
2133 static void xgbe_powerup_rx(struct xgbe_prv_data
*pdata
)
2135 struct xgbe_channel
*channel
;
2138 /* Enable each Rx DMA channel */
2139 channel
= pdata
->channel
;
2140 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
2141 if (!channel
->rx_ring
)
2144 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_RCR
, SR
, 1);
2148 static void xgbe_powerdown_rx(struct xgbe_prv_data
*pdata
)
2150 struct xgbe_channel
*channel
;
2153 /* Disable each Rx DMA channel */
2154 channel
= pdata
->channel
;
2155 for (i
= 0; i
< pdata
->channel_count
; i
++, channel
++) {
2156 if (!channel
->rx_ring
)
2159 XGMAC_DMA_IOWRITE_BITS(channel
, DMA_CH_RCR
, SR
, 0);
2163 static int xgbe_init(struct xgbe_prv_data
*pdata
)
2165 struct xgbe_desc_if
*desc_if
= &pdata
->desc_if
;
2168 DBGPR("-->xgbe_init\n");
2170 /* Flush Tx queues */
2171 ret
= xgbe_flush_tx_queues(pdata
);
2176 * Initialize DMA related features
2178 xgbe_config_dma_bus(pdata
);
2179 xgbe_config_dma_cache(pdata
);
2180 xgbe_config_osp_mode(pdata
);
2181 xgbe_config_pblx8(pdata
);
2182 xgbe_config_tx_pbl_val(pdata
);
2183 xgbe_config_rx_pbl_val(pdata
);
2184 xgbe_config_rx_coalesce(pdata
);
2185 xgbe_config_tx_coalesce(pdata
);
2186 xgbe_config_rx_buffer_size(pdata
);
2187 xgbe_config_tso_mode(pdata
);
2188 desc_if
->wrapper_tx_desc_init(pdata
);
2189 desc_if
->wrapper_rx_desc_init(pdata
);
2190 xgbe_enable_dma_interrupts(pdata
);
2193 * Initialize MTL related features
2195 xgbe_config_mtl_mode(pdata
);
2196 xgbe_config_rx_queue_mapping(pdata
);
2197 /*TODO: Program the priorities mapped to the Selected Traffic Classes
2198 in MTL_TC_Prty_Map0-3 registers */
2199 xgbe_config_tsf_mode(pdata
, pdata
->tx_sf_mode
);
2200 xgbe_config_rsf_mode(pdata
, pdata
->rx_sf_mode
);
2201 xgbe_config_tx_threshold(pdata
, pdata
->tx_threshold
);
2202 xgbe_config_rx_threshold(pdata
, pdata
->rx_threshold
);
2203 xgbe_config_tx_fifo_size(pdata
);
2204 xgbe_config_rx_fifo_size(pdata
);
2205 xgbe_config_flow_control_threshold(pdata
);
2206 /*TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
2207 /*TODO: Error Packet and undersized good Packet forwarding enable
2210 xgbe_enable_mtl_interrupts(pdata
);
2212 /* Transmit Class Weight */
2213 XGMAC_IOWRITE_BITS(pdata
, MTL_Q_TCQWR
, QW
, 0x10);
2216 * Initialize MAC related features
2218 xgbe_config_mac_address(pdata
);
2219 xgbe_config_jumbo_enable(pdata
);
2220 xgbe_config_flow_control(pdata
);
2221 xgbe_config_checksum_offload(pdata
);
2222 xgbe_config_vlan_support(pdata
);
2223 xgbe_config_mmc(pdata
);
2224 xgbe_enable_mac_interrupts(pdata
);
2226 DBGPR("<--xgbe_init\n");
/*
 * Populate the hardware abstraction operations table (struct xgbe_hw_if)
 * with the device-level implementations defined in this file.  Callers
 * elsewhere in the driver invoke hardware functionality exclusively
 * through these pointers.
 *
 * NOTE(review): the trace strings say "xgbe_init_function_ptrs" while the
 * function is named xgbe_init_function_ptrs_dev — kept as-is since log
 * output is runtime behavior.
 */
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	DBGPR("-->xgbe_init_function_ptrs\n");

	hw_if->tx_complete = xgbe_tx_complete;

	/* Rx filtering / MAC address management */
	hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
	hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
	hw_if->add_mac_addresses = xgbe_add_mac_addresses;
	hw_if->set_mac_address = xgbe_set_mac_address;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	/* VLAN offload (stripping, filtering, hash table update) */
	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	/* PHY/MDIO register access */
	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	/* Link speed selection */
	hw_if->set_gmii_speed = xgbe_set_gmii_speed;
	hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
	hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;

	/* DMA channel enable/disable */
	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	/* Power management paths */
	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	/* Fast path and device lifecycle */
	hw_if->pre_xmit = xgbe_pre_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related Sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;

	/* Flow control configuration */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For RX and TX PBL config */
	hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
	hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
	hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
	hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
	hw_if->config_pblx8 = xgbe_config_pblx8;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	DBGPR("<--xgbe_init_function_ptrs\n");
}