amd-xgbe: Add dma-coherent to device bindings documentation
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1 /*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117 #include <linux/phy.h>
118 #include <linux/clk.h>
119 #include <linux/bitrev.h>
120 #include <linux/crc32.h>
121
122 #include "xgbe.h"
123 #include "xgbe-common.h"
124
125
126 static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
127 unsigned int usec)
128 {
129 unsigned long rate;
130 unsigned int ret;
131
132 DBGPR("-->xgbe_usec_to_riwt\n");
133
134 rate = clk_get_rate(pdata->sysclock);
135
136 /*
137 * Convert the input usec value to the watchdog timer value. Each
138 * watchdog timer value is equivalent to 256 clock cycles.
139 * Calculate the required value as:
140  *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
141 */
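/* Worked example (clock value is illustrative only): with a 125 MHz
 * system clock, rate / 1000000 = 125, so a request of 30 usec becomes
 * (30 * 125) / 256 = 14 watchdog timer units.
 */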
142 ret = (usec * (rate / 1000000)) / 256;
143
144 DBGPR("<--xgbe_usec_to_riwt\n");
145
146 return ret;
147 }
148
149 static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
150 unsigned int riwt)
151 {
152 unsigned long rate;
153 unsigned int ret;
154
155 DBGPR("-->xgbe_riwt_to_usec\n");
156
157 rate = clk_get_rate(pdata->sysclock);
158
159 /*
160 * Convert the input watchdog timer value to the usec value. Each
161 * watchdog timer value is equivalent to 256 clock cycles.
162 * Calculate the required value as:
163  *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
164 */
165 ret = (riwt * 256) / (rate / 1000000);
166
167 DBGPR("<--xgbe_riwt_to_usec\n");
168
169 return ret;
170 }
171
172 static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
173 {
174 struct xgbe_channel *channel;
175 unsigned int i;
176
177 channel = pdata->channel;
178 for (i = 0; i < pdata->channel_count; i++, channel++)
179 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
180 pdata->pblx8);
181
182 return 0;
183 }
184
185 static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
186 {
187 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
188 }
189
190 static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
191 {
192 struct xgbe_channel *channel;
193 unsigned int i;
194
195 channel = pdata->channel;
196 for (i = 0; i < pdata->channel_count; i++, channel++) {
197 if (!channel->tx_ring)
198 break;
199
200 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
201 pdata->tx_pbl);
202 }
203
204 return 0;
205 }
206
207 static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
208 {
209 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
210 }
211
212 static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
213 {
214 struct xgbe_channel *channel;
215 unsigned int i;
216
217 channel = pdata->channel;
218 for (i = 0; i < pdata->channel_count; i++, channel++) {
219 if (!channel->rx_ring)
220 break;
221
222 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
223 pdata->rx_pbl);
224 }
225
226 return 0;
227 }
228
229 static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
230 {
231 struct xgbe_channel *channel;
232 unsigned int i;
233
234 channel = pdata->channel;
235 for (i = 0; i < pdata->channel_count; i++, channel++) {
236 if (!channel->tx_ring)
237 break;
238
239 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
240 pdata->tx_osp_mode);
241 }
242
243 return 0;
244 }
245
246 static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
247 {
248 unsigned int i;
249
250 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
251 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
252
253 return 0;
254 }
255
256 static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
257 {
258 unsigned int i;
259
260 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
261 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
262
263 return 0;
264 }
265
266 static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
267 unsigned int val)
268 {
269 unsigned int i;
270
271 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
272 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
273
274 return 0;
275 }
276
277 static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
278 unsigned int val)
279 {
280 unsigned int i;
281
282 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
283 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
284
285 return 0;
286 }
287
288 static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
289 {
290 struct xgbe_channel *channel;
291 unsigned int i;
292
293 channel = pdata->channel;
294 for (i = 0; i < pdata->channel_count; i++, channel++) {
295 if (!channel->rx_ring)
296 break;
297
298 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
299 pdata->rx_riwt);
300 }
301
302 return 0;
303 }
304
305 static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
306 {
307 return 0;
308 }
309
310 static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
311 {
312 struct xgbe_channel *channel;
313 unsigned int i;
314
315 channel = pdata->channel;
316 for (i = 0; i < pdata->channel_count; i++, channel++) {
317 if (!channel->rx_ring)
318 break;
319
320 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
321 pdata->rx_buf_size);
322 }
323 }
324
325 static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
326 {
327 struct xgbe_channel *channel;
328 unsigned int i;
329
330 channel = pdata->channel;
331 for (i = 0; i < pdata->channel_count; i++, channel++) {
332 if (!channel->tx_ring)
333 break;
334
335 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
336 }
337 }
338
339 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
340 {
341 unsigned int max_q_count, q_count;
342 unsigned int reg, reg_val;
343 unsigned int i;
344
345 /* Clear MTL flow control */
346 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
347 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
348
349 /* Clear MAC flow control */
350 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
351 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
352 reg = MAC_Q0TFCR;
353 for (i = 0; i < q_count; i++) {
354 reg_val = XGMAC_IOREAD(pdata, reg);
355 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
356 XGMAC_IOWRITE(pdata, reg, reg_val);
357
358 reg += MAC_QTFCR_INC;
359 }
360
361 return 0;
362 }
363
364 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
365 {
366 unsigned int max_q_count, q_count;
367 unsigned int reg, reg_val;
368 unsigned int i;
369
370 /* Set MTL flow control */
371 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
372 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
373
374 /* Set MAC flow control */
375 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
376 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
377 reg = MAC_Q0TFCR;
378 for (i = 0; i < q_count; i++) {
379 reg_val = XGMAC_IOREAD(pdata, reg);
380
381 /* Enable transmit flow control */
382 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
383 /* Set pause time */
384 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
385
386 XGMAC_IOWRITE(pdata, reg, reg_val);
387
388 reg += MAC_QTFCR_INC;
389 }
390
391 return 0;
392 }
393
394 static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
395 {
396 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
397
398 return 0;
399 }
400
401 static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
402 {
403 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
404
405 return 0;
406 }
407
408 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
409 {
410 if (pdata->tx_pause)
411 xgbe_enable_tx_flow_control(pdata);
412 else
413 xgbe_disable_tx_flow_control(pdata);
414
415 return 0;
416 }
417
418 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
419 {
420 if (pdata->rx_pause)
421 xgbe_enable_rx_flow_control(pdata);
422 else
423 xgbe_disable_rx_flow_control(pdata);
424
425 return 0;
426 }
427
428 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
429 {
430 xgbe_config_tx_flow_control(pdata);
431 xgbe_config_rx_flow_control(pdata);
432 }
433
434 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
435 {
436 struct xgbe_channel *channel;
437 unsigned int dma_ch_isr, dma_ch_ier;
438 unsigned int i;
439
440 channel = pdata->channel;
441 for (i = 0; i < pdata->channel_count; i++, channel++) {
442 /* Clear all the interrupts which are set */
443 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
444 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
445
446 /* Clear all interrupt enable bits */
447 dma_ch_ier = 0;
448
449                 /* Enable the following interrupts
450 * NIE - Normal Interrupt Summary Enable
451 * AIE - Abnormal Interrupt Summary Enable
452 * FBEE - Fatal Bus Error Enable
453 */
454 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
455 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
456 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
457
458 if (channel->tx_ring) {
459 /* Enable the following Tx interrupts
460 * TIE - Transmit Interrupt Enable (unless polling)
461 */
462 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
463 }
464 if (channel->rx_ring) {
465                         /* Enable the following Rx interrupts
466 * RBUE - Receive Buffer Unavailable Enable
467 * RIE - Receive Interrupt Enable
468 */
469 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
470 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
471 }
472
473 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
474 }
475 }
476
477 static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
478 {
479 unsigned int mtl_q_isr;
480 unsigned int q_count, i;
481
482 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
483 for (i = 0; i < q_count; i++) {
484 /* Clear all the interrupts which are set */
485 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
486 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
487
488 /* No MTL interrupts to be enabled */
489 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
490 }
491 }
492
493 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
494 {
495 /* No MAC interrupts to be enabled */
496 XGMAC_IOWRITE(pdata, MAC_IER, 0);
497
498 /* Enable all counter interrupts */
499 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
500 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
501 }
502
503 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
504 {
505 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
506
507 return 0;
508 }
509
510 static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
511 {
512 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
513
514 return 0;
515 }
516
517 static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
518 {
519 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
520
521 return 0;
522 }
523
524 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
525 unsigned int enable)
526 {
527 unsigned int val = enable ? 1 : 0;
528
529 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
530 return 0;
531
532 DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
533 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
534
535 return 0;
536 }
537
538 static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
539 unsigned int enable)
540 {
541 unsigned int val = enable ? 1 : 0;
542
543 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
544 return 0;
545
546 DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
547 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
548
549 return 0;
550 }
551
552 static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
553 struct netdev_hw_addr *ha, unsigned int *mac_reg)
554 {
555 unsigned int mac_addr_hi, mac_addr_lo;
556 u8 *mac_addr;
557
558 mac_addr_lo = 0;
559 mac_addr_hi = 0;
560
561 if (ha) {
562 mac_addr = (u8 *)&mac_addr_lo;
563 mac_addr[0] = ha->addr[0];
564 mac_addr[1] = ha->addr[1];
565 mac_addr[2] = ha->addr[2];
566 mac_addr[3] = ha->addr[3];
567 mac_addr = (u8 *)&mac_addr_hi;
568 mac_addr[0] = ha->addr[4];
569 mac_addr[1] = ha->addr[5];
570
571 DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
572 *mac_reg);
573
574 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
575 }
576
577 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
578 *mac_reg += MAC_MACA_INC;
579 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
580 *mac_reg += MAC_MACA_INC;
581 }
582
583 static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
584 {
585 struct net_device *netdev = pdata->netdev;
586 struct netdev_hw_addr *ha;
587 unsigned int mac_reg;
588 unsigned int addn_macs;
589
590 mac_reg = MAC_MACA1HR;
591 addn_macs = pdata->hw_feat.addn_mac;
592
593 if (netdev_uc_count(netdev) > addn_macs) {
594 xgbe_set_promiscuous_mode(pdata, 1);
595 } else {
596 netdev_for_each_uc_addr(ha, netdev) {
597 xgbe_set_mac_reg(pdata, ha, &mac_reg);
598 addn_macs--;
599 }
600
601 if (netdev_mc_count(netdev) > addn_macs) {
602 xgbe_set_all_multicast_mode(pdata, 1);
603 } else {
604 netdev_for_each_mc_addr(ha, netdev) {
605 xgbe_set_mac_reg(pdata, ha, &mac_reg);
606 addn_macs--;
607 }
608 }
609 }
610
611 /* Clear remaining additional MAC address entries */
612 while (addn_macs--)
613 xgbe_set_mac_reg(pdata, NULL, &mac_reg);
614 }
615
616 static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
617 {
618 struct net_device *netdev = pdata->netdev;
619 struct netdev_hw_addr *ha;
620 unsigned int hash_reg;
621 unsigned int hash_table_shift, hash_table_count;
622 u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
623 u32 crc;
624 unsigned int i;
625
626 hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
627 hash_table_count = pdata->hw_feat.hash_table_size / 32;
628 memset(hash_table, 0, sizeof(hash_table));
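/* Example: a 256-bit hash table (hash_table_size = 256) gives
 * hash_table_shift = 26 - 2 = 24 and hash_table_count = 8, so the top
 * 8 bits of each CRC select one bit across eight 32-bit MAC_HTRn
 * registers.
 */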
629
630 /* Build the MAC Hash Table register values */
631 netdev_for_each_uc_addr(ha, netdev) {
632 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
633 crc >>= hash_table_shift;
634 hash_table[crc >> 5] |= (1 << (crc & 0x1f));
635 }
636
637 netdev_for_each_mc_addr(ha, netdev) {
638 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
639 crc >>= hash_table_shift;
640 hash_table[crc >> 5] |= (1 << (crc & 0x1f));
641 }
642
643 /* Set the MAC Hash Table registers */
644 hash_reg = MAC_HTR0;
645 for (i = 0; i < hash_table_count; i++) {
646 XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
647 hash_reg += MAC_HTR_INC;
648 }
649 }
650
651 static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
652 {
653 if (pdata->hw_feat.hash_table_size)
654 xgbe_set_mac_hash_table(pdata);
655 else
656 xgbe_set_mac_addn_addrs(pdata);
657
658 return 0;
659 }
660
661 static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
662 {
663 unsigned int mac_addr_hi, mac_addr_lo;
664
665 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
666 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
667 (addr[1] << 8) | (addr[0] << 0);
668
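/* For example, the address 02:11:22:33:44:55 is packed as
 * mac_addr_lo = 0x33221102 and mac_addr_hi = 0x00005544 before being
 * written to the MACA0 register pair.
 */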
669 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
670 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
671
672 return 0;
673 }
674
675 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
676 int mmd_reg)
677 {
678 unsigned int mmd_address;
679 int mmd_data;
680
681 if (mmd_reg & MII_ADDR_C45)
682 mmd_address = mmd_reg & ~MII_ADDR_C45;
683 else
684 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
685
686 /* The PCS registers are accessed using mmio. The underlying APB3
687 * management interface uses indirect addressing to access the MMD
688 * register sets. This requires accessing of the PCS register in two
689 * phases, an address phase and a data phase.
690 *
691 * The mmio interface is based on 32-bit offsets and values. All
692 * register offsets must therefore be adjusted by left shifting the
693 * offset 2 bits and reading 32 bits of data.
694 */
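/* Illustrative example: for MMD 1, register 0x0007, mmd_address is
 * 0x10007; the address phase writes 0x100 to the MMD select register
 * and the data phase reads from offset 0x07 << 2 = 0x1c.
 */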
695 mutex_lock(&pdata->xpcs_mutex);
696 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
697 mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
698 mutex_unlock(&pdata->xpcs_mutex);
699
700 return mmd_data;
701 }
702
703 static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
704 int mmd_reg, int mmd_data)
705 {
706 unsigned int mmd_address;
707
708 if (mmd_reg & MII_ADDR_C45)
709 mmd_address = mmd_reg & ~MII_ADDR_C45;
710 else
711 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
712
713 /* The PCS registers are accessed using mmio. The underlying APB3
714 * management interface uses indirect addressing to access the MMD
715 * register sets. This requires accessing of the PCS register in two
716 * phases, an address phase and a data phase.
717 *
718 * The mmio interface is based on 32-bit offsets and values. All
719 * register offsets must therefore be adjusted by left shifting the
720 * offset 2 bits and reading 32 bits of data.
721 */
722 mutex_lock(&pdata->xpcs_mutex);
723 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
724 XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
725 mutex_unlock(&pdata->xpcs_mutex);
726 }
727
728 static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
729 {
730 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
731 }
732
733 static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
734 {
735 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
736
737 return 0;
738 }
739
740 static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
741 {
742 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
743
744 return 0;
745 }
746
747 static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
748 {
749 /* Put the VLAN tag in the Rx descriptor */
750 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
751
752 /* Don't check the VLAN type */
753 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
754
755 /* Check only C-TAG (0x8100) packets */
756 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
757
758 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
759 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
760
761 /* Enable VLAN tag stripping */
762 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
763
764 return 0;
765 }
766
767 static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
768 {
769 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
770
771 return 0;
772 }
773
774 static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
775 {
776 /* Enable VLAN filtering */
777 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
778
779 /* Enable VLAN Hash Table filtering */
780 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
781
782 /* Disable VLAN tag inverse matching */
783 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
784
785 /* Only filter on the lower 12-bits of the VLAN tag */
786 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
787
788 /* In order for the VLAN Hash Table filtering to be effective,
789 * the VLAN tag identifier in the VLAN Tag Register must not
790 * be zero. Set the VLAN tag identifier to "1" to enable the
791 * VLAN Hash Table filtering. This implies that a VLAN tag of
792 * 1 will always pass filtering.
793 */
794 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
795
796 return 0;
797 }
798
799 static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
800 {
801 /* Disable VLAN filtering */
802 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
803
804 return 0;
805 }
806
807 #ifndef CRCPOLY_LE
808 #define CRCPOLY_LE 0xedb88320
809 #endif
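/* Bit-serial CRC-32 (polynomial 0xedb88320) computed over the low
 * get_bitmask_order(VLAN_VID_MASK) = 12 bits of the little-endian
 * VLAN ID.
 */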
810 static u32 xgbe_vid_crc32_le(__le16 vid_le)
811 {
812 u32 poly = CRCPOLY_LE;
813 u32 crc = ~0;
814 u32 temp = 0;
815 unsigned char *data = (unsigned char *)&vid_le;
816 unsigned char data_byte = 0;
817 int i, bits;
818
819 bits = get_bitmask_order(VLAN_VID_MASK);
820 for (i = 0; i < bits; i++) {
821 if ((i % 8) == 0)
822 data_byte = data[i / 8];
823
824 temp = ((crc & 1) ^ data_byte) & 1;
825 crc >>= 1;
826 data_byte >>= 1;
827
828 if (temp)
829 crc ^= poly;
830 }
831
832 return crc;
833 }
834
835 static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
836 {
837 u32 crc;
838 u16 vid;
839 __le16 vid_le;
840 u16 vlan_hash_table = 0;
841
842 /* Generate the VLAN Hash Table value */
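/* The upper four bits of each bit-reversed CRC (the ">> 28" below)
 * select one of the 16 bits in the VLHT field, so every active VLAN ID
 * maps into a 16-bit hash filter.
 */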
843 for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
844 /* Get the CRC32 value of the VLAN ID */
845 vid_le = cpu_to_le16(vid);
846 crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
847
848 vlan_hash_table |= (1 << crc);
849 }
850
851 /* Set the VLAN Hash Table filtering register */
852 XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
853
854 return 0;
855 }
856
857 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
858 {
859 struct xgbe_ring_desc *rdesc = rdata->rdesc;
860
861 /* Reset the Tx descriptor
862 * Set buffer 1 (lo) address to zero
863 * Set buffer 1 (hi) address to zero
864 * Reset all other control bits (IC, TTSE, B2L & B1L)
865 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
866 */
867 rdesc->desc0 = 0;
868 rdesc->desc1 = 0;
869 rdesc->desc2 = 0;
870 rdesc->desc3 = 0;
871 }
872
873 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
874 {
875 struct xgbe_ring *ring = channel->tx_ring;
876 struct xgbe_ring_data *rdata;
877 struct xgbe_ring_desc *rdesc;
878 int i;
879 int start_index = ring->cur;
880
881 DBGPR("-->tx_desc_init\n");
882
883         /* Initialize all descriptors */
884 for (i = 0; i < ring->rdesc_count; i++) {
885 rdata = XGBE_GET_DESC_DATA(ring, i);
886 rdesc = rdata->rdesc;
887
888 /* Initialize Tx descriptor
889 * Set buffer 1 (lo) address to zero
890 * Set buffer 1 (hi) address to zero
891 * Reset all other control bits (IC, TTSE, B2L & B1L)
892 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
893 * etc)
894 */
895 rdesc->desc0 = 0;
896 rdesc->desc1 = 0;
897 rdesc->desc2 = 0;
898 rdesc->desc3 = 0;
899 }
900
901 /* Make sure everything is written to the descriptor(s) before
902 * telling the device about them
903 */
904 wmb();
905
906 /* Update the total number of Tx descriptors */
907 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
908
909 /* Update the starting address of descriptor ring */
910 rdata = XGBE_GET_DESC_DATA(ring, start_index);
911 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
912 upper_32_bits(rdata->rdesc_dma));
913 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
914 lower_32_bits(rdata->rdesc_dma));
915
916 DBGPR("<--tx_desc_init\n");
917 }
918
919 static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
920 {
921 struct xgbe_ring_desc *rdesc = rdata->rdesc;
922
923 /* Reset the Rx descriptor
924 * Set buffer 1 (lo) address to dma address (lo)
925 * Set buffer 1 (hi) address to dma address (hi)
926 * Set buffer 2 (lo) address to zero
927 * Set buffer 2 (hi) address to zero and set control bits
928 * OWN and INTE
929 */
930 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
931 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
932 rdesc->desc2 = 0;
933
934 rdesc->desc3 = 0;
935 if (rdata->interrupt)
936 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
937
938 /* Since the Rx DMA engine is likely running, make sure everything
939 * is written to the descriptor(s) before setting the OWN bit
940 * for the descriptor
941 */
942 wmb();
943
944 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
945
946 /* Make sure ownership is written to the descriptor */
947 wmb();
948 }
949
950 static void xgbe_rx_desc_init(struct xgbe_channel *channel)
951 {
952 struct xgbe_prv_data *pdata = channel->pdata;
953 struct xgbe_ring *ring = channel->rx_ring;
954 struct xgbe_ring_data *rdata;
955 struct xgbe_ring_desc *rdesc;
956 unsigned int start_index = ring->cur;
957 unsigned int rx_coalesce, rx_frames;
958 unsigned int i;
959
960 DBGPR("-->rx_desc_init\n");
961
962 rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
963 rx_frames = pdata->rx_frames;
964
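/* When Rx coalescing is active, INTE is kept set only on every
 * rx_frames-th descriptor below; e.g. rx_frames = 8 requests an
 * interrupt on descriptors 8, 16, 24, ... of the ring.
 */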
965 /* Initialize all descriptors */
966 for (i = 0; i < ring->rdesc_count; i++) {
967 rdata = XGBE_GET_DESC_DATA(ring, i);
968 rdesc = rdata->rdesc;
969
970 /* Initialize Rx descriptor
971 * Set buffer 1 (lo) address to dma address (lo)
972 * Set buffer 1 (hi) address to dma address (hi)
973 * Set buffer 2 (lo) address to zero
974 * Set buffer 2 (hi) address to zero and set control
975                  *   bits OWN and INTE appropriately
976 */
977 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
978 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
979 rdesc->desc2 = 0;
980 rdesc->desc3 = 0;
981 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
982 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
983 rdata->interrupt = 1;
984 if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
985 /* Clear interrupt on completion bit */
986 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
987 0);
988 rdata->interrupt = 0;
989 }
990 }
991
992 /* Make sure everything is written to the descriptors before
993 * telling the device about them
994 */
995 wmb();
996
997 /* Update the total number of Rx descriptors */
998 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
999
1000 /* Update the starting address of descriptor ring */
1001 rdata = XGBE_GET_DESC_DATA(ring, start_index);
1002 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
1003 upper_32_bits(rdata->rdesc_dma));
1004 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
1005 lower_32_bits(rdata->rdesc_dma));
1006
1007 /* Update the Rx Descriptor Tail Pointer */
1008 rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
1009 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1010 lower_32_bits(rdata->rdesc_dma));
1011
1012 DBGPR("<--rx_desc_init\n");
1013 }
1014
1015 static void xgbe_pre_xmit(struct xgbe_channel *channel)
1016 {
1017 struct xgbe_prv_data *pdata = channel->pdata;
1018 struct xgbe_ring *ring = channel->tx_ring;
1019 struct xgbe_ring_data *rdata;
1020 struct xgbe_ring_desc *rdesc;
1021 struct xgbe_packet_data *packet = &ring->packet_data;
1022 unsigned int csum, tso, vlan;
1023 unsigned int tso_context, vlan_context;
1024 unsigned int tx_coalesce, tx_frames;
1025 int start_index = ring->cur;
1026 int i;
1027
1028 DBGPR("-->xgbe_pre_xmit\n");
1029
1030 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1031 CSUM_ENABLE);
1032 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1033 TSO_ENABLE);
1034 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1035 VLAN_CTAG);
1036
1037 if (tso && (packet->mss != ring->tx.cur_mss))
1038 tso_context = 1;
1039 else
1040 tso_context = 0;
1041
1042 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
1043 vlan_context = 1;
1044 else
1045 vlan_context = 0;
1046
1047 tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
1048 tx_frames = pdata->tx_frames;
1049 if (tx_coalesce && !channel->tx_timer_active)
1050 ring->coalesce_count = 0;
1051
1052 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1053 rdesc = rdata->rdesc;
1054
1055         /* Create a context descriptor if this is a TSO or VLAN packet */
1056 if (tso_context || vlan_context) {
1057 if (tso_context) {
1058 DBGPR(" TSO context descriptor, mss=%u\n",
1059 packet->mss);
1060
1061 /* Set the MSS size */
1062 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
1063 MSS, packet->mss);
1064
1065 /* Mark it as a CONTEXT descriptor */
1066 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1067 CTXT, 1);
1068
1069 /* Indicate this descriptor contains the MSS */
1070 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1071 TCMSSV, 1);
1072
1073 ring->tx.cur_mss = packet->mss;
1074 }
1075
1076 if (vlan_context) {
1077 DBGPR(" VLAN context descriptor, ctag=%u\n",
1078 packet->vlan_ctag);
1079
1080 /* Mark it as a CONTEXT descriptor */
1081 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1082 CTXT, 1);
1083
1084 /* Set the VLAN tag */
1085 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1086 VT, packet->vlan_ctag);
1087
1088 /* Indicate this descriptor contains the VLAN tag */
1089 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1090 VLTV, 1);
1091
1092 ring->tx.cur_vlan_ctag = packet->vlan_ctag;
1093 }
1094
1095 ring->cur++;
1096 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1097 rdesc = rdata->rdesc;
1098 }
1099
1100 /* Update buffer address (for TSO this is the header) */
1101 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1102 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1103
1104 /* Update the buffer length */
1105 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1106 rdata->skb_dma_len);
1107
1108 /* VLAN tag insertion check */
1109 if (vlan)
1110 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
1111 TX_NORMAL_DESC2_VLAN_INSERT);
1112
1113 /* Set IC bit based on Tx coalescing settings */
1114 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1115 if (tx_coalesce && (!tx_frames ||
1116 (++ring->coalesce_count % tx_frames)))
1117 /* Clear IC bit */
1118 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
1119
1120 /* Mark it as First Descriptor */
1121 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
1122
1123 /* Mark it as a NORMAL descriptor */
1124 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1125
1126 /* Set OWN bit if not the first descriptor */
1127 if (ring->cur != start_index)
1128 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1129
1130 if (tso) {
1131 /* Enable TSO */
1132 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
1133 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
1134 packet->tcp_payload_len);
1135 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
1136 packet->tcp_header_len / 4);
1137 } else {
1138 /* Enable CRC and Pad Insertion */
1139 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
1140
1141 /* Enable HW CSUM */
1142 if (csum)
1143 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1144 CIC, 0x3);
1145
1146 /* Set the total length to be transmitted */
1147 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
1148 packet->length);
1149 }
1150
1151 for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
1152 ring->cur++;
1153 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1154 rdesc = rdata->rdesc;
1155
1156 /* Update buffer address */
1157 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1158 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1159
1160 /* Update the buffer length */
1161 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1162 rdata->skb_dma_len);
1163
1164 /* Set IC bit based on Tx coalescing settings */
1165 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1166 if (tx_coalesce && (!tx_frames ||
1167 (++ring->coalesce_count % tx_frames)))
1168 /* Clear IC bit */
1169 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
1170
1171 /* Set OWN bit */
1172 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1173
1174 /* Mark it as NORMAL descriptor */
1175 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1176
1177 /* Enable HW CSUM */
1178 if (csum)
1179 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1180 CIC, 0x3);
1181 }
1182
1183 /* Set LAST bit for the last descriptor */
1184 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
1185
1186 /* In case the Tx DMA engine is running, make sure everything
1187 * is written to the descriptor(s) before setting the OWN bit
1188 * for the first descriptor
1189 */
1190 wmb();
1191
1192 /* Set OWN bit for the first descriptor */
1193 rdata = XGBE_GET_DESC_DATA(ring, start_index);
1194 rdesc = rdata->rdesc;
1195 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1196
1197 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
1198 xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
1199 #endif
1200
1201 /* Make sure ownership is written to the descriptor */
1202 wmb();
1203
1204 /* Issue a poll command to Tx DMA by writing address
1205 * of next immediate free descriptor */
1206 ring->cur++;
1207 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1208 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
1209 lower_32_bits(rdata->rdesc_dma));
1210
1211 /* Start the Tx coalescing timer */
1212 if (tx_coalesce && !channel->tx_timer_active) {
1213 channel->tx_timer_active = 1;
1214 hrtimer_start(&channel->tx_timer,
1215 ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
1216 HRTIMER_MODE_REL);
1217 }
1218
1219 DBGPR(" %s: descriptors %u to %u written\n",
1220 channel->name, start_index & (ring->rdesc_count - 1),
1221 (ring->cur - 1) & (ring->rdesc_count - 1));
1222
1223 DBGPR("<--xgbe_pre_xmit\n");
1224 }
1225
1226 static int xgbe_dev_read(struct xgbe_channel *channel)
1227 {
1228 struct xgbe_ring *ring = channel->rx_ring;
1229 struct xgbe_ring_data *rdata;
1230 struct xgbe_ring_desc *rdesc;
1231 struct xgbe_packet_data *packet = &ring->packet_data;
1232 struct net_device *netdev = channel->pdata->netdev;
1233 unsigned int err, etlt;
1234
1235 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1236
1237 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1238 rdesc = rdata->rdesc;
1239
1240 /* Check for data availability */
1241 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1242 return 1;
1243
1244 #ifdef XGMAC_ENABLE_RX_DESC_DUMP
1245 xgbe_dump_rx_desc(ring, rdesc, ring->cur);
1246 #endif
1247
1248 /* Get the packet length */
1249 rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1250
1251 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
1252 /* Not all the data has been transferred for this packet */
1253 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1254 INCOMPLETE, 1);
1255 return 0;
1256 }
1257
1258 /* This is the last of the data for this packet */
1259 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1260 INCOMPLETE, 0);
1261
1262 /* Set checksum done indicator as appropriate */
1263 if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
1264 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1265 CSUM_DONE, 1);
1266
1267 /* Check for errors (only valid in last descriptor) */
1268 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
1269 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1270 DBGPR(" err=%u, etlt=%#x\n", err, etlt);
1271
1272 if (!err || (err && !etlt)) {
1273 if ((etlt == 0x09) &&
1274 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1275 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1276 VLAN_CTAG, 1);
1277 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
1278 RX_NORMAL_DESC0,
1279 OVT);
1280 DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
1281 }
1282 } else {
1283 if ((etlt == 0x05) || (etlt == 0x06))
1284 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1285 CSUM_DONE, 0);
1286 else
1287 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
1288 FRAME, 1);
1289 }
1290
1291 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
1292 ring->cur & (ring->rdesc_count - 1), ring->cur);
1293
1294 return 0;
1295 }
1296
1297 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
1298 {
1299 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1300 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
1301 }
1302
1303 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
1304 {
1305 /* Rx and Tx share LD bit, so check TDES3.LD bit */
1306 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
1307 }
1308
1309 static int xgbe_enable_int(struct xgbe_channel *channel,
1310 enum xgbe_int int_id)
1311 {
1312 unsigned int dma_ch_ier;
1313
1314 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1315
1316 switch (int_id) {
1317 case XGMAC_INT_DMA_CH_SR_TI:
1318 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
1319 break;
1320 case XGMAC_INT_DMA_CH_SR_TPS:
1321 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
1322 break;
1323 case XGMAC_INT_DMA_CH_SR_TBU:
1324 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
1325 break;
1326 case XGMAC_INT_DMA_CH_SR_RI:
1327 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
1328 break;
1329 case XGMAC_INT_DMA_CH_SR_RBU:
1330 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
1331 break;
1332 case XGMAC_INT_DMA_CH_SR_RPS:
1333 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
1334 break;
1335 case XGMAC_INT_DMA_CH_SR_TI_RI:
1336 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
1337 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
1338 break;
1339 case XGMAC_INT_DMA_CH_SR_FBE:
1340 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
1341 break;
1342 case XGMAC_INT_DMA_ALL:
1343 dma_ch_ier |= channel->saved_ier;
1344 break;
1345 default:
1346 return -1;
1347 }
1348
1349 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1350
1351 return 0;
1352 }
1353
1354 static int xgbe_disable_int(struct xgbe_channel *channel,
1355 enum xgbe_int int_id)
1356 {
1357 unsigned int dma_ch_ier;
1358
1359 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1360
1361 switch (int_id) {
1362 case XGMAC_INT_DMA_CH_SR_TI:
1363 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
1364 break;
1365 case XGMAC_INT_DMA_CH_SR_TPS:
1366 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
1367 break;
1368 case XGMAC_INT_DMA_CH_SR_TBU:
1369 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
1370 break;
1371 case XGMAC_INT_DMA_CH_SR_RI:
1372 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
1373 break;
1374 case XGMAC_INT_DMA_CH_SR_RBU:
1375 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
1376 break;
1377 case XGMAC_INT_DMA_CH_SR_RPS:
1378 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
1379 break;
1380 case XGMAC_INT_DMA_CH_SR_TI_RI:
1381 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
1382 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
1383 break;
1384 case XGMAC_INT_DMA_CH_SR_FBE:
1385 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
1386 break;
1387 case XGMAC_INT_DMA_ALL:
1388 channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
1389 dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
1390 break;
1391 default:
1392 return -1;
1393 }
1394
1395 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1396
1397 return 0;
1398 }
1399
1400 static int xgbe_exit(struct xgbe_prv_data *pdata)
1401 {
1402 unsigned int count = 2000;
1403
1404 DBGPR("-->xgbe_exit\n");
1405
1406 /* Issue a software reset */
1407 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
1408 usleep_range(10, 15);
1409
1410 /* Poll Until Poll Condition */
1411 while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
1412 usleep_range(500, 600);
1413
1414 if (!count)
1415 return -EBUSY;
1416
1417 DBGPR("<--xgbe_exit\n");
1418
1419 return 0;
1420 }
1421
1422 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
1423 {
1424 unsigned int i, count;
1425
1426 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1427 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
1428
1429 /* Poll Until Poll Condition */
1430 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
1431 count = 2000;
1432 while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
1433 MTL_Q_TQOMR, FTQ))
1434 usleep_range(500, 600);
1435
1436 if (!count)
1437 return -EBUSY;
1438 }
1439
1440 return 0;
1441 }
1442
1443 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
1444 {
1445 /* Set enhanced addressing mode */
1446 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
1447
1448 /* Set the System Bus mode */
1449 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
1450 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
1451 }
1452
1453 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
1454 {
1455 unsigned int arcache, awcache;
1456
1457 arcache = 0;
1458 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
1459 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
1460 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
1461 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
1462 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
1463 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
1464 XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
1465
1466 awcache = 0;
1467 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
1468 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
1469 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
1470 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
1471 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
1472 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
1473 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
1474 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
1475 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
1476 }
1477
1478 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
1479 {
1480 unsigned int i;
1481
1482 /* Set Tx to weighted round robin scheduling algorithm (when
1483 * traffic class is using ETS algorithm)
1484 */
1485 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
1486
1487 /* Set Tx traffic classes to strict priority algorithm */
1488 for (i = 0; i < XGBE_TC_CNT; i++)
1489 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
1490
1491 /* Set Rx to strict priority algorithm */
1492 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
1493 }
1494
1495 static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
1496 unsigned char queue_count)
1497 {
1498 unsigned int q_fifo_size = 0;
1499 enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
1500
1501 /* Calculate Tx/Rx fifo share per queue */
1502 switch (fifo_size) {
1503 case 0:
1504 q_fifo_size = XGBE_FIFO_SIZE_B(128);
1505 break;
1506 case 1:
1507 q_fifo_size = XGBE_FIFO_SIZE_B(256);
1508 break;
1509 case 2:
1510 q_fifo_size = XGBE_FIFO_SIZE_B(512);
1511 break;
1512 case 3:
1513 q_fifo_size = XGBE_FIFO_SIZE_KB(1);
1514 break;
1515 case 4:
1516 q_fifo_size = XGBE_FIFO_SIZE_KB(2);
1517 break;
1518 case 5:
1519 q_fifo_size = XGBE_FIFO_SIZE_KB(4);
1520 break;
1521 case 6:
1522 q_fifo_size = XGBE_FIFO_SIZE_KB(8);
1523 break;
1524 case 7:
1525 q_fifo_size = XGBE_FIFO_SIZE_KB(16);
1526 break;
1527 case 8:
1528 q_fifo_size = XGBE_FIFO_SIZE_KB(32);
1529 break;
1530 case 9:
1531 q_fifo_size = XGBE_FIFO_SIZE_KB(64);
1532 break;
1533 case 10:
1534 q_fifo_size = XGBE_FIFO_SIZE_KB(128);
1535 break;
1536 case 11:
1537 q_fifo_size = XGBE_FIFO_SIZE_KB(256);
1538 break;
1539 }
1540 q_fifo_size = q_fifo_size / queue_count;
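/* Example: a hardware fifo_size encoding of 7 (16 KB total) split
 * across 4 queues yields q_fifo_size = 4 KB per queue, which maps to
 * XGMAC_MTL_FIFO_SIZE_4K below.
 */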
1541
1542 /* Set the queue fifo size programmable value */
1543 if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
1544 p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
1545 else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
1546 p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
1547 else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
1548 p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
1549 else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
1550 p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
1551 else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
1552 p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
1553 else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
1554 p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
1555 else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
1556 p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
1557 else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
1558 p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
1559 else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
1560 p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
1561 else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
1562 p_fifo = XGMAC_MTL_FIFO_SIZE_512;
1563 else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
1564 p_fifo = XGMAC_MTL_FIFO_SIZE_256;
1565
1566 return p_fifo;
1567 }
1568
1569 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
1570 {
1571 enum xgbe_mtl_fifo_size fifo_size;
1572 unsigned int i;
1573
1574 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
1575 pdata->hw_feat.tx_q_cnt);
1576
1577 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1578 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
1579
1580 netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
1581 pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
1582 }
1583
1584 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
1585 {
1586 enum xgbe_mtl_fifo_size fifo_size;
1587 unsigned int i;
1588
1589 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
1590 pdata->hw_feat.rx_q_cnt);
1591
1592 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
1593 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
1594
1595 netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
1596 pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
1597 }
1598
1599 static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
1600 {
1601 unsigned int i, reg, reg_val;
1602 unsigned int q_count = pdata->hw_feat.rx_q_cnt;
1603
1604 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
1605 reg = MTL_RQDCM0R;
1606 reg_val = 0;
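/* Each Rx queue occupies one byte of an MTL_RQDCM register (four
 * queues per register, assuming MTL_RQDCM_Q_PER_REG is 4); writing
 * 0x80 into a queue's byte selects dynamic DMA channel mapping. Six
 * queues, for example, would produce 0x80808080 in MTL_RQDCM0R and
 * 0x00008080 in the next register.
 */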
1607 for (i = 0; i < q_count;) {
1608 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
1609
1610 if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
1611 continue;
1612
1613 XGMAC_IOWRITE(pdata, reg, reg_val);
1614
1615 reg += MTL_RQDCM_INC;
1616 reg_val = 0;
1617 }
1618 }
1619
1620 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
1621 {
1622 unsigned int i;
1623
1624 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
1625 /* Activate flow control when less than 4k left in fifo */
1626 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
1627
1628 /* De-activate flow control when more than 6k left in fifo */
1629 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
1630 }
1631 }
1632
1633 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
1634 {
1635 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
1636
1637 /* Filtering is done using perfect filtering and hash filtering */
1638 if (pdata->hw_feat.hash_table_size) {
1639 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
1640 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
1641 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
1642 }
1643 }
1644
1645 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
1646 {
1647 unsigned int val;
1648
1649 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
1650
1651 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1652 }
1653
1654 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
1655 {
1656 if (pdata->netdev->features & NETIF_F_RXCSUM)
1657 xgbe_enable_rx_csum(pdata);
1658 else
1659 xgbe_disable_rx_csum(pdata);
1660 }
1661
1662 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
1663 {
1664 /* Indicate that VLAN Tx CTAGs come from context descriptors */
1665 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
1666 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
1667
1668 /* Set the current VLAN Hash Table register value */
1669 xgbe_update_vlan_hash_table(pdata);
1670
1671 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
1672 xgbe_enable_rx_vlan_filtering(pdata);
1673 else
1674 xgbe_disable_rx_vlan_filtering(pdata);
1675
1676 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1677 xgbe_enable_rx_vlan_stripping(pdata);
1678 else
1679 xgbe_disable_rx_vlan_stripping(pdata);
1680 }
1681
1682 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
1683 {
1684 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1685 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
1686
1687 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
1688 stats->txoctetcount_gb +=
1689 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
1690
1691 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
1692 stats->txframecount_gb +=
1693 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
1694
1695 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
1696 stats->txbroadcastframes_g +=
1697 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1698
1699 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
1700 stats->txmulticastframes_g +=
1701 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1702
1703 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
1704 stats->tx64octets_gb +=
1705 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
1706
1707 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
1708 stats->tx65to127octets_gb +=
1709 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
1710
1711 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
1712 stats->tx128to255octets_gb +=
1713 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
1714
1715 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
1716 stats->tx256to511octets_gb +=
1717 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
1718
1719 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
1720 stats->tx512to1023octets_gb +=
1721 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1722
1723 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
1724 stats->tx1024tomaxoctets_gb +=
1725 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1726
1727 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
1728 stats->txunicastframes_gb +=
1729 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1730
1731 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
1732 stats->txmulticastframes_gb +=
1733 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1734
1735 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
1736 stats->txbroadcastframes_g +=
1737 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1738
1739 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
1740 stats->txunderflowerror +=
1741 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
1742
1743 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
1744 stats->txoctetcount_g +=
1745 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
1746
1747 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
1748 stats->txframecount_g +=
1749 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
1750
1751 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
1752 stats->txpauseframes +=
1753 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
1754
1755 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
1756 stats->txvlanframes_g +=
1757 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
1758 }
1759
1760 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
1761 {
1762 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1763 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
1764
1765 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
1766 stats->rxframecount_gb +=
1767 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
1768
1769 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
1770 stats->rxoctetcount_gb +=
1771 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
1772
1773 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
1774 stats->rxoctetcount_g +=
1775 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
1776
1777 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
1778 stats->rxbroadcastframes_g +=
1779 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
1780
1781 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
1782 stats->rxmulticastframes_g +=
1783 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
1784
1785 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
1786 stats->rxcrcerror +=
1787 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
1788
1789 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
1790 stats->rxrunterror +=
1791 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
1792
1793 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
1794 stats->rxjabbererror +=
1795 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
1796
1797 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
1798 stats->rxundersize_g +=
1799 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
1800
1801 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
1802 stats->rxoversize_g +=
1803 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
1804
1805 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
1806 stats->rx64octets_gb +=
1807 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
1808
1809 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
1810 stats->rx65to127octets_gb +=
1811 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
1812
1813 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
1814 stats->rx128to255octets_gb +=
1815 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
1816
1817 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
1818 stats->rx256to511octets_gb +=
1819 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
1820
1821 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
1822 stats->rx512to1023octets_gb +=
1823 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
1824
1825 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
1826 stats->rx1024tomaxoctets_gb +=
1827 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
1828
1829 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
1830 stats->rxunicastframes_g +=
1831 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
1832
1833 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
1834 stats->rxlengtherror +=
1835 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
1836
1837 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
1838 stats->rxoutofrangetype +=
1839 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
1840
1841 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
1842 stats->rxpauseframes +=
1843 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
1844
1845 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
1846 stats->rxfifooverflow +=
1847 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
1848
1849 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
1850 stats->rxvlanframes_gb +=
1851 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
1852
1853 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
1854 stats->rxwatchdogerror +=
1855 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1856 }
1857
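/* Fold every hardware MMC counter into the driver's 64-bit statistics.
 * The counters are frozen (MMC_CR MCF) for the duration of the reads so
 * that the snapshot is self-consistent.
 */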
1858 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
1859 {
1860 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1861
1862 /* Freeze counters */
1863 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
1864
1865 stats->txoctetcount_gb +=
1866 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
1867
1868 stats->txframecount_gb +=
1869 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
1870
1871 stats->txbroadcastframes_g +=
1872 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1873
1874 stats->txmulticastframes_g +=
1875 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1876
1877 stats->tx64octets_gb +=
1878 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
1879
1880 stats->tx65to127octets_gb +=
1881 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
1882
1883 stats->tx128to255octets_gb +=
1884 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
1885
1886 stats->tx256to511octets_gb +=
1887 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
1888
1889 stats->tx512to1023octets_gb +=
1890 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1891
1892 stats->tx1024tomaxoctets_gb +=
1893 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1894
1895 stats->txunicastframes_gb +=
1896 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1897
1898 stats->txmulticastframes_gb +=
1899 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1900
1901 	stats->txbroadcastframes_gb +=
1902 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1903
1904 stats->txunderflowerror +=
1905 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
1906
1907 stats->txoctetcount_g +=
1908 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
1909
1910 stats->txframecount_g +=
1911 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
1912
1913 stats->txpauseframes +=
1914 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
1915
1916 stats->txvlanframes_g +=
1917 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
1918
1919 stats->rxframecount_gb +=
1920 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
1921
1922 stats->rxoctetcount_gb +=
1923 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
1924
1925 stats->rxoctetcount_g +=
1926 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
1927
1928 stats->rxbroadcastframes_g +=
1929 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
1930
1931 stats->rxmulticastframes_g +=
1932 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
1933
1934 stats->rxcrcerror +=
1935 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
1936
1937 stats->rxrunterror +=
1938 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
1939
1940 stats->rxjabbererror +=
1941 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
1942
1943 stats->rxundersize_g +=
1944 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
1945
1946 stats->rxoversize_g +=
1947 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
1948
1949 stats->rx64octets_gb +=
1950 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
1951
1952 stats->rx65to127octets_gb +=
1953 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
1954
1955 stats->rx128to255octets_gb +=
1956 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
1957
1958 stats->rx256to511octets_gb +=
1959 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
1960
1961 stats->rx512to1023octets_gb +=
1962 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
1963
1964 stats->rx1024tomaxoctets_gb +=
1965 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
1966
1967 stats->rxunicastframes_g +=
1968 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
1969
1970 stats->rxlengtherror +=
1971 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
1972
1973 stats->rxoutofrangetype +=
1974 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
1975
1976 stats->rxpauseframes +=
1977 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
1978
1979 stats->rxfifooverflow +=
1980 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
1981
1982 stats->rxvlanframes_gb +=
1983 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
1984
1985 stats->rxwatchdogerror +=
1986 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1987
1988 /* Un-freeze counters */
1989 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
1990 }
1991
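/* Configure the MMC block: reset-on-read clears each hardware counter as
 * it is read (so the software accumulators above keep the running totals
 * without double counting), and all counters are cleared once at start-up.
 */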
1992 static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
1993 {
1994 /* Set counters to reset on read */
1995 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
1996
1997 /* Reset the counters */
1998 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
1999 }
2000
2001 static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
2002 {
2003 struct xgbe_channel *channel;
2004 unsigned int i;
2005
2006 /* Enable each Tx DMA channel */
2007 channel = pdata->channel;
2008 for (i = 0; i < pdata->channel_count; i++, channel++) {
2009 if (!channel->tx_ring)
2010 break;
2011
2012 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
2013 }
2014
2015 /* Enable each Tx queue */
2016 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
2017 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
2018 MTL_Q_ENABLED);
2019
2020 /* Enable MAC Tx */
2021 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
2022 }
2023
2024 static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
2025 {
2026 struct xgbe_channel *channel;
2027 unsigned int i;
2028
2029 /* Disable MAC Tx */
2030 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
2031
2032 /* Disable each Tx queue */
2033 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
2034 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
2035
2036 /* Disable each Tx DMA channel */
2037 channel = pdata->channel;
2038 for (i = 0; i < pdata->channel_count; i++, channel++) {
2039 if (!channel->tx_ring)
2040 break;
2041
2042 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
2043 }
2044 }
2045
2046 static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
2047 {
2048 struct xgbe_channel *channel;
2049 unsigned int reg_val, i;
2050
2051 /* Enable each Rx DMA channel */
2052 channel = pdata->channel;
2053 for (i = 0; i < pdata->channel_count; i++, channel++) {
2054 if (!channel->rx_ring)
2055 break;
2056
2057 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
2058 }
2059
2060 /* Enable each Rx queue */
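	/* MAC_RQC0R holds a 2-bit enable field per receive queue; the value
	 * 0x02 is assumed (per the Synopsys XGMAC documentation) to route
	 * the queue to the DCB/generic receive path.
	 */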
2061 reg_val = 0;
2062 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
2063 reg_val |= (0x02 << (i << 1));
2064 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
2065
2066 /* Enable MAC Rx */
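	/* DCRCC, CST and ACS configure CRC checking and pad/FCS stripping on
	 * receive (exact bit semantics per the Synopsys databook); RE enables
	 * the receiver itself.
	 */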
2067 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
2068 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
2069 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
2070 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
2071 }
2072
2073 static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
2074 {
2075 struct xgbe_channel *channel;
2076 unsigned int i;
2077
2078 /* Disable MAC Rx */
2079 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
2080 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
2081 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
2082 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
2083
2084 /* Disable each Rx queue */
2085 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
2086
2087 /* Disable each Rx DMA channel */
2088 channel = pdata->channel;
2089 for (i = 0; i < pdata->channel_count; i++, channel++) {
2090 if (!channel->rx_ring)
2091 break;
2092
2093 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
2094 }
2095 }
2096
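/* The powerup/powerdown helpers below differ from enable/disable in that
 * they toggle only the DMA channels and the MAC enable bits, leaving the
 * MTL queue configuration untouched; they back the driver's power
 * management (suspend/resume) transitions.
 */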
2097 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
2098 {
2099 struct xgbe_channel *channel;
2100 unsigned int i;
2101
2102 /* Enable each Tx DMA channel */
2103 channel = pdata->channel;
2104 for (i = 0; i < pdata->channel_count; i++, channel++) {
2105 if (!channel->tx_ring)
2106 break;
2107
2108 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
2109 }
2110
2111 /* Enable MAC Tx */
2112 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
2113 }
2114
2115 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
2116 {
2117 struct xgbe_channel *channel;
2118 unsigned int i;
2119
2120 /* Disable MAC Tx */
2121 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
2122
2123 /* Disable each Tx DMA channel */
2124 channel = pdata->channel;
2125 for (i = 0; i < pdata->channel_count; i++, channel++) {
2126 if (!channel->tx_ring)
2127 break;
2128
2129 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
2130 }
2131 }
2132
2133 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
2134 {
2135 struct xgbe_channel *channel;
2136 unsigned int i;
2137
2138 /* Enable each Rx DMA channel */
2139 channel = pdata->channel;
2140 for (i = 0; i < pdata->channel_count; i++, channel++) {
2141 if (!channel->rx_ring)
2142 break;
2143
2144 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
2145 }
2146 }
2147
2148 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
2149 {
2150 struct xgbe_channel *channel;
2151 unsigned int i;
2152
2153 /* Disable each Rx DMA channel */
2154 channel = pdata->channel;
2155 for (i = 0; i < pdata->channel_count; i++, channel++) {
2156 if (!channel->rx_ring)
2157 break;
2158
2159 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
2160 }
2161 }
2162
2163 static int xgbe_init(struct xgbe_prv_data *pdata)
2164 {
2165 struct xgbe_desc_if *desc_if = &pdata->desc_if;
2166 int ret;
2167
2168 DBGPR("-->xgbe_init\n");
2169
2170 /* Flush Tx queues */
2171 ret = xgbe_flush_tx_queues(pdata);
2172 if (ret)
2173 return ret;
2174
2175 /*
2176 * Initialize DMA related features
2177 */
2178 xgbe_config_dma_bus(pdata);
2179 xgbe_config_dma_cache(pdata);
2180 xgbe_config_osp_mode(pdata);
2181 xgbe_config_pblx8(pdata);
2182 xgbe_config_tx_pbl_val(pdata);
2183 xgbe_config_rx_pbl_val(pdata);
2184 xgbe_config_rx_coalesce(pdata);
2185 xgbe_config_tx_coalesce(pdata);
2186 xgbe_config_rx_buffer_size(pdata);
2187 xgbe_config_tso_mode(pdata);
2188 desc_if->wrapper_tx_desc_init(pdata);
2189 desc_if->wrapper_rx_desc_init(pdata);
2190 xgbe_enable_dma_interrupts(pdata);
2191
2192 /*
2193 * Initialize MTL related features
2194 */
2195 xgbe_config_mtl_mode(pdata);
2196 xgbe_config_rx_queue_mapping(pdata);
2197 /*TODO: Program the priorities mapped to the Selected Traffic Classes
2198 in MTL_TC_Prty_Map0-3 registers */
2199 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
2200 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
2201 xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
2202 xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
2203 xgbe_config_tx_fifo_size(pdata);
2204 xgbe_config_rx_fifo_size(pdata);
2205 xgbe_config_flow_control_threshold(pdata);
2206 /*TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
2207 /*TODO: Error Packet and undersized good Packet forwarding enable
2208 (FEP and FUP)
2209 */
2210 xgbe_enable_mtl_interrupts(pdata);
2211
2212 /* Transmit Class Weight */
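	/* 0x10 is assumed to be the default DWRR quantum/weight for the
	 * single traffic class in use.
	 */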
2213 XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
2214
2215 /*
2216 * Initialize MAC related features
2217 */
2218 xgbe_config_mac_address(pdata);
2219 xgbe_config_jumbo_enable(pdata);
2220 xgbe_config_flow_control(pdata);
2221 xgbe_config_checksum_offload(pdata);
2222 xgbe_config_vlan_support(pdata);
2223 xgbe_config_mmc(pdata);
2224 xgbe_enable_mac_interrupts(pdata);
2225
2226 DBGPR("<--xgbe_init\n");
2227
2228 return 0;
2229 }
2230
2231 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
2232 {
2233 DBGPR("-->xgbe_init_function_ptrs\n");
2234
2235 hw_if->tx_complete = xgbe_tx_complete;
2236
2237 hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
2238 hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
2239 hw_if->add_mac_addresses = xgbe_add_mac_addresses;
2240 hw_if->set_mac_address = xgbe_set_mac_address;
2241
2242 hw_if->enable_rx_csum = xgbe_enable_rx_csum;
2243 hw_if->disable_rx_csum = xgbe_disable_rx_csum;
2244
2245 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
2246 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
2247 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
2248 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
2249 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
2250
2251 hw_if->read_mmd_regs = xgbe_read_mmd_regs;
2252 hw_if->write_mmd_regs = xgbe_write_mmd_regs;
2253
2254 hw_if->set_gmii_speed = xgbe_set_gmii_speed;
2255 hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
2256 hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
2257
2258 hw_if->enable_tx = xgbe_enable_tx;
2259 hw_if->disable_tx = xgbe_disable_tx;
2260 hw_if->enable_rx = xgbe_enable_rx;
2261 hw_if->disable_rx = xgbe_disable_rx;
2262
2263 hw_if->powerup_tx = xgbe_powerup_tx;
2264 hw_if->powerdown_tx = xgbe_powerdown_tx;
2265 hw_if->powerup_rx = xgbe_powerup_rx;
2266 hw_if->powerdown_rx = xgbe_powerdown_rx;
2267
2268 hw_if->pre_xmit = xgbe_pre_xmit;
2269 hw_if->dev_read = xgbe_dev_read;
2270 hw_if->enable_int = xgbe_enable_int;
2271 hw_if->disable_int = xgbe_disable_int;
2272 hw_if->init = xgbe_init;
2273 hw_if->exit = xgbe_exit;
2274
2275 /* Descriptor related Sequences have to be initialized here */
2276 hw_if->tx_desc_init = xgbe_tx_desc_init;
2277 hw_if->rx_desc_init = xgbe_rx_desc_init;
2278 hw_if->tx_desc_reset = xgbe_tx_desc_reset;
2279 hw_if->rx_desc_reset = xgbe_rx_desc_reset;
2280 hw_if->is_last_desc = xgbe_is_last_desc;
2281 hw_if->is_context_desc = xgbe_is_context_desc;
2282
2283 	/* For flow control */
2284 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
2285 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
2286
2287 	/* For RX and TX coalescing */
2288 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
2289 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
2290 hw_if->usec_to_riwt = xgbe_usec_to_riwt;
2291 hw_if->riwt_to_usec = xgbe_riwt_to_usec;
2292
2293 /* For RX and TX threshold config */
2294 hw_if->config_rx_threshold = xgbe_config_rx_threshold;
2295 hw_if->config_tx_threshold = xgbe_config_tx_threshold;
2296
2297 /* For RX and TX Store and Forward Mode config */
2298 hw_if->config_rsf_mode = xgbe_config_rsf_mode;
2299 hw_if->config_tsf_mode = xgbe_config_tsf_mode;
2300
2301 /* For TX DMA Operating on Second Frame config */
2302 hw_if->config_osp_mode = xgbe_config_osp_mode;
2303
2304 /* For RX and TX PBL config */
2305 hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
2306 hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
2307 hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
2308 hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
2309 hw_if->config_pblx8 = xgbe_config_pblx8;
2310
2311 /* For MMC statistics support */
2312 hw_if->tx_mmc_int = xgbe_tx_mmc_int;
2313 hw_if->rx_mmc_int = xgbe_rx_mmc_int;
2314 hw_if->read_mmc_stats = xgbe_read_mmc_stats;
2315
2316 DBGPR("<--xgbe_init_function_ptrs\n");
2317 }