stmmac: move the dma out from the main
[deliverable/linux.git] / drivers / net / stmmac / gmac.c
CommitLineData
47dd7a54
GC
1/*******************************************************************************
2 This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code.
5
6 Copyright (C) 2007-2009 STMicroelectronics Ltd
7
8 This program is free software; you can redistribute it and/or modify it
9 under the terms and conditions of the GNU General Public License,
10 version 2, as published by the Free Software Foundation.
11
12 This program is distributed in the hope it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 more details.
16
17 You should have received a copy of the GNU General Public License along with
18 this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20
21 The full GNU General Public License is included in this distribution in
22 the file called "COPYING".
23
24 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/
26
27#include <linux/netdevice.h>
28#include <linux/crc32.h>
29#include <linux/mii.h>
30#include <linux/phy.h>
31
32#include "stmmac.h"
33#include "gmac.h"
aec7ff27 34#include "dwmac_dma.h"
47dd7a54
GC
35
/* Compile-time debug switches: define GMAC_DEBUG to turn the DBG()
 * traces below into printk calls, and FRAME_FILTER_DEBUG to enable
 * the receive-all diagnostics in gmac_set_filter(). */
#undef GMAC_DEBUG
/*#define GMAC_DEBUG*/
#undef FRAME_FILTER_DEBUG
/*#define FRAME_FILTER_DEBUG*/
#ifdef GMAC_DEBUG
#define DBG(fmt, args...)  printk(fmt, ## args)
#else
#define DBG(fmt, args...)  do { } while (0)
#endif
45
/* Dump the first 55 GMAC core CSRs; debugging aid for MAC bring-up.
 * Fix: the base address was printed through an (unsigned int) cast,
 * which truncates it on 64-bit platforms; print it as unsigned long. */
static void gmac_dump_regs(unsigned long ioaddr)
{
	int i;

	pr_info("\t----------------------------------------------\n"
		"\t  GMAC registers (base addr = 0x%08lx)\n"
		"\t----------------------------------------------\n",
		ioaddr);

	for (i = 0; i < 55; i++) {
		int offset = i * 4;
		pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
			offset, readl(ioaddr + offset));
	}
}
61
62static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
63{
64 u32 value = readl(ioaddr + DMA_BUS_MODE);
65 /* DMA SW reset */
66 value |= DMA_BUS_MODE_SFT_RESET;
67 writel(value, ioaddr + DMA_BUS_MODE);
68 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
69
70 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
71 ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
72 (pbl << DMA_BUS_MODE_RPBL_SHIFT));
73
74#ifdef CONFIG_STMMAC_DA
75 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
76#endif
77 writel(value, ioaddr + DMA_BUS_MODE);
78
79 /* Mask interrupts by writing to CSR7 */
80 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
81
82 /* The base address of the RX/TX descriptor lists must be written into
83 * DMA CSR3 and CSR4, respectively. */
84 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
85 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
86
87 return 0;
88}
89
/* Transmit FIFO flush operation: set the self-clearing FTF bit in
 * CSR6 and wait for the hardware to drain the TX FIFO. */
static void gmac_flush_tx_fifo(unsigned long ioaddr)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);
	writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);

	/* NOTE(review): unbounded busy-wait; if the block is dead or
	 * unclocked this spins forever — consider a bounded poll. */
	do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
98
99static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
100 int rxmode)
101{
102 u32 csr6 = readl(ioaddr + DMA_CONTROL);
103
104 if (txmode == SF_DMA_MODE) {
105 DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
106 /* Transmit COE type 2 cannot be done in cut-through mode. */
107 csr6 |= DMA_CONTROL_TSF;
108 /* Operating on second frame increase the performance
109 * especially when transmit store-and-forward is used.*/
110 csr6 |= DMA_CONTROL_OSF;
111 } else {
112 DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
113 " (threshold = %d)\n", txmode);
114 csr6 &= ~DMA_CONTROL_TSF;
115 csr6 &= DMA_CONTROL_TC_TX_MASK;
af901ca1 116 /* Set the transmit threshold */
47dd7a54
GC
117 if (txmode <= 32)
118 csr6 |= DMA_CONTROL_TTC_32;
119 else if (txmode <= 64)
120 csr6 |= DMA_CONTROL_TTC_64;
121 else if (txmode <= 128)
122 csr6 |= DMA_CONTROL_TTC_128;
123 else if (txmode <= 192)
124 csr6 |= DMA_CONTROL_TTC_192;
125 else
126 csr6 |= DMA_CONTROL_TTC_256;
127 }
128
129 if (rxmode == SF_DMA_MODE) {
130 DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
131 csr6 |= DMA_CONTROL_RSF;
132 } else {
133 DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
134 " (threshold = %d)\n", rxmode);
135 csr6 &= ~DMA_CONTROL_RSF;
136 csr6 &= DMA_CONTROL_TC_RX_MASK;
137 if (rxmode <= 32)
138 csr6 |= DMA_CONTROL_RTC_32;
139 else if (rxmode <= 64)
140 csr6 |= DMA_CONTROL_RTC_64;
141 else if (rxmode <= 96)
142 csr6 |= DMA_CONTROL_RTC_96;
143 else
144 csr6 |= DMA_CONTROL_RTC_128;
145 }
146
147 writel(csr6, ioaddr + DMA_CONTROL);
148 return;
149}
150
/* Not yet implemented --- no RMON module; kept as a stub so the
 * dma_diagnostic_fr callback slot is always valid. */
static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
				   unsigned long ioaddr)
{
}
157
158static void gmac_dump_dma_regs(unsigned long ioaddr)
159{
160 int i;
161 pr_info(" DMA registers\n");
162 for (i = 0; i < 22; i++) {
163 if ((i < 9) || (i > 17)) {
164 int offset = i * 4;
165 pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
166 (DMA_BUS_MODE + offset),
167 readl(ioaddr + DMA_BUS_MODE + offset));
168 }
169 }
170 return;
171}
172
/* Parse the status word of a completed enhanced TX descriptor.
 * @data: opaque pointer, actually the device's net_device_stats
 * @x: driver-private extra error counters
 * @p: descriptor just released by the DMA
 * @ioaddr: register base, needed to flush the TX FIFO on some errors
 * Returns 0 when the frame was sent cleanly, -1 when the descriptor
 * reports an error summary. */
static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
				struct dma_desc *p, unsigned long ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
		if (unlikely(p->des01.etx.jabber_timeout)) {
			DBG(KERN_ERR "\tjabber_timeout error\n");
			x->tx_jabber++;
		}

		if (unlikely(p->des01.etx.frame_flushed)) {
			DBG(KERN_ERR "\tframe_flushed error\n");
			x->tx_frame_flushed++;
			/* recover the FIFO after a flushed frame */
			gmac_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			DBG(KERN_ERR "\tloss_carrier error\n");
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			DBG(KERN_ERR "\tno_carrier error\n");
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		/* both collision causes fold the hardware collision count
		 * into the generic stats */
		if (unlikely(p->des01.etx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_collisions)) {
			DBG(KERN_ERR "\texcessive_collisions\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_deferral)) {
			DBG(KERN_INFO "\texcessive tx_deferral\n");
			x->tx_deferred++;
		}

		if (unlikely(p->des01.etx.underflow_error)) {
			DBG(KERN_ERR "\tunderflow error\n");
			gmac_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error)) {
			DBG(KERN_ERR "\tTX IP header csum error\n");
			x->tx_ip_header_error++;
		}

		if (unlikely(p->des01.etx.payload_error)) {
			DBG(KERN_ERR "\tAddr/Payload csum error\n");
			x->tx_payload_error++;
			gmac_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	/* "deferred" is informational only and not part of the summary */
	if (unlikely(p->des01.etx.deferred)) {
		DBG(KERN_INFO "GMAC TX status: tx deferred\n");
		x->tx_deferred++;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame) {
		DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
		x->tx_vlan++;
	}
#endif

	return ret;
}
248
/* Length programmed into buffer 1 of an enhanced TX descriptor. */
static int gmac_get_tx_len(struct dma_desc *p)
{
	return p->des01.etx.buffer1_size;
}
253
254static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
255{
256 int ret = good_frame;
257 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
258
259 /* bits 5 7 0 | Frame status
260 * ----------------------------------------------------------
261 * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects)
262 * 1 0 0 | IPv4/6 No CSUM errorS.
263 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
264 * 1 1 0 | IPv4/6 CSUM IP HR error
265 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
266 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
267 * 0 1 1 | COE bypassed.. no IPv4/6 frame
268 * 0 1 0 | Reserved.
269 */
270 if (status == 0x0) {
271 DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
272 ret = good_frame;
273 } else if (status == 0x4) {
274 DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
275 ret = good_frame;
276 } else if (status == 0x5) {
277 DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
278 ret = csum_none;
279 } else if (status == 0x6) {
280 DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
281 ret = csum_none;
282 } else if (status == 0x7) {
283 DBG(KERN_ERR
284 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
285 ret = csum_none;
286 } else if (status == 0x1) {
287 DBG(KERN_ERR
288 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
289 ret = discard_frame;
290 } else if (status == 0x3) {
291 DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
292 ret = discard_frame;
293 }
294 return ret;
295}
296
/* Parse the status word of a completed enhanced RX descriptor.
 * @data: opaque pointer, actually the device's net_device_stats
 * @x: driver-private extra error counters
 * @p: descriptor just released by the DMA
 * Returns good_frame, csum_none or discard_frame. */
static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
				struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions++;
			/* NOTE(review): collisions is bumped twice for one
			 * late collision — looks like a duplicated line;
			 * confirm against the databook before changing. */
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * It doesn't match with the information reported into the databook.
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers.
	 * NOTE(review): this assignment unconditionally overwrites the
	 * discard_frame verdict set by the error-summary block above. */
	ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

	if (unlikely(p->des01.erx.dribbling)) {
		DBG(KERN_ERR "GMAC RX: dribbling error\n");
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		DBG(KERN_ERR "GMAC RX: length_error error\n");
		x->rx_lenght++;	/* (field name misspelled in stmmac.h) */
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif
	return ret;
}
373
374static void gmac_irq_status(unsigned long ioaddr)
375{
376 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
377
378 /* Not used events (e.g. MMC interrupts) are not handled. */
379 if ((intr_status & mmc_tx_irq))
380 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
381 readl(ioaddr + GMAC_MMC_TX_INTR));
382 if (unlikely(intr_status & mmc_rx_irq))
383 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
384 readl(ioaddr + GMAC_MMC_RX_INTR));
385 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
386 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
387 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
388 if (unlikely(intr_status & pmt_irq)) {
389 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
390 /* clear the PMT bits 5 and 6 by reading the PMT
391 * status register. */
392 readl(ioaddr + GMAC_PMT);
393 }
394
395 return;
396}
397
398static void gmac_core_init(unsigned long ioaddr)
399{
400 u32 value = readl(ioaddr + GMAC_CONTROL);
401 value |= GMAC_CORE_INIT;
402 writel(value, ioaddr + GMAC_CONTROL);
403
47dd7a54
GC
404 /* Freeze MMC counters */
405 writel(0x8, ioaddr + GMAC_MMC_CTRL);
406 /* Mask GMAC interrupts */
407 writel(0x207, ioaddr + GMAC_INT_MASK);
408
409#ifdef STMMAC_VLAN_TAG_USED
410 /* Tag detection without filtering */
411 writel(0x0, ioaddr + GMAC_VLAN_TAG);
412#endif
413 return;
414}
415
/* Program perfect-filter MAC address entry @reg_n. */
static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_set_mac_addr(ioaddr, addr,
			    GMAC_ADDR_HIGH(reg_n), GMAC_ADDR_LOW(reg_n));
}
422
/* Read back perfect-filter MAC address entry @reg_n into @addr. */
static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_get_mac_addr(ioaddr, addr,
			    GMAC_ADDR_HIGH(reg_n), GMAC_ADDR_LOW(reg_n));
}
429
/* Program the RX frame filter from the device's address lists:
 * promiscuous / pass-all-multicast flags, the 64-bit multicast hash,
 * and the perfect unicast filter entries (entry 0 is the device's
 * own address, so extra unicast entries start at 1). */
static void gmac_set_filter(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	unsigned int value = 0;

	DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
	    __func__, dev->mc_count, dev->uc.count);

	if (dev->flags & IFF_PROMISC)
		value = GMAC_FRAME_FILTER_PR;
	else if ((dev->mc_count > HASH_TABLE_SIZE)
		   || (dev->flags & IFF_ALLMULTI)) {
		value = GMAC_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
		writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
	} else if (dev->mc_count > 0) {
		int i;
		u32 mc_filter[2];
		struct dev_mc_list *mclist;

		/* Hash filter for multicast */
		value = GMAC_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list;
		     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
			/* The upper 6 bits of the calculated CRC are used to
			   index the contents of the hash table */
			int bit_nr =
			    bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering)*/
	if (dev->uc.count > GMAC_MAX_UNICAST_ADDRESSES)
		/* Switch to promiscuous mode if more than 16 addrs
		   are required */
		value |= GMAC_FRAME_FILTER_PR;
	else {
		int reg = 1;
		struct netdev_hw_addr *ha;

		list_for_each_entry(ha, &dev->uc.list, list) {
			gmac_set_umac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= GMAC_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + GMAC_FRAME_FILTER);

	DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
	    "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
	    readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));

	return;
}
496
497static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
498 unsigned int fc, unsigned int pause_time)
499{
500 unsigned int flow = 0;
501
502 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
503 if (fc & FLOW_RX) {
504 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
505 flow |= GMAC_FLOW_CTRL_RFE;
506 }
507 if (fc & FLOW_TX) {
508 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
509 flow |= GMAC_FLOW_CTRL_TFE;
510 }
511
512 if (duplex) {
513 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
514 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
515 }
516
517 writel(flow, ioaddr + GMAC_FLOW_CTRL);
518 return;
519}
520
521static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
522{
523 unsigned int pmt = 0;
524
525 if (mode == WAKE_MAGIC) {
526 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
527 pmt |= power_down | magic_pkt_en;
528 } else if (mode == WAKE_UCAST) {
529 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
530 pmt |= global_unicast;
531 }
532
533 writel(pmt, ioaddr + GMAC_PMT);
534 return;
535}
536
537static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
538 int disable_rx_ic)
539{
540 int i;
541 for (i = 0; i < ring_size; i++) {
542 p->des01.erx.own = 1;
543 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
544 /* To support jumbo frames */
545 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
546 if (i == ring_size - 1)
547 p->des01.erx.end_ring = 1;
548 if (disable_rx_ic)
549 p->des01.erx.disable_ic = 1;
550 p++;
551 }
552 return;
553}
554
555static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
556{
557 int i;
558
559 for (i = 0; i < ring_size; i++) {
560 p->des01.etx.own = 0;
561 if (i == ring_size - 1)
562 p->des01.etx.end_ring = 1;
563 p++;
564 }
565
566 return;
567}
568
/* Non-zero while the DMA still owns this TX descriptor. */
static int gmac_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}
573
/* Non-zero while the DMA still owns this RX descriptor. */
static int gmac_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}
578
/* Hand this TX descriptor to the DMA engine. */
static void gmac_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}
583
/* Hand this RX descriptor to the DMA engine. */
static void gmac_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}
588
/* Non-zero when this descriptor holds the last segment of a frame. */
static int gmac_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}
593
594static void gmac_release_tx_desc(struct dma_desc *p)
595{
596 int ter = p->des01.etx.end_ring;
597
598 memset(p, 0, sizeof(struct dma_desc));
599 p->des01.etx.end_ring = ter;
600
601 return;
602}
603
604static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
605 int csum_flag)
606{
607 p->des01.etx.first_segment = is_fs;
608 if (unlikely(len > BUF_SIZE_4KiB)) {
609 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
610 p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
611 } else {
612 p->des01.etx.buffer1_size = len;
613 }
614 if (likely(csum_flag))
615 p->des01.etx.checksum_insertion = cic_full;
616}
617
/* Suppress the completion interrupt for this TX descriptor. */
static void gmac_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}
622
/* Mark the last descriptor of a frame and request an interrupt on
 * its completion. */
static void gmac_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}
628
/* Length of the received frame as reported in RDES0. */
static int gmac_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.erx.frame_length;
}
633
/* MAC-core callbacks wired into the common stmmac driver. */
struct stmmac_ops gmac_ops = {
	.core_init = gmac_core_init,
	.dump_regs = gmac_dump_regs,
	.host_irq_status = gmac_irq_status,
	.set_filter = gmac_set_filter,
	.flow_ctrl = gmac_flow_ctrl,
	.pmt = gmac_pmt,
	.set_umac_addr = gmac_set_umac_addr,
	.get_umac_addr = gmac_get_umac_addr,
};
644
/* DMA callbacks: local CSR setup plus the shared dwmac_dma helpers. */
struct stmmac_dma_ops gmac_dma_ops = {
	.init = gmac_dma_init,
	.dump_regs = gmac_dump_dma_regs,
	.dma_mode = gmac_dma_operation_mode,
	.dma_diagnostic_fr = gmac_dma_diagnostic_fr,
	.enable_dma_transmission = dwmac_enable_dma_transmission,
	.enable_dma_irq = dwmac_enable_dma_irq,
	.disable_dma_irq = dwmac_disable_dma_irq,
	.start_tx = dwmac_dma_start_tx,
	.stop_tx = dwmac_dma_stop_tx,
	.start_rx = dwmac_dma_start_rx,
	.stop_rx = dwmac_dma_stop_rx,
	.dma_interrupt = dwmac_dma_interrupt,
};
659
/* Enhanced-descriptor manipulation callbacks. */
struct stmmac_desc_ops gmac_desc_ops = {
	.tx_status = gmac_get_tx_frame_status,
	.rx_status = gmac_get_rx_frame_status,
	.get_tx_len = gmac_get_tx_len,
	.init_rx_desc = gmac_init_rx_desc,
	.init_tx_desc = gmac_init_tx_desc,
	.get_tx_owner = gmac_get_tx_owner,
	.get_rx_owner = gmac_get_rx_owner,
	.release_tx_desc = gmac_release_tx_desc,
	.prepare_tx_desc = gmac_prepare_tx_desc,
	.clear_tx_ic = gmac_clear_tx_ic,
	.close_tx_desc = gmac_close_tx_desc,
	.get_tx_ls = gmac_get_tx_ls,
	.set_tx_owner = gmac_set_tx_owner,
	.set_rx_owner = gmac_set_rx_owner,
	.get_rx_frame_len = gmac_get_rx_frame_len,
};
677
678struct mac_device_info *gmac_setup(unsigned long ioaddr)
679{
680 struct mac_device_info *mac;
681 u32 uid = readl(ioaddr + GMAC_VERSION);
682
683 pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
684 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
685
686 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
687
db98a0b0
GC
688 mac->mac = &gmac_ops;
689 mac->desc = &gmac_desc_ops;
690 mac->dma = &gmac_dma_ops;
691
692 mac->pmt = PMT_SUPPORTED;
693 mac->link.port = GMAC_CONTROL_PS;
694 mac->link.duplex = GMAC_CONTROL_DM;
695 mac->link.speed = GMAC_CONTROL_FES;
696 mac->mii.addr = GMAC_MII_ADDR;
697 mac->mii.data = GMAC_MII_DATA;
47dd7a54
GC
698
699 return mac;
700}
This page took 0.068947 seconds and 5 git commands to generate.