drivers/net/ethernet/sun/sunbmac.c
1 /* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
2 *
3 * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6 #include <linux/module.h>
7
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/fcntl.h>
11 #include <linux/interrupt.h>
12 #include <linux/ioport.h>
13 #include <linux/in.h>
14 #include <linux/string.h>
15 #include <linux/delay.h>
16 #include <linux/crc32.h>
17 #include <linux/errno.h>
18 #include <linux/ethtool.h>
19 #include <linux/mii.h>
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/bitops.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/gfp.h>
28
29 #include <asm/auxio.h>
30 #include <asm/byteorder.h>
31 #include <asm/dma.h>
32 #include <asm/idprom.h>
33 #include <asm/io.h>
34 #include <asm/openprom.h>
35 #include <asm/oplib.h>
36 #include <asm/pgtable.h>
37
38 #include "sunbmac.h"
39
40 #define DRV_NAME "sunbmac"
41 #define DRV_VERSION "2.1"
42 #define DRV_RELDATE "August 26, 2008"
43 #define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
44
45 static char version[] =
46 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
47
48 MODULE_VERSION(DRV_VERSION);
49 MODULE_AUTHOR(DRV_AUTHOR);
50 MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
51 MODULE_LICENSE("GPL");
52
53 #undef DEBUG_PROBE
54 #undef DEBUG_TX
55 #undef DEBUG_IRQ
56
57 #ifdef DEBUG_PROBE
58 #define DP(x) printk x
59 #else
60 #define DP(x)
61 #endif
62
63 #ifdef DEBUG_TX
64 #define DTX(x) printk x
65 #else
66 #define DTX(x)
67 #endif
68
69 #ifdef DEBUG_IRQ
70 #define DIRQ(x) printk x
71 #else
72 #define DIRQ(x)
73 #endif
74
75 #define DEFAULT_JAMSIZE 4 /* Toe jam */
76
77 #define QEC_RESET_TRIES 200
78
79 static int qec_global_reset(void __iomem *gregs)
80 {
81 int tries = QEC_RESET_TRIES;
82
83 sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
84 while (--tries) {
85 if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
86 udelay(20);
87 continue;
88 }
89 break;
90 }
91 if (tries)
92 return 0;
93 printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
94 return -1;
95 }
96
97 static void qec_init(struct bigmac *bp)
98 {
99 struct platform_device *qec_op = bp->qec_op;
100 void __iomem *gregs = bp->gregs;
101 u8 bsizes = bp->bigmac_bursts;
102 u32 regval;
103
104 /* 64byte bursts do not work at the moment, do
105 * not even try to enable them. -DaveM
106 */
107 if (bsizes & DMA_BURST32)
108 regval = GLOB_CTRL_B32;
109 else
110 regval = GLOB_CTRL_B16;
111 sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
112 sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);
113
114 /* All of memsize is given to bigmac. */
115 sbus_writel(resource_size(&qec_op->resource[1]),
116 gregs + GLOB_MSIZE);
117
118 /* Half to the transmitter, half to the receiver. */
119 sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
120 gregs + GLOB_TSIZE);
121 sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
122 gregs + GLOB_RSIZE);
123 }
124
125 #define TX_RESET_TRIES 32
126 #define RX_RESET_TRIES 32
127
128 static void bigmac_tx_reset(void __iomem *bregs)
129 {
130 int tries = TX_RESET_TRIES;
131
132 sbus_writel(0, bregs + BMAC_TXCFG);
133
134 /* The fifo threshold bit is read-only and does
135 * not clear. -DaveM
136 */
137 while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
138 --tries != 0)
139 udelay(20);
140
141 if (!tries) {
142 printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
143 printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
144 sbus_readl(bregs + BMAC_TXCFG));
145 }
146 }
147
148 static void bigmac_rx_reset(void __iomem *bregs)
149 {
150 int tries = RX_RESET_TRIES;
151
152 sbus_writel(0, bregs + BMAC_RXCFG);
153 while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
154 udelay(20);
155
156 if (!tries) {
157 printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
158 printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
159 sbus_readl(bregs + BMAC_RXCFG));
160 }
161 }
162
163 /* Reset the transmitter and receiver. */
164 static void bigmac_stop(struct bigmac *bp)
165 {
166 bigmac_tx_reset(bp->bregs);
167 bigmac_rx_reset(bp->bregs);
168 }
169
170 static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
171 {
172 struct net_device_stats *stats = &bp->enet_stats;
173
174 stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
175 sbus_writel(0, bregs + BMAC_RCRCECTR);
176
177 stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
178 sbus_writel(0, bregs + BMAC_UNALECTR);
179
180 stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
181 sbus_writel(0, bregs + BMAC_GLECTR);
182
183 stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);
184
185 stats->collisions +=
186 (sbus_readl(bregs + BMAC_EXCTR) +
187 sbus_readl(bregs + BMAC_LTCTR));
188 sbus_writel(0, bregs + BMAC_EXCTR);
189 sbus_writel(0, bregs + BMAC_LTCTR);
190 }
191
192 static void bigmac_clean_rings(struct bigmac *bp)
193 {
194 int i;
195
196 for (i = 0; i < RX_RING_SIZE; i++) {
197 if (bp->rx_skbs[i] != NULL) {
198 dev_kfree_skb_any(bp->rx_skbs[i]);
199 bp->rx_skbs[i] = NULL;
200 }
201 }
202
203 for (i = 0; i < TX_RING_SIZE; i++) {
204 if (bp->tx_skbs[i] != NULL) {
205 dev_kfree_skb_any(bp->tx_skbs[i]);
206 bp->tx_skbs[i] = NULL;
207 }
208 }
209 }
210
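/* bigmac_init_rings() arms the receive ring: every RX slot gets a fresh
 * skb that is DMA-mapped and handed to the chip by setting RXD_OWN in
 * its descriptor, with 34 bytes reserved at the head of each buffer
 * (presumably the headroom/alignment the hardware expects; skb_put()
 * before skb_reserve() just sizes the data area first).  TX descriptors
 * are simply cleared.
 */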
211 static void bigmac_init_rings(struct bigmac *bp, int from_irq)
212 {
213 struct bmac_init_block *bb = bp->bmac_block;
214 int i;
215 gfp_t gfp_flags = GFP_KERNEL;
216
217 if (from_irq || in_interrupt())
218 gfp_flags = GFP_ATOMIC;
219
220 bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;
221
222 /* Free any skippy bufs left around in the rings. */
223 bigmac_clean_rings(bp);
224
225 /* Now get new skbufs for the receive ring. */
226 for (i = 0; i < RX_RING_SIZE; i++) {
227 struct sk_buff *skb;
228
229 skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
230 if (!skb)
231 continue;
232
233 bp->rx_skbs[i] = skb;
234
235 /* Because we reserve afterwards. */
236 skb_put(skb, ETH_FRAME_LEN);
237 skb_reserve(skb, 34);
238
239 bb->be_rxd[i].rx_addr =
240 dma_map_single(&bp->bigmac_op->dev,
241 skb->data,
242 RX_BUF_ALLOC_SIZE - 34,
243 DMA_FROM_DEVICE);
244 bb->be_rxd[i].rx_flags =
245 (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
246 }
247
248 for (i = 0; i < TX_RING_SIZE; i++)
249 bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
250 }
251
252 #define MGMT_CLKON (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
253 #define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)
254
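/* The PHY is reached by bit-banging the MII management (MDIO) lines
 * through the TCVR_MPAL register: the helpers below drive the data bit
 * with the clock low, then raise MGMT_PAL_DCLOCK, reading the register
 * back after each write so it reaches the chip.  Different data bit
 * positions are used for the internal and external transceivers.
 */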
255 static void idle_transceiver(void __iomem *tregs)
256 {
257 int i = 20;
258
259 while (i--) {
260 sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
261 sbus_readl(tregs + TCVR_MPAL);
262 sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
263 sbus_readl(tregs + TCVR_MPAL);
264 }
265 }
266
267 static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
268 {
269 if (bp->tcvr_type == internal) {
270 bit = (bit & 1) << 3;
271 sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
272 tregs + TCVR_MPAL);
273 sbus_readl(tregs + TCVR_MPAL);
274 sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
275 tregs + TCVR_MPAL);
276 sbus_readl(tregs + TCVR_MPAL);
277 } else if (bp->tcvr_type == external) {
278 bit = (bit & 1) << 2;
279 sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
280 tregs + TCVR_MPAL);
281 sbus_readl(tregs + TCVR_MPAL);
282 sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
283 tregs + TCVR_MPAL);
284 sbus_readl(tregs + TCVR_MPAL);
285 } else {
286 printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
287 }
288 }
289
290 static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
291 {
292 int retval = 0;
293
294 if (bp->tcvr_type == internal) {
295 sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
296 sbus_readl(tregs + TCVR_MPAL);
297 sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
298 tregs + TCVR_MPAL);
299 sbus_readl(tregs + TCVR_MPAL);
300 retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
301 } else if (bp->tcvr_type == external) {
302 sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
303 sbus_readl(tregs + TCVR_MPAL);
304 sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
305 sbus_readl(tregs + TCVR_MPAL);
306 retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
307 } else {
308 printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
309 }
310 return retval;
311 }
312
313 static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
314 {
315 int retval = 0;
316
317 if (bp->tcvr_type == internal) {
318 sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
319 sbus_readl(tregs + TCVR_MPAL);
320 retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
321 sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
322 sbus_readl(tregs + TCVR_MPAL);
323 } else if (bp->tcvr_type == external) {
324 sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
325 sbus_readl(tregs + TCVR_MPAL);
326 retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
327 sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
328 sbus_readl(tregs + TCVR_MPAL);
329 } else {
330 printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
331 }
332 return retval;
333 }
334
335 static void put_tcvr_byte(struct bigmac *bp,
336 void __iomem *tregs,
337 unsigned int byte)
338 {
339 int shift = 4;
340
341 do {
342 write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
343 shift -= 1;
344 } while (shift >= 0);
345 }
346
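/* bigmac_tcvr_write()/bigmac_tcvr_read() clock out a standard IEEE
 * 802.3 clause-22 management frame one bit at a time: start bits (01),
 * a write (01) or read (10) opcode, the 5-bit PHY address (internal or
 * external transceiver), the 5-bit register address, a turnaround, and
 * finally 16 data bits, MSB first.  Despite its name, put_tcvr_byte()
 * is the 5-bit address helper.
 */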
347 static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
348 int reg, unsigned short val)
349 {
350 int shift;
351
352 reg &= 0xff;
353 val &= 0xffff;
354 switch(bp->tcvr_type) {
355 case internal:
356 case external:
357 break;
358
359 default:
360 printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
361 return;
362 }
363
364 idle_transceiver(tregs);
365 write_tcvr_bit(bp, tregs, 0);
366 write_tcvr_bit(bp, tregs, 1);
367 write_tcvr_bit(bp, tregs, 0);
368 write_tcvr_bit(bp, tregs, 1);
369
370 put_tcvr_byte(bp, tregs,
371 ((bp->tcvr_type == internal) ?
372 BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
373
374 put_tcvr_byte(bp, tregs, reg);
375
376 write_tcvr_bit(bp, tregs, 1);
377 write_tcvr_bit(bp, tregs, 0);
378
379 shift = 15;
380 do {
381 write_tcvr_bit(bp, tregs, (val >> shift) & 1);
382 shift -= 1;
383 } while (shift >= 0);
384 }
385
386 static unsigned short bigmac_tcvr_read(struct bigmac *bp,
387 void __iomem *tregs,
388 int reg)
389 {
390 unsigned short retval = 0;
391
392 reg &= 0xff;
393 switch(bp->tcvr_type) {
394 case internal:
395 case external:
396 break;
397
398 default:
399 printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
400 return 0xffff;
401 }
402
403 idle_transceiver(tregs);
404 write_tcvr_bit(bp, tregs, 0);
405 write_tcvr_bit(bp, tregs, 1);
406 write_tcvr_bit(bp, tregs, 1);
407 write_tcvr_bit(bp, tregs, 0);
408
409 put_tcvr_byte(bp, tregs,
410 ((bp->tcvr_type == internal) ?
411 BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
412
413 put_tcvr_byte(bp, tregs, reg);
414
415 if (bp->tcvr_type == external) {
416 int shift = 15;
417
418 (void) read_tcvr_bit2(bp, tregs);
419 (void) read_tcvr_bit2(bp, tregs);
420
421 do {
422 int tmp;
423
424 tmp = read_tcvr_bit2(bp, tregs);
425 retval |= ((tmp & 1) << shift);
426 shift -= 1;
427 } while (shift >= 0);
428
429 (void) read_tcvr_bit2(bp, tregs);
430 (void) read_tcvr_bit2(bp, tregs);
431 (void) read_tcvr_bit2(bp, tregs);
432 } else {
433 int shift = 15;
434
435 (void) read_tcvr_bit(bp, tregs);
436 (void) read_tcvr_bit(bp, tregs);
437
438 do {
439 int tmp;
440
441 tmp = read_tcvr_bit(bp, tregs);
442 retval |= ((tmp & 1) << shift);
443 shift -= 1;
444 } while (shift >= 0);
445
446 (void) read_tcvr_bit(bp, tregs);
447 (void) read_tcvr_bit(bp, tregs);
448 (void) read_tcvr_bit(bp, tregs);
449 }
450 return retval;
451 }
452
453 static void bigmac_tcvr_init(struct bigmac *bp)
454 {
455 void __iomem *tregs = bp->tregs;
456 u32 mpal;
457
458 idle_transceiver(tregs);
459 sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
460 tregs + TCVR_MPAL);
461 sbus_readl(tregs + TCVR_MPAL);
462
463 /* Only the bit for the present transceiver (internal or
464 * external) will stick, set them both and see what stays.
465 */
466 sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
467 sbus_readl(tregs + TCVR_MPAL);
468 udelay(20);
469
470 mpal = sbus_readl(tregs + TCVR_MPAL);
471 if (mpal & MGMT_PAL_EXT_MDIO) {
472 bp->tcvr_type = external;
473 sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
474 tregs + TCVR_TPAL);
475 sbus_readl(tregs + TCVR_TPAL);
476 } else if (mpal & MGMT_PAL_INT_MDIO) {
477 bp->tcvr_type = internal;
478 sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
479 TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
480 tregs + TCVR_TPAL);
481 sbus_readl(tregs + TCVR_TPAL);
482 } else {
483 printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
484 "external MDIO available!\n");
485 printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
486 sbus_readl(tregs + TCVR_MPAL),
487 sbus_readl(tregs + TCVR_TPAL));
488 }
489 }
490
491 static int bigmac_init_hw(struct bigmac *, int);
492
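/* Link bring-up is a forced-speed probe rather than real
 * autonegotiation: bigmac_begin_auto_negotiation() forces 100baseT
 * first, and try_next_permutation() drops back to 10baseT (resetting
 * the PHY in between) once the link timer gives up.  A return of -1
 * means every speed has been tried without getting link.
 */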
493 static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
494 {
495 if (bp->sw_bmcr & BMCR_SPEED100) {
496 int timeout;
497
498 /* Reset the PHY. */
499 bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
500 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
501 bp->sw_bmcr = (BMCR_RESET);
502 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
503
504 timeout = 64;
505 while (--timeout) {
506 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
507 if ((bp->sw_bmcr & BMCR_RESET) == 0)
508 break;
509 udelay(20);
510 }
511 if (timeout == 0)
512 printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
513
514 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
515
516 /* Now we try 10baseT. */
517 bp->sw_bmcr &= ~(BMCR_SPEED100);
518 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
519 return 0;
520 }
521
522 /* We've tried them all. */
523 return -1;
524 }
525
526 static void bigmac_timer(unsigned long data)
527 {
528 struct bigmac *bp = (struct bigmac *) data;
529 void __iomem *tregs = bp->tregs;
530 int restart_timer = 0;
531
532 bp->timer_ticks++;
533 if (bp->timer_state == ltrywait) {
534 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR);
535 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
536 if (bp->sw_bmsr & BMSR_LSTATUS) {
537 printk(KERN_INFO "%s: Link is now up at %s.\n",
538 bp->dev->name,
539 (bp->sw_bmcr & BMCR_SPEED100) ?
540 "100baseT" : "10baseT");
541 bp->timer_state = asleep;
542 restart_timer = 0;
543 } else {
544 if (bp->timer_ticks >= 4) {
545 int ret;
546
547 ret = try_next_permutation(bp, tregs);
548 if (ret == -1) {
549 printk(KERN_ERR "%s: Link down, cable problem?\n",
550 bp->dev->name);
551 ret = bigmac_init_hw(bp, 0);
552 if (ret) {
553 printk(KERN_ERR "%s: Error, cannot re-init the "
554 "BigMAC.\n", bp->dev->name);
555 }
556 return;
557 }
558 bp->timer_ticks = 0;
559 restart_timer = 1;
560 } else {
561 restart_timer = 1;
562 }
563 }
564 } else {
565 /* Can't happen: the link timer should be asleep here. */
566 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
567 bp->dev->name);
568 restart_timer = 0;
569 bp->timer_ticks = 0;
570 bp->timer_state = asleep; /* foo on you */
571 }
572
573 if (restart_timer != 0) {
574 bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
575 add_timer(&bp->bigmac_timer);
576 }
577 }
578
579 /* Well, really we just force the chip into 100baseT then
580 * 10baseT, each time checking for a link status.
581 */
582 static void bigmac_begin_auto_negotiation(struct bigmac *bp)
583 {
584 void __iomem *tregs = bp->tregs;
585 int timeout;
586
587 /* Grab new software copies of PHY registers. */
588 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR);
589 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
590
591 /* Reset the PHY. */
592 bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
593 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
594 bp->sw_bmcr = (BMCR_RESET);
595 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
596
597 timeout = 64;
598 while (--timeout) {
599 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
600 if ((bp->sw_bmcr & BMCR_RESET) == 0)
601 break;
602 udelay(20);
603 }
604 if (timeout == 0)
605 printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
606
607 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
608
609 /* First we try 100baseT. */
610 bp->sw_bmcr |= BMCR_SPEED100;
611 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
612
613 bp->timer_state = ltrywait;
614 bp->timer_ticks = 0;
615 bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
616 bp->bigmac_timer.data = (unsigned long) bp;
617 bp->bigmac_timer.function = bigmac_timer;
618 add_timer(&bp->bigmac_timer);
619 }
620
621 static int bigmac_init_hw(struct bigmac *bp, int from_irq)
622 {
623 void __iomem *gregs = bp->gregs;
624 void __iomem *cregs = bp->creg;
625 void __iomem *bregs = bp->bregs;
626 unsigned char *e = &bp->dev->dev_addr[0];
627
628 /* Latch current counters into statistics. */
629 bigmac_get_counters(bp, bregs);
630
631 /* Reset QEC. */
632 qec_global_reset(gregs);
633
634 /* Init QEC. */
635 qec_init(bp);
636
637 /* Alloc and reset the tx/rx descriptor chains. */
638 bigmac_init_rings(bp, from_irq);
639
640 /* Initialize the PHY. */
641 bigmac_tcvr_init(bp);
642
643 /* Stop transmitter and receiver. */
644 bigmac_stop(bp);
645
646 /* Set hardware ethernet address. */
647 sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
648 sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
649 sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);
650
651 /* Clear the hash table until mc upload occurs. */
652 sbus_writel(0, bregs + BMAC_HTABLE3);
653 sbus_writel(0, bregs + BMAC_HTABLE2);
654 sbus_writel(0, bregs + BMAC_HTABLE1);
655 sbus_writel(0, bregs + BMAC_HTABLE0);
656
657 /* Enable Big Mac hash table filter. */
658 sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
659 bregs + BMAC_RXCFG);
660 udelay(20);
661
662 /* Ok, configure the Big Mac transmitter. */
663 sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);
664
665 /* The HME docs recommend using the 10 LSBs of our MAC address here. */
666 sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
667 bregs + BMAC_RSEED);
668
669 /* Enable the output drivers no matter what. */
670 sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
671 bregs + BMAC_XIFCFG);
672
673 /* Tell the QEC where the ring descriptors are. */
674 sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
675 cregs + CREG_RXDS);
676 sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
677 cregs + CREG_TXDS);
678
679 /* Setup the FIFO pointers into QEC local memory. */
680 sbus_writel(0, cregs + CREG_RXRBUFPTR);
681 sbus_writel(0, cregs + CREG_RXWBUFPTR);
682 sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
683 cregs + CREG_TXRBUFPTR);
684 sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
685 cregs + CREG_TXWBUFPTR);
686
687 /* Tell bigmac what interrupts we don't want to hear about. */
688 sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
689 bregs + BMAC_IMASK);
690
691 /* Enable the various other irq's. */
692 sbus_writel(0, cregs + CREG_RIMASK);
693 sbus_writel(0, cregs + CREG_TIMASK);
694 sbus_writel(0, cregs + CREG_QMASK);
695 sbus_writel(0, cregs + CREG_BMASK);
696
697 /* Set jam size to a reasonable default. */
698 sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);
699
700 /* Clear collision counter. */
701 sbus_writel(0, cregs + CREG_CCNT);
702
703 /* Enable transmitter and receiver. */
704 sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
705 bregs + BMAC_TXCFG);
706 sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
707 bregs + BMAC_RXCFG);
708
709 /* Ok, start detecting link speed/duplex. */
710 bigmac_begin_auto_negotiation(bp);
711
712 /* Success. */
713 return 0;
714 }
715
716 /* Error interrupts get sent here. */
717 static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
718 {
719 printk(KERN_ERR "bigmac_is_medium_rare: ");
720 if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
721 if (qec_status & GLOB_STAT_ER)
722 printk("QEC_ERROR, ");
723 if (qec_status & GLOB_STAT_BM)
724 printk("QEC_BMAC_ERROR, ");
725 }
726 if (bmac_status & CREG_STAT_ERRORS) {
727 if (bmac_status & CREG_STAT_BERROR)
728 printk("BMAC_ERROR, ");
729 if (bmac_status & CREG_STAT_TXDERROR)
730 printk("TXD_ERROR, ");
731 if (bmac_status & CREG_STAT_TXLERR)
732 printk("TX_LATE_ERROR, ");
733 if (bmac_status & CREG_STAT_TXPERR)
734 printk("TX_PARITY_ERROR, ");
735 if (bmac_status & CREG_STAT_TXSERR)
736 printk("TX_SBUS_ERROR, ");
737
738 if (bmac_status & CREG_STAT_RXDROP)
739 printk("RX_DROP_ERROR, ");
740
741 if (bmac_status & CREG_STAT_RXSMALL)
742 printk("RX_SMALL_ERROR, ");
743 if (bmac_status & CREG_STAT_RXLERR)
744 printk("RX_LATE_ERROR, ");
745 if (bmac_status & CREG_STAT_RXPERR)
746 printk("RX_PARITY_ERROR, ");
747 if (bmac_status & CREG_STAT_RXSERR)
748 printk("RX_SBUS_ERROR, ");
749 }
750
751 printk(" RESET\n");
752 bigmac_init_hw(bp, 1);
753 }
754
755 /* BigMAC transmit complete service routines. */
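/* Reclaim completed TX descriptors: walk from tx_old until a descriptor
 * still owned by the chip is found, unmap and free each transmitted
 * skb, and wake the queue if it had been stopped for lack of ring
 * space.
 */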
756 static void bigmac_tx(struct bigmac *bp)
757 {
758 struct be_txd *txbase = &bp->bmac_block->be_txd[0];
759 struct net_device *dev = bp->dev;
760 int elem;
761
762 spin_lock(&bp->lock);
763
764 elem = bp->tx_old;
765 DTX(("bigmac_tx: tx_old[%d] ", elem));
766 while (elem != bp->tx_new) {
767 struct sk_buff *skb;
768 struct be_txd *this = &txbase[elem];
769
770 DTX(("this(%p) [flags(%08x)addr(%08x)]",
771 this, this->tx_flags, this->tx_addr));
772
773 if (this->tx_flags & TXD_OWN)
774 break;
775 skb = bp->tx_skbs[elem];
776 bp->enet_stats.tx_packets++;
777 bp->enet_stats.tx_bytes += skb->len;
778 dma_unmap_single(&bp->bigmac_op->dev,
779 this->tx_addr, skb->len,
780 DMA_TO_DEVICE);
781
782 DTX(("skb(%p) ", skb));
783 bp->tx_skbs[elem] = NULL;
784 dev_kfree_skb_irq(skb);
785
786 elem = NEXT_TX(elem);
787 }
788 DTX((" DONE, tx_old=%d\n", elem));
789 bp->tx_old = elem;
790
791 if (netif_queue_stopped(dev) &&
792 TX_BUFFS_AVAIL(bp) > 0)
793 netif_wake_queue(bp->dev);
794
795 spin_unlock(&bp->lock);
796 }
797
798 /* BigMAC receive complete service routines. */
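/* The receive path uses a copy-break scheme: frames longer than
 * RX_COPY_THRESHOLD keep their ring buffer (a newly allocated skb
 * refills the slot), while short frames are copied into a small skb so
 * the original DMA buffer can be reused in place.  Either way the
 * descriptor is re-armed with RXD_OWN before moving on.
 */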
799 static void bigmac_rx(struct bigmac *bp)
800 {
801 struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
802 struct be_rxd *this;
803 int elem = bp->rx_new, drops = 0;
804 u32 flags;
805
806 this = &rxbase[elem];
807 while (!((flags = this->rx_flags) & RXD_OWN)) {
808 struct sk_buff *skb;
809 int len = (flags & RXD_LENGTH); /* FCS not included */
810
811 /* Check for errors. */
812 if (len < ETH_ZLEN) {
813 bp->enet_stats.rx_errors++;
814 bp->enet_stats.rx_length_errors++;
815
816 drop_it:
817 /* Return it to the BigMAC. */
818 bp->enet_stats.rx_dropped++;
819 this->rx_flags =
820 (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
821 goto next;
822 }
823 skb = bp->rx_skbs[elem];
824 if (len > RX_COPY_THRESHOLD) {
825 struct sk_buff *new_skb;
826
827 /* Now refill the entry, if we can. */
828 new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
829 if (new_skb == NULL) {
830 drops++;
831 goto drop_it;
832 }
833 dma_unmap_single(&bp->bigmac_op->dev,
834 this->rx_addr,
835 RX_BUF_ALLOC_SIZE - 34,
836 DMA_FROM_DEVICE);
837 bp->rx_skbs[elem] = new_skb;
838 skb_put(new_skb, ETH_FRAME_LEN);
839 skb_reserve(new_skb, 34);
840 this->rx_addr =
841 dma_map_single(&bp->bigmac_op->dev,
842 new_skb->data,
843 RX_BUF_ALLOC_SIZE - 34,
844 DMA_FROM_DEVICE);
845 this->rx_flags =
846 (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
847
848 /* Trim the original skb for the netif. */
849 skb_trim(skb, len);
850 } else {
851 struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2);
852
853 if (copy_skb == NULL) {
854 drops++;
855 goto drop_it;
856 }
857 skb_reserve(copy_skb, 2);
858 skb_put(copy_skb, len);
859 dma_sync_single_for_cpu(&bp->bigmac_op->dev,
860 this->rx_addr, len,
861 DMA_FROM_DEVICE);
862 skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
863 dma_sync_single_for_device(&bp->bigmac_op->dev,
864 this->rx_addr, len,
865 DMA_FROM_DEVICE);
866
867 /* Reuse original ring buffer. */
868 this->rx_flags =
869 (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
870
871 skb = copy_skb;
872 }
873
874 /* No checksums done by the BigMAC ;-( */
875 skb->protocol = eth_type_trans(skb, bp->dev);
876 netif_rx(skb);
877 bp->enet_stats.rx_packets++;
878 bp->enet_stats.rx_bytes += len;
879 next:
880 elem = NEXT_RX(elem);
881 this = &rxbase[elem];
882 }
883 bp->rx_new = elem;
884 if (drops)
885 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name);
886 }
887
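/* A single SBUS interrupt covers everything: the handler latches the
 * QEC global and per-channel status, runs the error path (which fully
 * re-initializes the chip) if any error bit is set, and then services
 * TX and RX completions as indicated by CREG_STAT_TXIRQ/RXIRQ.
 */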
888 static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
889 {
890 struct bigmac *bp = (struct bigmac *) dev_id;
891 u32 qec_status, bmac_status;
892
893 DIRQ(("bigmac_interrupt: "));
894
895 /* Latch status registers now. */
896 bmac_status = sbus_readl(bp->creg + CREG_STAT);
897 qec_status = sbus_readl(bp->gregs + GLOB_STAT);
898
899 DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
900 if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
901 (bmac_status & CREG_STAT_ERRORS))
902 bigmac_is_medium_rare(bp, qec_status, bmac_status);
903
904 if (bmac_status & CREG_STAT_TXIRQ)
905 bigmac_tx(bp);
906
907 if (bmac_status & CREG_STAT_RXIRQ)
908 bigmac_rx(bp);
909
910 return IRQ_HANDLED;
911 }
912
913 static int bigmac_open(struct net_device *dev)
914 {
915 struct bigmac *bp = netdev_priv(dev);
916 int ret;
917
918 ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
919 if (ret) {
920 printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
921 return ret;
922 }
923 init_timer(&bp->bigmac_timer);
924 ret = bigmac_init_hw(bp, 0);
925 if (ret)
926 free_irq(dev->irq, bp);
927 return ret;
928 }
929
930 static int bigmac_close(struct net_device *dev)
931 {
932 struct bigmac *bp = netdev_priv(dev);
933
934 del_timer(&bp->bigmac_timer);
935 bp->timer_state = asleep;
936 bp->timer_ticks = 0;
937
938 bigmac_stop(bp);
939 bigmac_clean_rings(bp);
940 free_irq(dev->irq, bp);
941 return 0;
942 }
943
944 static void bigmac_tx_timeout(struct net_device *dev)
945 {
946 struct bigmac *bp = netdev_priv(dev);
947
948 bigmac_init_hw(bp, 0);
949 netif_wake_queue(dev);
950 }
951
952 /* Put a packet on the wire. */
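/* Queue one frame: map the skb, fill the next TX descriptor and flip
 * TXD_OWN over to the chip under bp->lock, stop the queue when the
 * ring fills up, then poke CREG_CTRL_TWAKEUP so the QEC channel starts
 * (or keeps) draining the ring.
 */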
953 static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
954 {
955 struct bigmac *bp = netdev_priv(dev);
956 int len, entry;
957 u32 mapping;
958
959 len = skb->len;
960 mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
961 len, DMA_TO_DEVICE);
962
963 /* Avoid a race... */
964 spin_lock_irq(&bp->lock);
965 entry = bp->tx_new;
966 DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
967 bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
968 bp->tx_skbs[entry] = skb;
969 bp->bmac_block->be_txd[entry].tx_addr = mapping;
970 bp->bmac_block->be_txd[entry].tx_flags =
971 (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
972 bp->tx_new = NEXT_TX(entry);
973 if (TX_BUFFS_AVAIL(bp) <= 0)
974 netif_stop_queue(dev);
975 spin_unlock_irq(&bp->lock);
976
977 /* Get it going. */
978 sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
979
980
981 return NETDEV_TX_OK;
982 }
983
984 static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
985 {
986 struct bigmac *bp = netdev_priv(dev);
987
988 bigmac_get_counters(bp, bp->bregs);
989 return &bp->enet_stats;
990 }
991
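/* RX filtering uses a 64-bin multicast hash spread over four 16-bit
 * HTABLE registers: the top six bits of the little-endian CRC-32 of
 * each address select the bin.  All-ones tables mean all-multicast,
 * and promiscuous mode is a separate RXCFG bit.  The receiver is
 * disabled while the tables are rewritten.
 */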
992 static void bigmac_set_multicast(struct net_device *dev)
993 {
994 struct bigmac *bp = netdev_priv(dev);
995 void __iomem *bregs = bp->bregs;
996 struct netdev_hw_addr *ha;
997 u32 tmp, crc;
998
999 /* Disable the receiver. The bit self-clears when
1000 * the operation is complete.
1001 */
1002 tmp = sbus_readl(bregs + BMAC_RXCFG);
1003 tmp &= ~(BIGMAC_RXCFG_ENABLE);
1004 sbus_writel(tmp, bregs + BMAC_RXCFG);
1005 while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
1006 udelay(20);
1007
1008 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
1009 sbus_writel(0xffff, bregs + BMAC_HTABLE0);
1010 sbus_writel(0xffff, bregs + BMAC_HTABLE1);
1011 sbus_writel(0xffff, bregs + BMAC_HTABLE2);
1012 sbus_writel(0xffff, bregs + BMAC_HTABLE3);
1013 } else if (dev->flags & IFF_PROMISC) {
1014 tmp = sbus_readl(bregs + BMAC_RXCFG);
1015 tmp |= BIGMAC_RXCFG_PMISC;
1016 sbus_writel(tmp, bregs + BMAC_RXCFG);
1017 } else {
1018 u16 hash_table[4] = { 0 };
1019
1020 netdev_for_each_mc_addr(ha, dev) {
1021 crc = ether_crc_le(6, ha->addr);
1022 crc >>= 26;
1023 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1024 }
1025 sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
1026 sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
1027 sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
1028 sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
1029 }
1030
1031 /* Re-enable the receiver. */
1032 tmp = sbus_readl(bregs + BMAC_RXCFG);
1033 tmp |= BIGMAC_RXCFG_ENABLE;
1034 sbus_writel(tmp, bregs + BMAC_RXCFG);
1035 }
1036
1037 /* Ethtool support... */
1038 static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1039 {
1040 strlcpy(info->driver, "sunbmac", sizeof(info->driver));
1041 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1042 }
1043
1044 static u32 bigmac_get_link(struct net_device *dev)
1045 {
1046 struct bigmac *bp = netdev_priv(dev);
1047
1048 spin_lock_irq(&bp->lock);
1049 bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR);
1050 spin_unlock_irq(&bp->lock);
1051
1052 return (bp->sw_bmsr & BMSR_LSTATUS);
1053 }
1054
1055 static const struct ethtool_ops bigmac_ethtool_ops = {
1056 .get_drvinfo = bigmac_get_drvinfo,
1057 .get_link = bigmac_get_link,
1058 };
1059
1060 static const struct net_device_ops bigmac_ops = {
1061 .ndo_open = bigmac_open,
1062 .ndo_stop = bigmac_close,
1063 .ndo_start_xmit = bigmac_start_xmit,
1064 .ndo_get_stats = bigmac_get_stats,
1065 .ndo_set_rx_mode = bigmac_set_multicast,
1066 .ndo_tx_timeout = bigmac_tx_timeout,
1067 .ndo_change_mtu = eth_change_mtu,
1068 .ndo_set_mac_address = eth_mac_addr,
1069 .ndo_validate_addr = eth_validate_addr,
1070 };
1071
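/* Probe-time setup: map the QEC global registers plus the BigMAC
 * channel, primary and transceiver register ranges, sanity-check that
 * the QEC is in BigMAC mode, pick an SBUS burst size, allocate one page
 * of coherent memory for the descriptor block, and register the net
 * device.  fail_and_cleanup unwinds whichever of these succeeded.
 */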
1072 static int bigmac_ether_init(struct platform_device *op,
1073 struct platform_device *qec_op)
1074 {
1075 static int version_printed;
1076 struct net_device *dev;
1077 u8 bsizes, bsizes_more;
1078 struct bigmac *bp;
1079 int i;
1080
1081 /* Get a new device struct for this interface. */
1082 dev = alloc_etherdev(sizeof(struct bigmac));
1083 if (!dev)
1084 return -ENOMEM;
1085
1086 if (version_printed++ == 0)
1087 printk(KERN_INFO "%s", version);
1088
1089 for (i = 0; i < 6; i++)
1090 dev->dev_addr[i] = idprom->id_ethaddr[i];
1091
1092 /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
1093 bp = netdev_priv(dev);
1094 bp->qec_op = qec_op;
1095 bp->bigmac_op = op;
1096
1097 SET_NETDEV_DEV(dev, &op->dev);
1098
1099 spin_lock_init(&bp->lock);
1100
1101 /* Map in QEC global control registers. */
1102 bp->gregs = of_ioremap(&qec_op->resource[0], 0,
1103 GLOB_REG_SIZE, "BigMAC QEC Global Regs");
1104 if (!bp->gregs) {
1105 printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
1106 goto fail_and_cleanup;
1107 }
1108
1109 /* Make sure QEC is in BigMAC mode. */
1110 if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
1111 printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
1112 goto fail_and_cleanup;
1113 }
1114
1115 /* Reset the QEC. */
1116 if (qec_global_reset(bp->gregs))
1117 goto fail_and_cleanup;
1118
1119 /* Get supported SBUS burst sizes. */
1120 bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
1121 bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
1122
1123 bsizes &= 0xff;
1124 if (bsizes_more != 0xff)
1125 bsizes &= bsizes_more;
1126 if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
1127 (bsizes & DMA_BURST32) == 0)
1128 bsizes = (DMA_BURST32 - 1);
1129 bp->bigmac_bursts = bsizes;
1130
1131 /* Perform QEC initialization. */
1132 qec_init(bp);
1133
1134 /* Map in the BigMAC channel registers. */
1135 bp->creg = of_ioremap(&op->resource[0], 0,
1136 CREG_REG_SIZE, "BigMAC QEC Channel Regs");
1137 if (!bp->creg) {
1138 printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
1139 goto fail_and_cleanup;
1140 }
1141
1142 /* Map in the BigMAC control registers. */
1143 bp->bregs = of_ioremap(&op->resource[1], 0,
1144 BMAC_REG_SIZE, "BigMAC Primary Regs");
1145 if (!bp->bregs) {
1146 printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
1147 goto fail_and_cleanup;
1148 }
1149
1150 /* Map in the BigMAC transceiver registers, this is how you poke at
1151 * the BigMAC's PHY.
1152 */
1153 bp->tregs = of_ioremap(&op->resource[2], 0,
1154 TCVR_REG_SIZE, "BigMAC Transceiver Regs");
1155 if (!bp->tregs) {
1156 printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
1157 goto fail_and_cleanup;
1158 }
1159
1160 /* Stop the BigMAC. */
1161 bigmac_stop(bp);
1162
1163 /* Allocate transmit/receive descriptor DVMA block. */
1164 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
1165 PAGE_SIZE,
1166 &bp->bblock_dvma, GFP_ATOMIC);
1167 if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
1168 goto fail_and_cleanup;
1169
1170 /* Get the board revision of this BigMAC. */
1171 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
1172 "board-version", 1);
1173
1174 /* Init auto-negotiation timer state. */
1175 init_timer(&bp->bigmac_timer);
1176 bp->timer_state = asleep;
1177 bp->timer_ticks = 0;
1178
1179 /* Backlink to generic net device struct. */
1180 bp->dev = dev;
1181
1182 /* Set links to our BigMAC open and close routines. */
1183 dev->ethtool_ops = &bigmac_ethtool_ops;
1184 dev->netdev_ops = &bigmac_ops;
1185 dev->watchdog_timeo = 5*HZ;
1186
1187 /* Finish net device registration. */
1188 dev->irq = bp->bigmac_op->archdata.irqs[0];
1189 dev->dma = 0;
1190
1191 if (register_netdev(dev)) {
1192 printk(KERN_ERR "BIGMAC: Cannot register device.\n");
1193 goto fail_and_cleanup;
1194 }
1195
1196 dev_set_drvdata(&bp->bigmac_op->dev, bp);
1197
1198 printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n",
1199 dev->name, dev->dev_addr);
1200
1201 return 0;
1202
1203 fail_and_cleanup:
1204 /* Something went wrong, undo whatever we did so far. */
1205 /* Free register mappings if any. */
1206 if (bp->gregs)
1207 of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
1208 if (bp->creg)
1209 of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
1210 if (bp->bregs)
1211 of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
1212 if (bp->tregs)
1213 of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
1214
1215 if (bp->bmac_block)
1216 dma_free_coherent(&bp->bigmac_op->dev,
1217 PAGE_SIZE,
1218 bp->bmac_block,
1219 bp->bblock_dvma);
1220
1221 /* This also frees the co-located private data */
1222 free_netdev(dev);
1223 return -ENODEV;
1224 }
1225
1226 /* QEC can be the parent of either QuadEthernet or a BigMAC. We want
1227 * the latter.
1228 */
1229 static int bigmac_sbus_probe(struct platform_device *op)
1230 {
1231 struct device *parent = op->dev.parent;
1232 struct platform_device *qec_op;
1233
1234 qec_op = to_platform_device(parent);
1235
1236 return bigmac_ether_init(op, qec_op);
1237 }
1238
1239 static int bigmac_sbus_remove(struct platform_device *op)
1240 {
1241 struct bigmac *bp = platform_get_drvdata(op);
1242 struct device *parent = op->dev.parent;
1243 struct net_device *net_dev = bp->dev;
1244 struct platform_device *qec_op;
1245
1246 qec_op = to_platform_device(parent);
1247
1248 unregister_netdev(net_dev);
1249
1250 of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
1251 of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
1252 of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
1253 of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
1254 dma_free_coherent(&op->dev,
1255 PAGE_SIZE,
1256 bp->bmac_block,
1257 bp->bblock_dvma);
1258
1259 free_netdev(net_dev);
1260
1261 return 0;
1262 }
1263
1264 static const struct of_device_id bigmac_sbus_match[] = {
1265 {
1266 .name = "be",
1267 },
1268 {},
1269 };
1270
1271 MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
1272
1273 static struct platform_driver bigmac_sbus_driver = {
1274 .driver = {
1275 .name = "sunbmac",
1276 .of_match_table = bigmac_sbus_match,
1277 },
1278 .probe = bigmac_sbus_probe,
1279 .remove = bigmac_sbus_remove,
1280 };
1281
1282 module_platform_driver(bigmac_sbus_driver);