1 /*
2 * Davicom DM9000 Fast Ethernet driver for Linux.
3 * Copyright (C) 1997 Sten Wang
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
16 *
17 * Additional updates, Copyright:
18 * Ben Dooks <ben@simtec.co.uk>
19 * Sascha Hauer <s.hauer@pengutronix.de>
20 */
21
22 #include <linux/module.h>
23 #include <linux/ioport.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/init.h>
27 #include <linux/skbuff.h>
28 #include <linux/spinlock.h>
29 #include <linux/crc32.h>
30 #include <linux/mii.h>
31 #include <linux/ethtool.h>
32 #include <linux/dm9000.h>
33 #include <linux/delay.h>
34 #include <linux/platform_device.h>
35 #include <linux/irq.h>
36 #include <linux/slab.h>
37
38 #include <asm/delay.h>
39 #include <asm/irq.h>
40 #include <asm/io.h>
41
42 #include "dm9000.h"
43
44 /* Board/System/Debug information/definition ---------------- */
45
46 #define DM9000_PHY 0x40 /* PHY address 1, encoded in EPAR bits 7:6 */
47
48 #define CARDNAME "dm9000"
49 #define DRV_VERSION "1.31"
50
51 /*
52 * Transmit timeout, default 5 seconds.
53 */
54 static int watchdog = 5000;
55 module_param(watchdog, int, 0400);
56 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
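/* For example (illustrative only), loading the module with
 *	modprobe dm9000 watchdog=10000
 * gives a 10 second transmit watchdog; the value is converted with
 * msecs_to_jiffies() when the net_device is set up in dm9000_probe().
 */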
57
58 /* DM9000 register address locking.
59 *
60 * The DM9000 uses an address register to control where data written
61 * to the data register goes. This means that the address register
62 * must be preserved over interrupts or similar calls.
63 *
64 * During interrupt and other critical calls, a spinlock is used to
65 * protect the system, but the calls themselves save the address
66 * in the address register in case they are interrupting another
67 * access to the device.
68 *
69 * For general accesses a lock is provided so that calls which are
70 * allowed to sleep are serialised so that the address register does
71 * not need to be saved. This lock also serves to serialise access
72 * to the EEPROM and PHY access registers which are shared between
73 * these two devices.
74 */
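/* A sketch of the pattern this implies for code that can run in, or be
 * interrupted by, IRQ context (DM9000_XXX stands in for any register):
 *
 *	spin_lock_irqsave(&db->lock, flags);
 *	reg_save = readb(db->io_addr);		(remember selected register)
 *	iow(db, DM9000_XXX, value);		(address cycle + data cycle)
 *	writeb(reg_save, db->io_addr);		(restore for interrupted code)
 *	spin_unlock_irqrestore(&db->lock, flags);
 *
 * dm9000_interrupt(), dm9000_timeout() and the PHY accessors below all
 * follow this shape.
 */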
75
76 /* The driver supports the original DM9000E, and now the two newer
77 * devices, DM9000A and DM9000B.
78 */
79
80 enum dm9000_type {
81 TYPE_DM9000E, /* original DM9000 */
82 TYPE_DM9000A,
83 TYPE_DM9000B
84 };
85
86 /* Structure/enum declaration ------------------------------- */
87 typedef struct board_info {
88
89 void __iomem *io_addr; /* Register I/O base address */
90 void __iomem *io_data; /* Data I/O address */
91 u16 irq; /* IRQ */
92
93 u16 tx_pkt_cnt;
94 u16 queue_pkt_len;
95 u16 queue_start_addr;
96 u16 queue_ip_summed;
97 u16 dbug_cnt;
98 u8 io_mode; /* 0:word, 2:byte */
99 u8 phy_addr;
100 u8 imr_all;
101
102 unsigned int flags;
103 unsigned int in_suspend :1;
104 unsigned int wake_supported :1;
105 int debug_level;
106
107 enum dm9000_type type;
108
109 void (*inblk)(void __iomem *port, void *data, int length);
110 void (*outblk)(void __iomem *port, void *data, int length);
111 void (*dumpblk)(void __iomem *port, int length);
112
113 struct device *dev; /* parent device */
114
115 struct resource *addr_res; /* resources found */
116 struct resource *data_res;
117 struct resource *addr_req; /* resources requested */
118 struct resource *data_req;
119 struct resource *irq_res;
120
121 int irq_wake;
122
123 struct mutex addr_lock; /* phy and eeprom access lock */
124
125 struct delayed_work phy_poll;
126 struct net_device *ndev;
127
128 spinlock_t lock;
129
130 struct mii_if_info mii;
131 u32 msg_enable;
132 u32 wake_state;
133
134 int rx_csum;
135 int can_csum;
136 int ip_summed;
137 } board_info_t;
138
139 /* debug code */
140
141 #define dm9000_dbg(db, lev, msg...) do { \
142 if ((lev) < CONFIG_DM9000_DEBUGLEVEL && \
143 (lev) < db->debug_level) { \
144 dev_dbg(db->dev, msg); \
145 } \
146 } while (0)
147
148 static inline board_info_t *to_dm9000_board(struct net_device *dev)
149 {
150 return netdev_priv(dev);
151 }
152
153 /* DM9000 network board routine ---------------------------- */
154
155 static void
156 dm9000_reset(board_info_t * db)
157 {
158 dev_dbg(db->dev, "resetting device\n");
159
160 /* RESET device */
161 writeb(DM9000_NCR, db->io_addr);
162 udelay(200);
163 writeb(NCR_RST, db->io_data);
164 udelay(200);
165 }
166
167 /*
168 * Read a byte from I/O port
169 */
170 static u8
171 ior(board_info_t * db, int reg)
172 {
173 writeb(reg, db->io_addr);
174 return readb(db->io_data);
175 }
176
177 /*
178 * Write a byte to I/O port
179 */
180
181 static void
182 iow(board_info_t * db, int reg, int value)
183 {
184 writeb(reg, db->io_addr);
185 writeb(value, db->io_data);
186 }
187
188 /* routines for sending block to chip */
189
190 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
191 {
192 writesb(reg, data, count);
193 }
194
195 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
196 {
197 writesw(reg, data, (count+1) >> 1);
198 }
199
200 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
201 {
202 writesl(reg, data, (count+3) >> 2);
203 }
204
205 /* input block from chip to memory */
206
207 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
208 {
209 readsb(reg, data, count);
210 }
211
212
213 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
214 {
215 readsw(reg, data, (count+1) >> 1);
216 }
217
218 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
219 {
220 readsl(reg, data, (count+3) >> 2);
221 }
222
223 /* dump block from chip to null */
224
225 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
226 {
227 int i;
228 int tmp;
229
230 for (i = 0; i < count; i++)
231 tmp = readb(reg);
232 }
233
234 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
235 {
236 int i;
237 int tmp;
238
239 count = (count + 1) >> 1;
240
241 for (i = 0; i < count; i++)
242 tmp = readw(reg);
243 }
244
245 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
246 {
247 int i;
248 int tmp;
249
250 count = (count + 3) >> 2;
251
252 for (i = 0; i < count; i++)
253 tmp = readl(reg);
254 }
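/* Note that the 16 and 32 bit helpers round the byte count up to a whole
 * number of bus transfers: a 61 byte block, for example, becomes 31
 * 16-bit or 16 32-bit accesses, so a few extra bytes beyond the requested
 * length may be transferred to or from the chip's internal pointer.
 */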
255
256 /* dm9000_set_io
257 *
258 * select the specified set of io routines to use with the
259 * device
260 */
261
262 static void dm9000_set_io(struct board_info *db, int byte_width)
263 {
264 /* use the size of the data resource to work out what IO
265 * routines we want to use
266 */
267
268 switch (byte_width) {
269 case 1:
270 db->dumpblk = dm9000_dumpblk_8bit;
271 db->outblk = dm9000_outblk_8bit;
272 db->inblk = dm9000_inblk_8bit;
273 break;
274
275
276 case 3:
277 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
278 case 2:
279 db->dumpblk = dm9000_dumpblk_16bit;
280 db->outblk = dm9000_outblk_16bit;
281 db->inblk = dm9000_inblk_16bit;
282 break;
283
284 case 4:
285 default:
286 db->dumpblk = dm9000_dumpblk_32bit;
287 db->outblk = dm9000_outblk_32bit;
288 db->inblk = dm9000_inblk_32bit;
289 break;
290 }
291 }
292
293 static void dm9000_schedule_poll(board_info_t *db)
294 {
295 if (db->type == TYPE_DM9000E)
296 schedule_delayed_work(&db->phy_poll, HZ * 2);
297 }
298
299 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
300 {
301 board_info_t *dm = to_dm9000_board(dev);
302
303 if (!netif_running(dev))
304 return -EINVAL;
305
306 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
307 }
308
309 static unsigned int
310 dm9000_read_locked(board_info_t *db, int reg)
311 {
312 unsigned long flags;
313 unsigned int ret;
314
315 spin_lock_irqsave(&db->lock, flags);
316 ret = ior(db, reg);
317 spin_unlock_irqrestore(&db->lock, flags);
318
319 return ret;
320 }
321
322 static int dm9000_wait_eeprom(board_info_t *db)
323 {
324 unsigned int status;
325 int timeout = 8; /* wait max 8msec */
326
327 /* The DM9000 data sheets say we should be able to
328 * poll the ERRE bit in EPCR to wait for the EEPROM
329 * operation. From testing several chips, this bit
330 * does not seem to work.
331 *
332 * We attempt to use the bit, but fall back to the
333 * timeout (which is why we do not return an error
334 * on expiry) to say that the EEPROM operation has
335 * completed.
336 */
337
338 while (1) {
339 status = dm9000_read_locked(db, DM9000_EPCR);
340
341 if ((status & EPCR_ERRE) == 0)
342 break;
343
344 msleep(1);
345
346 if (timeout-- < 0) {
347 dev_dbg(db->dev, "timeout waiting EEPROM\n");
348 break;
349 }
350 }
351
352 return 0;
353 }
354
355 /*
356 * Read a word of data from the EEPROM
357 */
358 static void
359 dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
360 {
361 unsigned long flags;
362
363 if (db->flags & DM9000_PLATF_NO_EEPROM) {
364 to[0] = 0xff;
365 to[1] = 0xff;
366 return;
367 }
368
369 mutex_lock(&db->addr_lock);
370
371 spin_lock_irqsave(&db->lock, flags);
372
373 iow(db, DM9000_EPAR, offset);
374 iow(db, DM9000_EPCR, EPCR_ERPRR);
375
376 spin_unlock_irqrestore(&db->lock, flags);
377
378 dm9000_wait_eeprom(db);
379
380 /* delay for at least 150us */
381 msleep(1);
382
383 spin_lock_irqsave(&db->lock, flags);
384
385 iow(db, DM9000_EPCR, 0x0);
386
387 to[0] = ior(db, DM9000_EPDRL);
388 to[1] = ior(db, DM9000_EPDRH);
389
390 spin_unlock_irqrestore(&db->lock, flags);
391
392 mutex_unlock(&db->addr_lock);
393 }
394
395 /*
396 * Write a word of data to the EEPROM (SROM)
397 */
398 static void
399 dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
400 {
401 unsigned long flags;
402
403 if (db->flags & DM9000_PLATF_NO_EEPROM)
404 return;
405
406 mutex_lock(&db->addr_lock);
407
408 spin_lock_irqsave(&db->lock, flags);
409 iow(db, DM9000_EPAR, offset);
410 iow(db, DM9000_EPDRH, data[1]);
411 iow(db, DM9000_EPDRL, data[0]);
412 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
413 spin_unlock_irqrestore(&db->lock, flags);
414
415 dm9000_wait_eeprom(db);
416
417 mdelay(1); /* wait at least 150uS to clear */
418
419 spin_lock_irqsave(&db->lock, flags);
420 iow(db, DM9000_EPCR, 0);
421 spin_unlock_irqrestore(&db->lock, flags);
422
423 mutex_unlock(&db->addr_lock);
424 }
425
426 /* ethtool ops */
427
428 static void dm9000_get_drvinfo(struct net_device *dev,
429 struct ethtool_drvinfo *info)
430 {
431 board_info_t *dm = to_dm9000_board(dev);
432
433 strcpy(info->driver, CARDNAME);
434 strcpy(info->version, DRV_VERSION);
435 strcpy(info->bus_info, to_platform_device(dm->dev)->name);
436 }
437
438 static u32 dm9000_get_msglevel(struct net_device *dev)
439 {
440 board_info_t *dm = to_dm9000_board(dev);
441
442 return dm->msg_enable;
443 }
444
445 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
446 {
447 board_info_t *dm = to_dm9000_board(dev);
448
449 dm->msg_enable = value;
450 }
451
452 static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
453 {
454 board_info_t *dm = to_dm9000_board(dev);
455
456 mii_ethtool_gset(&dm->mii, cmd);
457 return 0;
458 }
459
460 static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
461 {
462 board_info_t *dm = to_dm9000_board(dev);
463
464 return mii_ethtool_sset(&dm->mii, cmd);
465 }
466
467 static int dm9000_nway_reset(struct net_device *dev)
468 {
469 board_info_t *dm = to_dm9000_board(dev);
470 return mii_nway_restart(&dm->mii);
471 }
472
473 static uint32_t dm9000_get_rx_csum(struct net_device *dev)
474 {
475 board_info_t *dm = to_dm9000_board(dev);
476 return dm->rx_csum;
477 }
478
479 static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
480 {
481 board_info_t *dm = to_dm9000_board(dev);
482
483 if (dm->can_csum) {
484 dm->rx_csum = data;
485 iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
486
487 return 0;
488 }
489
490 return -EOPNOTSUPP;
491 }
492
493 static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
494 {
495 board_info_t *dm = to_dm9000_board(dev);
496 unsigned long flags;
497 int ret;
498
499 spin_lock_irqsave(&dm->lock, flags);
500 ret = dm9000_set_rx_csum_unlocked(dev, data);
501 spin_unlock_irqrestore(&dm->lock, flags);
502
503 return ret;
504 }
505
506 static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
507 {
508 board_info_t *dm = to_dm9000_board(dev);
509 int ret = -EOPNOTSUPP;
510
511 if (dm->can_csum)
512 ret = ethtool_op_set_tx_csum(dev, data);
513 return ret;
514 }
515
516 static u32 dm9000_get_link(struct net_device *dev)
517 {
518 board_info_t *dm = to_dm9000_board(dev);
519 u32 ret;
520
521 if (dm->flags & DM9000_PLATF_EXT_PHY)
522 ret = mii_link_ok(&dm->mii);
523 else
524 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
525
526 return ret;
527 }
528
529 #define DM_EEPROM_MAGIC (0x444D394B)
530
531 static int dm9000_get_eeprom_len(struct net_device *dev)
532 {
533 return 128;
534 }
535
536 static int dm9000_get_eeprom(struct net_device *dev,
537 struct ethtool_eeprom *ee, u8 *data)
538 {
539 board_info_t *dm = to_dm9000_board(dev);
540 int offset = ee->offset;
541 int len = ee->len;
542 int i;
543
544 /* EEPROM access is aligned to two bytes */
545
546 if ((len & 1) != 0 || (offset & 1) != 0)
547 return -EINVAL;
548
549 if (dm->flags & DM9000_PLATF_NO_EEPROM)
550 return -ENOENT;
551
552 ee->magic = DM_EEPROM_MAGIC;
553
554 for (i = 0; i < len; i += 2)
555 dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
556
557 return 0;
558 }
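/* The EEPROM is word addressed, which is why the byte offset and length
 * from ethtool must be even and are halved before each dm9000_read_eeprom()
 * call: bytes 6-7 of the ethtool view, for example, live in EEPROM word 3.
 */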
559
560 static int dm9000_set_eeprom(struct net_device *dev,
561 struct ethtool_eeprom *ee, u8 *data)
562 {
563 board_info_t *dm = to_dm9000_board(dev);
564 int offset = ee->offset;
565 int len = ee->len;
566 int i;
567
568 /* EEPROM access is aligned to two bytes */
569
570 if ((len & 1) != 0 || (offset & 1) != 0)
571 return -EINVAL;
572
573 if (dm->flags & DM9000_PLATF_NO_EEPROM)
574 return -ENOENT;
575
576 if (ee->magic != DM_EEPROM_MAGIC)
577 return -EINVAL;
578
579 for (i = 0; i < len; i += 2)
580 dm9000_write_eeprom(dm, (offset + i) / 2, data + i);
581
582 return 0;
583 }
584
585 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
586 {
587 board_info_t *dm = to_dm9000_board(dev);
588
589 memset(w, 0, sizeof(struct ethtool_wolinfo));
590
591 /* note, we could probably support wake-phy too */
592 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
593 w->wolopts = dm->wake_state;
594 }
595
596 static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
597 {
598 board_info_t *dm = to_dm9000_board(dev);
599 unsigned long flags;
600 u32 opts = w->wolopts;
601 u32 wcr = 0;
602
603 if (!dm->wake_supported)
604 return -EOPNOTSUPP;
605
606 if (opts & ~WAKE_MAGIC)
607 return -EINVAL;
608
609 if (opts & WAKE_MAGIC)
610 wcr |= WCR_MAGICEN;
611
612 mutex_lock(&dm->addr_lock);
613
614 spin_lock_irqsave(&dm->lock, flags);
615 iow(dm, DM9000_WCR, wcr);
616 spin_unlock_irqrestore(&dm->lock, flags);
617
618 mutex_unlock(&dm->addr_lock);
619
620 if (dm->wake_state != opts) {
621 /* change in wol state, update IRQ state */
622
623 if (!dm->wake_state)
624 set_irq_wake(dm->irq_wake, 1);
625 else if (dm->wake_state && !opts)
626 set_irq_wake(dm->irq_wake, 0);
627 }
628
629 dm->wake_state = opts;
630 return 0;
631 }
632
633 static const struct ethtool_ops dm9000_ethtool_ops = {
634 .get_drvinfo = dm9000_get_drvinfo,
635 .get_settings = dm9000_get_settings,
636 .set_settings = dm9000_set_settings,
637 .get_msglevel = dm9000_get_msglevel,
638 .set_msglevel = dm9000_set_msglevel,
639 .nway_reset = dm9000_nway_reset,
640 .get_link = dm9000_get_link,
641 .get_wol = dm9000_get_wol,
642 .set_wol = dm9000_set_wol,
643 .get_eeprom_len = dm9000_get_eeprom_len,
644 .get_eeprom = dm9000_get_eeprom,
645 .set_eeprom = dm9000_set_eeprom,
646 .get_rx_csum = dm9000_get_rx_csum,
647 .set_rx_csum = dm9000_set_rx_csum,
648 .get_tx_csum = ethtool_op_get_tx_csum,
649 .set_tx_csum = dm9000_set_tx_csum,
650 };
651
652 static void dm9000_show_carrier(board_info_t *db,
653 unsigned carrier, unsigned nsr)
654 {
655 struct net_device *ndev = db->ndev;
656 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
657
658 if (carrier)
659 dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
660 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
661 (ncr & NCR_FDX) ? "full" : "half");
662 else
663 dev_info(db->dev, "%s: link down\n", ndev->name);
664 }
665
666 static void
667 dm9000_poll_work(struct work_struct *w)
668 {
669 struct delayed_work *dw = to_delayed_work(w);
670 board_info_t *db = container_of(dw, board_info_t, phy_poll);
671 struct net_device *ndev = db->ndev;
672
673 if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
674 !(db->flags & DM9000_PLATF_EXT_PHY)) {
675 unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
676 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
677 unsigned new_carrier;
678
679 new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
680
681 if (old_carrier != new_carrier) {
682 if (netif_msg_link(db))
683 dm9000_show_carrier(db, new_carrier, nsr);
684
685 if (!new_carrier)
686 netif_carrier_off(ndev);
687 else
688 netif_carrier_on(ndev);
689 }
690 } else
691 mii_check_media(&db->mii, netif_msg_link(db), 0);
692
693 if (netif_running(ndev))
694 dm9000_schedule_poll(db);
695 }
696
697 /* dm9000_release_board
698 *
699 * release a board, and any mapped resources
700 */
701
702 static void
703 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
704 {
705 /* unmap our resources */
706
707 iounmap(db->io_addr);
708 iounmap(db->io_data);
709
710 /* release the resources */
711
712 release_resource(db->data_req);
713 kfree(db->data_req);
714
715 release_resource(db->addr_req);
716 kfree(db->addr_req);
717 }
718
719 static unsigned char dm9000_type_to_char(enum dm9000_type type)
720 {
721 switch (type) {
722 case TYPE_DM9000E: return 'e';
723 case TYPE_DM9000A: return 'a';
724 case TYPE_DM9000B: return 'b';
725 }
726
727 return '?';
728 }
729
730 /*
731 * Set DM9000 multicast address
732 */
733 static void
734 dm9000_hash_table_unlocked(struct net_device *dev)
735 {
736 board_info_t *db = netdev_priv(dev);
737 struct netdev_hw_addr *ha;
738 int i, oft;
739 u32 hash_val;
740 u16 hash_table[4];
741 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
742
743 dm9000_dbg(db, 1, "entering %s\n", __func__);
744
745 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
746 iow(db, oft, dev->dev_addr[i]);
747
748 /* Clear Hash Table */
749 for (i = 0; i < 4; i++)
750 hash_table[i] = 0x0;
751
752 /* broadcast address */
753 hash_table[3] = 0x8000;
754
755 if (dev->flags & IFF_PROMISC)
756 rcr |= RCR_PRMSC;
757
758 if (dev->flags & IFF_ALLMULTI)
759 rcr |= RCR_ALL;
760
761 /* the multicast address in Hash Table : 64 bits */
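/* The low 6 CRC bits select one of the 64 filter bits: a hash value of
 * 35 (0x23), for instance, sets bit 3 of hash_table[2]. Bit 63
 * (hash_table[3] bit 15) was already reserved above for broadcast frames.
 */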
762 netdev_for_each_mc_addr(ha, dev) {
763 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
764 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
765 }
766
767 /* Write the hash table to MAC MD table */
768 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
769 iow(db, oft++, hash_table[i]);
770 iow(db, oft++, hash_table[i] >> 8);
771 }
772
773 iow(db, DM9000_RCR, rcr);
774 }
775
776 static void
777 dm9000_hash_table(struct net_device *dev)
778 {
779 board_info_t *db = netdev_priv(dev);
780 unsigned long flags;
781
782 spin_lock_irqsave(&db->lock, flags);
783 dm9000_hash_table_unlocked(dev);
784 spin_unlock_irqrestore(&db->lock, flags);
785 }
786
787 /*
788 * Initialize dm9000 board
789 */
790 static void
791 dm9000_init_dm9000(struct net_device *dev)
792 {
793 board_info_t *db = netdev_priv(dev);
794 unsigned int imr;
795 unsigned int ncr;
796
797 dm9000_dbg(db, 1, "entering %s\n", __func__);
798
799 /* I/O mode */
800 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
801
802 /* Checksum mode */
803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
804
805 /* GPIO0 on pre-activate PHY */
806 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
807 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
808 iow(db, DM9000_GPR, 0); /* Enable PHY */
809
810 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
811
812 /* If WoL is supported, always keep NCR_WAKEEN set, otherwise wake
813 * events are simply discarded while it is clear. The wake mask in
814 * DM9000_WCR already selects which events may wake us. */
815 if (db->wake_supported)
816 ncr |= NCR_WAKEEN;
817
818 iow(db, DM9000_NCR, ncr);
819
820 /* Program operating register */
821 iow(db, DM9000_TCR, 0); /* TX Polling clear */
822 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
823 iow(db, DM9000_FCR, 0xff); /* Flow Control */
824 iow(db, DM9000_SMCR, 0); /* Special Mode */
825 /* clear TX status */
826 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
827 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
828
829 /* Set address filter table */
830 dm9000_hash_table_unlocked(dev);
831
832 imr = IMR_PAR | IMR_PTM | IMR_PRM;
833 if (db->type != TYPE_DM9000E)
834 imr |= IMR_LNKCHNG;
835
836 db->imr_all = imr;
837
838 /* Enable TX/RX interrupt mask */
839 iow(db, DM9000_IMR, imr);
840
841 /* Init Driver variable */
842 db->tx_pkt_cnt = 0;
843 db->queue_pkt_len = 0;
844 dev->trans_start = jiffies;
845 }
846
847 /* Our watchdog timed out. Called by the networking layer */
848 static void dm9000_timeout(struct net_device *dev)
849 {
850 board_info_t *db = netdev_priv(dev);
851 u8 reg_save;
852 unsigned long flags;
853
854 /* Save previous register address */
855 reg_save = readb(db->io_addr);
856 spin_lock_irqsave(&db->lock, flags);
857
858 netif_stop_queue(dev);
859 dm9000_reset(db);
860 dm9000_init_dm9000(dev);
861 /* We can accept TX packets again */
862 dev->trans_start = jiffies; /* prevent tx timeout */
863 netif_wake_queue(dev);
864
865 /* Restore previous register address */
866 writeb(reg_save, db->io_addr);
867 spin_unlock_irqrestore(&db->lock, flags);
868 }
869
870 static void dm9000_send_packet(struct net_device *dev,
871 int ip_summed,
872 u16 pkt_len)
873 {
874 board_info_t *dm = to_dm9000_board(dev);
875
876 /* The DM9000 is not smart enough to leave fragmented packets alone. */
877 if (dm->ip_summed != ip_summed) {
878 if (ip_summed == CHECKSUM_NONE)
879 iow(dm, DM9000_TCCR, 0);
880 else
881 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
882 dm->ip_summed = ip_summed;
883 }
884
885 /* Set TX length to DM9000 */
886 iow(dm, DM9000_TXPLL, pkt_len);
887 iow(dm, DM9000_TXPLH, pkt_len >> 8);
888
889 /* Issue TX polling command */
890 iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
891 }
892
893 /*
894 * Hardware start transmission.
895 * Send a packet to media from the upper layer.
896 */
897 static int
898 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
899 {
900 unsigned long flags;
901 board_info_t *db = netdev_priv(dev);
902
903 dm9000_dbg(db, 3, "%s:\n", __func__);
904
905 if (db->tx_pkt_cnt > 1)
906 return NETDEV_TX_BUSY;
907
908 spin_lock_irqsave(&db->lock, flags);
909
910 /* Move data to DM9000 TX RAM */
911 writeb(DM9000_MWCMD, db->io_addr);
912
913 (db->outblk)(db->io_data, skb->data, skb->len);
914 dev->stats.tx_bytes += skb->len;
915
916 db->tx_pkt_cnt++;
917 /* TX control: First packet immediately send, second packet queue */
918 if (db->tx_pkt_cnt == 1) {
919 dm9000_send_packet(dev, skb->ip_summed, skb->len);
920 } else {
921 /* Second packet */
922 db->queue_pkt_len = skb->len;
923 db->queue_ip_summed = skb->ip_summed;
924 netif_stop_queue(dev);
925 }
926
927 spin_unlock_irqrestore(&db->lock, flags);
928
929 /* free this SKB */
930 dev_kfree_skb(skb);
931
932 return NETDEV_TX_OK;
933 }
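/* The chip's TX SRAM holds two pending frames, which is why tx_pkt_cnt is
 * capped at two above: the first frame is pushed onto the wire at once,
 * the second is staged and the queue stopped, and dm9000_tx_done() below
 * transmits the staged frame and wakes the queue when the chip reports
 * completion.
 */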
934
935 /*
936 * DM9000 interrupt handler
937 * receive the packet to upper layer, free the transmitted packet
938 */
939
940 static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
941 {
942 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
943
944 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
945 /* One packet sent complete */
946 db->tx_pkt_cnt--;
947 dev->stats.tx_packets++;
948
949 if (netif_msg_tx_done(db))
950 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
951
952 /* Queue packet check & send */
953 if (db->tx_pkt_cnt > 0)
954 dm9000_send_packet(dev, db->queue_ip_summed,
955 db->queue_pkt_len);
956 netif_wake_queue(dev);
957 }
958 }
959
960 struct dm9000_rxhdr {
961 u8 RxPktReady;
962 u8 RxStatus;
963 __le16 RxLen;
964 } __packed;
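/* Each received frame is preceded in RX SRAM by this 4 byte header: a
 * ready marker (0x01 when a frame is waiting), a status byte mirroring
 * the RSR register, and a little-endian length. The length reported by
 * the chip appears to include the trailing 4 byte CRC, which is why
 * dm9000_rx() trims four bytes when sizing the skb.
 */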
965
966 /*
967 * Received a packet and pass to upper layer
968 */
969 static void
970 dm9000_rx(struct net_device *dev)
971 {
972 board_info_t *db = netdev_priv(dev);
973 struct dm9000_rxhdr rxhdr;
974 struct sk_buff *skb;
975 u8 rxbyte, *rdptr;
976 bool GoodPacket;
977 int RxLen;
978
979 /* Check packet ready or not */
980 do {
981 ior(db, DM9000_MRCMDX); /* Dummy read */
982
983 /* Get most updated data */
984 rxbyte = readb(db->io_data);
985
986 /* Status check: this byte must be 0 or 1 */
987 if (rxbyte & DM9000_PKT_ERR) {
988 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
989 iow(db, DM9000_RCR, 0x00); /* Stop Device */
990 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
991 return;
992 }
993
994 if (!(rxbyte & DM9000_PKT_RDY))
995 return;
996
997 /* A packet is ready now; get its status and length */
998 GoodPacket = true;
999 writeb(DM9000_MRCMD, db->io_addr);
1000
1001 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
1002
1003 RxLen = le16_to_cpu(rxhdr.RxLen);
1004
1005 if (netif_msg_rx_status(db))
1006 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1007 rxhdr.RxStatus, RxLen);
1008
1009 /* Packet Status check */
1010 if (RxLen < 0x40) {
1011 GoodPacket = false;
1012 if (netif_msg_rx_err(db))
1013 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1014 }
1015
1016 if (RxLen > DM9000_PKT_MAX) {
1017 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1018 }
1019
1020 /* rxhdr.RxStatus is identical to RSR register. */
1021 if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1022 RSR_PLE | RSR_RWTO |
1023 RSR_LCS | RSR_RF)) {
1024 GoodPacket = false;
1025 if (rxhdr.RxStatus & RSR_FOE) {
1026 if (netif_msg_rx_err(db))
1027 dev_dbg(db->dev, "fifo error\n");
1028 dev->stats.rx_fifo_errors++;
1029 }
1030 if (rxhdr.RxStatus & RSR_CE) {
1031 if (netif_msg_rx_err(db))
1032 dev_dbg(db->dev, "crc error\n");
1033 dev->stats.rx_crc_errors++;
1034 }
1035 if (rxhdr.RxStatus & RSR_RF) {
1036 if (netif_msg_rx_err(db))
1037 dev_dbg(db->dev, "length error\n");
1038 dev->stats.rx_length_errors++;
1039 }
1040 }
1041
1042 /* Move data from DM9000 */
1043 if (GoodPacket &&
1044 ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
1045 skb_reserve(skb, 2);
1046 rdptr = (u8 *) skb_put(skb, RxLen - 4);
1047
1048 /* Read received packet from RX SRAM */
1049
1050 (db->inblk)(db->io_data, rdptr, RxLen);
1051 dev->stats.rx_bytes += RxLen;
1052
1053 /* Pass to upper layer */
1054 skb->protocol = eth_type_trans(skb, dev);
1055 if (db->rx_csum) {
1056 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1057 skb->ip_summed = CHECKSUM_UNNECESSARY;
1058 else
1059 skb->ip_summed = CHECKSUM_NONE;
1060 }
1061 netif_rx(skb);
1062 dev->stats.rx_packets++;
1063
1064 } else {
1065 /* need to dump the packet's data */
1066
1067 (db->dumpblk)(db->io_data, RxLen);
1068 }
1069 } while (rxbyte & DM9000_PKT_RDY);
1070 }
1071
1072 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1073 {
1074 struct net_device *dev = dev_id;
1075 board_info_t *db = netdev_priv(dev);
1076 int int_status;
1077 unsigned long flags;
1078 u8 reg_save;
1079
1080 dm9000_dbg(db, 3, "entering %s\n", __func__);
1081
1082 /* A real interrupt has arrived */
1083
1084 /* holders of db->lock must always block IRQs */
1085 spin_lock_irqsave(&db->lock, flags);
1086
1087 /* Save previous register address */
1088 reg_save = readb(db->io_addr);
1089
1090 /* Disable all interrupts */
1091 iow(db, DM9000_IMR, IMR_PAR);
1092
1093 /* Got DM9000 interrupt status */
1094 int_status = ior(db, DM9000_ISR); /* Got ISR */
1095 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
1096
1097 if (netif_msg_intr(db))
1098 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1099
1100 /* Received the coming packet */
1101 if (int_status & ISR_PRS)
1102 dm9000_rx(dev);
1103
1104 /* Transmit interrupt check */
1105 if (int_status & ISR_PTS)
1106 dm9000_tx_done(dev, db);
1107
1108 if (db->type != TYPE_DM9000E) {
1109 if (int_status & ISR_LNKCHNG) {
1110 /* fire a link-change request */
1111 schedule_delayed_work(&db->phy_poll, 1);
1112 }
1113 }
1114
1115 /* Re-enable interrupt mask */
1116 iow(db, DM9000_IMR, db->imr_all);
1117
1118 /* Restore previous register address */
1119 writeb(reg_save, db->io_addr);
1120
1121 spin_unlock_irqrestore(&db->lock, flags);
1122
1123 return IRQ_HANDLED;
1124 }
1125
1126 static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1127 {
1128 struct net_device *dev = dev_id;
1129 board_info_t *db = netdev_priv(dev);
1130 unsigned long flags;
1131 unsigned nsr, wcr;
1132
1133 spin_lock_irqsave(&db->lock, flags);
1134
1135 nsr = ior(db, DM9000_NSR);
1136 wcr = ior(db, DM9000_WCR);
1137
1138 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1139
1140 if (nsr & NSR_WAKEST) {
1141 /* clear the wake status so it is not signalled again */
1142 iow(db, DM9000_NSR, NSR_WAKEST);
1143
1144 if (wcr & WCR_LINKST)
1145 dev_info(db->dev, "wake by link status change\n");
1146 if (wcr & WCR_SAMPLEST)
1147 dev_info(db->dev, "wake by sample packet\n");
1148 if (wcr & WCR_MAGICST)
1149 dev_info(db->dev, "wake by magic packet\n");
1150 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1151 dev_err(db->dev, "wake signalled with no reason? "
1152 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1153
1154 }
1155
1156 spin_unlock_irqrestore(&db->lock, flags);
1157
1158 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1159 }
1160
1161 #ifdef CONFIG_NET_POLL_CONTROLLER
1162 /*
1163 * Used by netconsole
1164 */
1165 static void dm9000_poll_controller(struct net_device *dev)
1166 {
1167 disable_irq(dev->irq);
1168 dm9000_interrupt(dev->irq, dev);
1169 enable_irq(dev->irq);
1170 }
1171 #endif
1172
1173 /*
1174 * Open the interface.
1175 * The interface is opened whenever "ifconfig" activates it.
1176 */
1177 static int
1178 dm9000_open(struct net_device *dev)
1179 {
1180 board_info_t *db = netdev_priv(dev);
1181 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1182
1183 if (netif_msg_ifup(db))
1184 dev_dbg(db->dev, "enabling %s\n", dev->name);
1185
1186 /* If there is no IRQ type specified, default to something that
1187 * may work, and tell the user that this is a problem */
1188
1189 if (irqflags == IRQF_TRIGGER_NONE)
1190 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1191
1192 irqflags |= IRQF_SHARED;
1193
1194 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1195 return -EAGAIN;
1196
1197 /* Initialize DM9000 board */
1198 dm9000_reset(db);
1199 dm9000_init_dm9000(dev);
1200
1201 /* Init driver variable */
1202 db->dbug_cnt = 0;
1203
1204 mii_check_media(&db->mii, netif_msg_link(db), 1);
1205 netif_start_queue(dev);
1206
1207 dm9000_schedule_poll(db);
1208
1209 return 0;
1210 }
1211
1212 /*
1213 * Sleep, either by using msleep() or if we are suspending, then
1214 * use mdelay() to sleep.
1215 */
1216 static void dm9000_msleep(board_info_t *db, unsigned int ms)
1217 {
1218 if (db->in_suspend)
1219 mdelay(ms);
1220 else
1221 msleep(ms);
1222 }
1223
1224 /*
1225 * Read a word from phyxcer
1226 */
1227 static int
1228 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1229 {
1230 board_info_t *db = netdev_priv(dev);
1231 unsigned long flags;
1232 unsigned int reg_save;
1233 int ret;
1234
1235 mutex_lock(&db->addr_lock);
1236
1237 spin_lock_irqsave(&db->lock,flags);
1238
1239 /* Save previous register address */
1240 reg_save = readb(db->io_addr);
1241
1242 /* Fill the phyxcer register into REG_0C */
1243 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1244
1245 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
1246
1247 writeb(reg_save, db->io_addr);
1248 spin_unlock_irqrestore(&db->lock,flags);
1249
1250 dm9000_msleep(db, 1); /* Wait read complete */
1251
1252 spin_lock_irqsave(&db->lock,flags);
1253 reg_save = readb(db->io_addr);
1254
1255 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
1256
1257 /* The read data keeps on REG_0D & REG_0E */
1258 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
1259
1260 /* restore the previous address */
1261 writeb(reg_save, db->io_addr);
1262 spin_unlock_irqrestore(&db->lock,flags);
1263
1264 mutex_unlock(&db->addr_lock);
1265
1266 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
1267 return ret;
1268 }
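/* The EPCR/EPAR/EPDR registers are shared between EEPROM and PHY access:
 * EPCR_EPOS selects the PHY (MII management) side, and DM9000_PHY in EPAR
 * supplies the internal PHY address alongside the register number. This
 * sharing is why both paths are serialised with db->addr_lock.
 */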
1269
1270 /*
1271 * Write a word to phyxcer
1272 */
1273 static void
1274 dm9000_phy_write(struct net_device *dev,
1275 int phyaddr_unused, int reg, int value)
1276 {
1277 board_info_t *db = netdev_priv(dev);
1278 unsigned long flags;
1279 unsigned long reg_save;
1280
1281 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
1282 mutex_lock(&db->addr_lock);
1283
1284 spin_lock_irqsave(&db->lock,flags);
1285
1286 /* Save previous register address */
1287 reg_save = readb(db->io_addr);
1288
1289 /* Fill the phyxcer register into REG_0C */
1290 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1291
1292 /* Fill the written data into REG_0D & REG_0E */
1293 iow(db, DM9000_EPDRL, value);
1294 iow(db, DM9000_EPDRH, value >> 8);
1295
1296 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
1297
1298 writeb(reg_save, db->io_addr);
1299 spin_unlock_irqrestore(&db->lock, flags);
1300
1301 dm9000_msleep(db, 1); /* Wait write complete */
1302
1303 spin_lock_irqsave(&db->lock,flags);
1304 reg_save = readb(db->io_addr);
1305
1306 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
1307
1308 /* restore the previous address */
1309 writeb(reg_save, db->io_addr);
1310
1311 spin_unlock_irqrestore(&db->lock, flags);
1312 mutex_unlock(&db->addr_lock);
1313 }
1314
1315 static void
1316 dm9000_shutdown(struct net_device *dev)
1317 {
1318 board_info_t *db = netdev_priv(dev);
1319
1320 /* RESET device */
1321 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
1322 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
1323 iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
1324 iow(db, DM9000_RCR, 0x00); /* Disable RX */
1325 }
1326
1327 /*
1328 * Stop the interface.
1329 * The interface is stopped when it is brought down.
1330 */
1331 static int
1332 dm9000_stop(struct net_device *ndev)
1333 {
1334 board_info_t *db = netdev_priv(ndev);
1335
1336 if (netif_msg_ifdown(db))
1337 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1338
1339 cancel_delayed_work_sync(&db->phy_poll);
1340
1341 netif_stop_queue(ndev);
1342 netif_carrier_off(ndev);
1343
1344 /* free interrupt */
1345 free_irq(ndev->irq, ndev);
1346
1347 dm9000_shutdown(ndev);
1348
1349 return 0;
1350 }
1351
1352 static const struct net_device_ops dm9000_netdev_ops = {
1353 .ndo_open = dm9000_open,
1354 .ndo_stop = dm9000_stop,
1355 .ndo_start_xmit = dm9000_start_xmit,
1356 .ndo_tx_timeout = dm9000_timeout,
1357 .ndo_set_multicast_list = dm9000_hash_table,
1358 .ndo_do_ioctl = dm9000_ioctl,
1359 .ndo_change_mtu = eth_change_mtu,
1360 .ndo_validate_addr = eth_validate_addr,
1361 .ndo_set_mac_address = eth_mac_addr,
1362 #ifdef CONFIG_NET_POLL_CONTROLLER
1363 .ndo_poll_controller = dm9000_poll_controller,
1364 #endif
1365 };
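/* An illustrative board-file registration (not part of this driver) that
 * would satisfy dm9000_probe() below: two memory resources (index port,
 * then data port), an interrupt, and platform data selecting the bus
 * width. The addresses, the IRQ_BOARD_ETH number and the trigger flag are
 * placeholders for whatever the board actually wires up.
 *
 *	static struct resource dm9000_resources[] = {
 *		[0] = {
 *			.start	= 0x20000000,
 *			.end	= 0x20000001,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		[1] = {
 *			.start	= 0x20000004,
 *			.end	= 0x20000005,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		[2] = {
 *			.start	= IRQ_BOARD_ETH,
 *			.end	= IRQ_BOARD_ETH,
 *			.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
 *		},
 *	};
 *
 *	static struct dm9000_plat_data dm9000_platdata = {
 *		.flags	= DM9000_PLATF_16BITONLY,
 *	};
 *
 *	static struct platform_device dm9000_device = {
 *		.name			= "dm9000",
 *		.id			= -1,
 *		.num_resources		= ARRAY_SIZE(dm9000_resources),
 *		.resource		= dm9000_resources,
 *		.dev			= {
 *			.platform_data	= &dm9000_platdata,
 *		},
 *	};
 */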
1366
1367 /*
1368 * Search DM9000 board, allocate space and register it
1369 */
1370 static int __devinit
1371 dm9000_probe(struct platform_device *pdev)
1372 {
1373 struct dm9000_plat_data *pdata = pdev->dev.platform_data;
1374 struct board_info *db; /* Point a board information structure */
1375 struct net_device *ndev;
1376 const unsigned char *mac_src;
1377 int ret = 0;
1378 int iosize;
1379 int i;
1380 u32 id_val;
1381
1382 /* Init network device */
1383 ndev = alloc_etherdev(sizeof(struct board_info));
1384 if (!ndev) {
1385 dev_err(&pdev->dev, "could not allocate device.\n");
1386 return -ENOMEM;
1387 }
1388
1389 SET_NETDEV_DEV(ndev, &pdev->dev);
1390
1391 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1392
1393 /* setup board info structure */
1394 db = netdev_priv(ndev);
1395
1396 db->dev = &pdev->dev;
1397 db->ndev = ndev;
1398
1399 spin_lock_init(&db->lock);
1400 mutex_init(&db->addr_lock);
1401
1402 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1403
1404 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1405 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1406 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1407
1408 if (db->addr_res == NULL || db->data_res == NULL ||
1409 db->irq_res == NULL) {
1410 dev_err(db->dev, "insufficient resources\n");
1411 ret = -ENOENT;
1412 goto out;
1413 }
1414
1415 db->irq_wake = platform_get_irq(pdev, 1);
1416 if (db->irq_wake >= 0) {
1417 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1418
1419 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1420 IRQF_SHARED, dev_name(db->dev), ndev);
1421 if (ret) {
1422 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1423 } else {
1424
1425 /* test to see if irq is really wakeup capable */
1426 ret = set_irq_wake(db->irq_wake, 1);
1427 if (ret) {
1428 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1429 db->irq_wake, ret);
1430 ret = 0;
1431 } else {
1432 set_irq_wake(db->irq_wake, 0);
1433 db->wake_supported = 1;
1434 }
1435 }
1436 }
1437
1438 iosize = resource_size(db->addr_res);
1439 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1440 pdev->name);
1441
1442 if (db->addr_req == NULL) {
1443 dev_err(db->dev, "cannot claim address reg area\n");
1444 ret = -EIO;
1445 goto out;
1446 }
1447
1448 db->io_addr = ioremap(db->addr_res->start, iosize);
1449
1450 if (db->io_addr == NULL) {
1451 dev_err(db->dev, "failed to ioremap address reg\n");
1452 ret = -EINVAL;
1453 goto out;
1454 }
1455
1456 iosize = resource_size(db->data_res);
1457 db->data_req = request_mem_region(db->data_res->start, iosize,
1458 pdev->name);
1459
1460 if (db->data_req == NULL) {
1461 dev_err(db->dev, "cannot claim data reg area\n");
1462 ret = -EIO;
1463 goto out;
1464 }
1465
1466 db->io_data = ioremap(db->data_res->start, iosize);
1467
1468 if (db->io_data == NULL) {
1469 dev_err(db->dev, "failed to ioremap data reg\n");
1470 ret = -EINVAL;
1471 goto out;
1472 }
1473
1474 /* fill in parameters for net-dev structure */
1475 ndev->base_addr = (unsigned long)db->io_addr;
1476 ndev->irq = db->irq_res->start;
1477
1478 /* ensure at least we have a default set of IO routines */
1479 dm9000_set_io(db, iosize);
1480
1481 /* check to see if anything is being over-ridden */
1482 if (pdata != NULL) {
1483 /* check to see if the driver wants to over-ride the
1484 * default IO width */
1485
1486 if (pdata->flags & DM9000_PLATF_8BITONLY)
1487 dm9000_set_io(db, 1);
1488
1489 if (pdata->flags & DM9000_PLATF_16BITONLY)
1490 dm9000_set_io(db, 2);
1491
1492 if (pdata->flags & DM9000_PLATF_32BITONLY)
1493 dm9000_set_io(db, 4);
1494
1495 /* check to see if there are any IO routine
1496 * over-rides */
1497
1498 if (pdata->inblk != NULL)
1499 db->inblk = pdata->inblk;
1500
1501 if (pdata->outblk != NULL)
1502 db->outblk = pdata->outblk;
1503
1504 if (pdata->dumpblk != NULL)
1505 db->dumpblk = pdata->dumpblk;
1506
1507 db->flags = pdata->flags;
1508 }
1509
1510 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1511 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1512 #endif
1513
1514 dm9000_reset(db);
1515
1516 /* try multiple times, DM9000 sometimes gets the read wrong */
1517 for (i = 0; i < 8; i++) {
1518 id_val = ior(db, DM9000_VIDL);
1519 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1520 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1521 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1522
1523 if (id_val == DM9000_ID)
1524 break;
1525 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1526 }
1527
1528 if (id_val != DM9000_ID) {
1529 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1530 ret = -ENODEV;
1531 goto out;
1532 }
1533
1534 /* Identify what type of DM9000 we are working on */
1535
1536 id_val = ior(db, DM9000_CHIPR);
1537 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1538
1539 switch (id_val) {
1540 case CHIPR_DM9000A:
1541 db->type = TYPE_DM9000A;
1542 break;
1543 case CHIPR_DM9000B:
1544 db->type = TYPE_DM9000B;
1545 break;
1546 default:
1547 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1548 db->type = TYPE_DM9000E;
1549 }
1550
1551 /* dm9000a/b are capable of hardware checksum offload */
1552 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1553 db->can_csum = 1;
1554 db->rx_csum = 1;
1555 ndev->features |= NETIF_F_IP_CSUM;
1556 }
1557
1558 /* from this point we assume that we have found a DM9000 */
1559
1560 /* driver system function */
1561 ether_setup(ndev);
1562
1563 ndev->netdev_ops = &dm9000_netdev_ops;
1564 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1565 ndev->ethtool_ops = &dm9000_ethtool_ops;
1566
1567 db->msg_enable = NETIF_MSG_LINK;
1568 db->mii.phy_id_mask = 0x1f;
1569 db->mii.reg_num_mask = 0x1f;
1570 db->mii.force_media = 0;
1571 db->mii.full_duplex = 0;
1572 db->mii.dev = ndev;
1573 db->mii.mdio_read = dm9000_phy_read;
1574 db->mii.mdio_write = dm9000_phy_write;
1575
1576 mac_src = "eeprom";
1577
1578 /* try reading the node address from the attached EEPROM */
1579 for (i = 0; i < 6; i += 2)
1580 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1581
1582 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1583 mac_src = "platform data";
1584 memcpy(ndev->dev_addr, pdata->dev_addr, 6);
1585 }
1586
1587 if (!is_valid_ether_addr(ndev->dev_addr)) {
1588 /* try reading from mac */
1589
1590 mac_src = "chip";
1591 for (i = 0; i < 6; i++)
1592 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1593 }
1594
1595 if (!is_valid_ether_addr(ndev->dev_addr))
1596 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1597 "set using ifconfig\n", ndev->name);
1598
1599 platform_set_drvdata(pdev, ndev);
1600 ret = register_netdev(ndev);
1601
1602 if (ret == 0)
1603 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1604 ndev->name, dm9000_type_to_char(db->type),
1605 db->io_addr, db->io_data, ndev->irq,
1606 ndev->dev_addr, mac_src);
1607 return 0;
1608
1609 out:
1610 dev_err(db->dev, "not found (%d).\n", ret);
1611
1612 dm9000_release_board(pdev, db);
1613 free_netdev(ndev);
1614
1615 return ret;
1616 }
1617
1618 static int
1619 dm9000_drv_suspend(struct device *dev)
1620 {
1621 struct platform_device *pdev = to_platform_device(dev);
1622 struct net_device *ndev = platform_get_drvdata(pdev);
1623 board_info_t *db;
1624
1625 if (ndev) {
1626 db = netdev_priv(ndev);
1627 db->in_suspend = 1;
1628
1629 if (!netif_running(ndev))
1630 return 0;
1631
1632 netif_device_detach(ndev);
1633
1634 /* only shutdown if not using WoL */
1635 if (!db->wake_state)
1636 dm9000_shutdown(ndev);
1637 }
1638 return 0;
1639 }
1640
1641 static int
1642 dm9000_drv_resume(struct device *dev)
1643 {
1644 struct platform_device *pdev = to_platform_device(dev);
1645 struct net_device *ndev = platform_get_drvdata(pdev);
1646 board_info_t *db = netdev_priv(ndev);
1647
1648 if (ndev) {
1649 if (netif_running(ndev)) {
1650 /* If we were not in wake mode, reset the chip so it is in a
1651 * known state in case it lost power while suspended */
1652 if (!db->wake_state) {
1653 dm9000_reset(db);
1654 dm9000_init_dm9000(ndev);
1655 }
1656
1657 netif_device_attach(ndev);
1658 }
1659
1660 db->in_suspend = 0;
1661 }
1662 return 0;
1663 }
1664
1665 static const struct dev_pm_ops dm9000_drv_pm_ops = {
1666 .suspend = dm9000_drv_suspend,
1667 .resume = dm9000_drv_resume,
1668 };
1669
1670 static int __devexit
1671 dm9000_drv_remove(struct platform_device *pdev)
1672 {
1673 struct net_device *ndev = platform_get_drvdata(pdev);
1674
1675 platform_set_drvdata(pdev, NULL);
1676
1677 unregister_netdev(ndev);
1678 dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
1679 free_netdev(ndev); /* free device structure */
1680
1681 dev_dbg(&pdev->dev, "released and freed device\n");
1682 return 0;
1683 }
1684
1685 static struct platform_driver dm9000_driver = {
1686 .driver = {
1687 .name = "dm9000",
1688 .owner = THIS_MODULE,
1689 .pm = &dm9000_drv_pm_ops,
1690 },
1691 .probe = dm9000_probe,
1692 .remove = __devexit_p(dm9000_drv_remove),
1693 };
1694
1695 static int __init
1696 dm9000_init(void)
1697 {
1698 printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
1699
1700 return platform_driver_register(&dm9000_driver);
1701 }
1702
1703 static void __exit
1704 dm9000_cleanup(void)
1705 {
1706 platform_driver_unregister(&dm9000_driver);
1707 }
1708
1709 module_init(dm9000_init);
1710 module_exit(dm9000_cleanup);
1711
1712 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1713 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1714 MODULE_LICENSE("GPL");
1715 MODULE_ALIAS("platform:dm9000");