drivers/net/ethernet/davicom/dm9000.c
1 /*
2 * Davicom DM9000 Fast Ethernet driver for Linux.
3 * Copyright (C) 1997 Sten Wang
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
16 *
17 * Additional updates, Copyright:
18 * Ben Dooks <ben@simtec.co.uk>
19 * Sascha Hauer <s.hauer@pengutronix.de>
20 */
21
22 #include <linux/module.h>
23 #include <linux/ioport.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/interrupt.h>
27 #include <linux/skbuff.h>
28 #include <linux/spinlock.h>
29 #include <linux/crc32.h>
30 #include <linux/mii.h>
31 #include <linux/of.h>
32 #include <linux/of_net.h>
33 #include <linux/ethtool.h>
34 #include <linux/dm9000.h>
35 #include <linux/delay.h>
36 #include <linux/platform_device.h>
37 #include <linux/irq.h>
38 #include <linux/slab.h>
39
40 #include <asm/delay.h>
41 #include <asm/irq.h>
42 #include <asm/io.h>
43
44 #include "dm9000.h"
45
46 /* Board/System/Debug information/definition ---------------- */
47
48 #define DM9000_PHY 0x40 /* PHY address 0x01 */
49
50 #define CARDNAME "dm9000"
51 #define DRV_VERSION "1.31"
52
53 /*
54 * Transmit timeout, default 5 seconds.
55 */
56 static int watchdog = 5000;
57 module_param(watchdog, int, 0400);
58 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
59
60 /*
61 * Debug messages level
62 */
63 static int debug;
64 module_param(debug, int, 0644);
65 MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");
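/* A minimal usage sketch (illustrative, not from the original sources): both
 * parameters can be set when the module is loaded, e.g.
 *
 *	modprobe dm9000 watchdog=10000 debug=2
 *
 * or, for a built-in driver, as dm9000.watchdog=10000 dm9000.debug=2 on the
 * kernel command line. "debug" is also writable at runtime through
 * /sys/module/dm9000/parameters/debug since it is registered with mode 0644.
 */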
66
67 /* DM9000 register address locking.
68 *
69 * The DM9000 uses an address register to control where data written
70 * to the data register goes. This means that the address register
71 * must be preserved over interrupts or similar calls.
72 *
73 * During interrupt and other critical calls, a spinlock is used to
74 * protect the system, but the calls themselves save the address
75 * in the address register in case they are interrupting another
76 * access to the device.
77 *
78 * For general accesses a lock is provided so that calls which are
79 * allowed to sleep are serialised so that the address register does
80 * not need to be saved. This lock also serves to serialise access
81 * to the EEPROM and PHY access registers which are shared between
82 * these two devices.
83 */
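/* A minimal sketch of that pattern, as used by dm9000_phy_read() and
 * dm9000_interrupt() further down (nothing new here, just the shape of it):
 *
 *	spin_lock_irqsave(&db->lock, flags);
 *	reg_save = readb(db->io_addr);		(save the address register)
 *	iow(db, DM9000_SOME_REG, value);	(do the locked access)
 *	writeb(reg_save, db->io_addr);		(restore the address register)
 *	spin_unlock_irqrestore(&db->lock, flags);
 *
 * DM9000_SOME_REG is a placeholder, not a real register name.
 */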
84
85 /* The driver supports the original DM9000E, and now the two newer
86 * devices, DM9000A and DM9000B.
87 */
88
89 enum dm9000_type {
90 TYPE_DM9000E, /* original DM9000 */
91 TYPE_DM9000A,
92 TYPE_DM9000B
93 };
94
95 /* Structure/enum declaration ------------------------------- */
96 typedef struct board_info {
97
98 void __iomem *io_addr; /* Register I/O base address */
99 void __iomem *io_data; /* Data I/O address */
100 u16 irq; /* IRQ */
101
102 u16 tx_pkt_cnt;
103 u16 queue_pkt_len;
104 u16 queue_start_addr;
105 u16 queue_ip_summed;
106 u16 dbug_cnt;
107 u8 io_mode; /* 0:word, 2:byte */
108 u8 phy_addr;
109 u8 imr_all;
110
111 unsigned int flags;
112 unsigned int in_suspend:1;
113 unsigned int wake_supported:1;
114
115 enum dm9000_type type;
116
117 void (*inblk)(void __iomem *port, void *data, int length);
118 void (*outblk)(void __iomem *port, void *data, int length);
119 void (*dumpblk)(void __iomem *port, int length);
120
121 struct device *dev; /* parent device */
122
123 struct resource *addr_res; /* resources found */
124 struct resource *data_res;
125 struct resource *addr_req; /* resources requested */
126 struct resource *data_req;
127 struct resource *irq_res;
128
129 int irq_wake;
130
131 struct mutex addr_lock; /* phy and eeprom access lock */
132
133 struct delayed_work phy_poll;
134 struct net_device *ndev;
135
136 spinlock_t lock;
137
138 struct mii_if_info mii;
139 u32 msg_enable;
140 u32 wake_state;
141
142 int ip_summed;
143 } board_info_t;
144
145 /* debug code */
146
147 #define dm9000_dbg(db, lev, msg...) do { \
148 if ((lev) < debug) { \
149 dev_dbg(db->dev, msg); \
150 } \
151 } while (0)
152
153 static inline board_info_t *to_dm9000_board(struct net_device *dev)
154 {
155 return netdev_priv(dev);
156 }
157
158 /* DM9000 network board routine ---------------------------- */
159
160 /*
161 * Read a byte from I/O port
162 */
163 static u8
164 ior(board_info_t *db, int reg)
165 {
166 writeb(reg, db->io_addr);
167 return readb(db->io_data);
168 }
169
170 /*
171 * Write a byte to I/O port
172 */
173
174 static void
175 iow(board_info_t *db, int reg, int value)
176 {
177 writeb(reg, db->io_addr);
178 writeb(value, db->io_data);
179 }
180
181 static void
182 dm9000_reset(board_info_t *db)
183 {
184 dev_dbg(db->dev, "resetting device\n");
185
186 /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
187 * The essential point is that we have to do a double reset, and the
188 * instruction is to set LBK into MAC internal loopback mode.
189 */
190 iow(db, DM9000_NCR, 0x03);
191 udelay(100); /* Application note says at least 20 us */
192 if (ior(db, DM9000_NCR) & 1)
193 dev_err(db->dev, "dm9000 did not respond to first reset\n");
194
195 iow(db, DM9000_NCR, 0);
196 iow(db, DM9000_NCR, 0x03);
197 udelay(100);
198 if (ior(db, DM9000_NCR) & 1)
199 dev_err(db->dev, "dm9000 did not respond to second reset\n");
200 }
201
202 /* routines for sending block to chip */
203
204 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
205 {
206 iowrite8_rep(reg, data, count);
207 }
208
209 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
210 {
211 iowrite16_rep(reg, data, (count+1) >> 1);
212 }
213
214 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
215 {
216 iowrite32_rep(reg, data, (count+3) >> 2);
217 }
218
219 /* input block from chip to memory */
220
221 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
222 {
223 ioread8_rep(reg, data, count);
224 }
225
226
227 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
228 {
229 ioread16_rep(reg, data, (count+1) >> 1);
230 }
231
232 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
233 {
234 ioread32_rep(reg, data, (count+3) >> 2);
235 }
236
237 /* dump block from chip to null */
238
239 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
240 {
241 int i;
242 int tmp;
243
244 for (i = 0; i < count; i++)
245 tmp = readb(reg);
246 }
247
248 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
249 {
250 int i;
251 int tmp;
252
253 count = (count + 1) >> 1;
254
255 for (i = 0; i < count; i++)
256 tmp = readw(reg);
257 }
258
259 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
260 {
261 int i;
262 int tmp;
263
264 count = (count + 3) >> 2;
265
266 for (i = 0; i < count; i++)
267 tmp = readl(reg);
268 }
269
270 /*
271 * Sleep, either by using msleep() or, if we are suspending, by
272 * busy-waiting with mdelay().
273 */
274 static void dm9000_msleep(board_info_t *db, unsigned int ms)
275 {
276 if (db->in_suspend)
277 mdelay(ms);
278 else
279 msleep(ms);
280 }
281
282 /* Read a word from phyxcer */
283 static int
284 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
285 {
286 board_info_t *db = netdev_priv(dev);
287 unsigned long flags;
288 unsigned int reg_save;
289 int ret;
290
291 mutex_lock(&db->addr_lock);
292
293 spin_lock_irqsave(&db->lock, flags);
294
295 /* Save previous register address */
296 reg_save = readb(db->io_addr);
297
298 /* Fill the phyxcer register into REG_0C */
299 iow(db, DM9000_EPAR, DM9000_PHY | reg);
300
301 /* Issue phyxcer read command */
302 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
303
304 writeb(reg_save, db->io_addr);
305 spin_unlock_irqrestore(&db->lock, flags);
306
307 dm9000_msleep(db, 1); /* Wait read complete */
308
309 spin_lock_irqsave(&db->lock, flags);
310 reg_save = readb(db->io_addr);
311
312 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
313
314 /* The read data is kept in REG_0D & REG_0E */
315 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
316
317 /* restore the previous address */
318 writeb(reg_save, db->io_addr);
319 spin_unlock_irqrestore(&db->lock, flags);
320
321 mutex_unlock(&db->addr_lock);
322
323 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
324 return ret;
325 }
326
327 /* Write a word to phyxcer */
328 static void
329 dm9000_phy_write(struct net_device *dev,
330 int phyaddr_unused, int reg, int value)
331 {
332 board_info_t *db = netdev_priv(dev);
333 unsigned long flags;
334 unsigned long reg_save;
335
336 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
337 mutex_lock(&db->addr_lock);
338
339 spin_lock_irqsave(&db->lock, flags);
340
341 /* Save previous register address */
342 reg_save = readb(db->io_addr);
343
344 /* Fill the phyxcer register into REG_0C */
345 iow(db, DM9000_EPAR, DM9000_PHY | reg);
346
347 /* Fill the written data into REG_0D & REG_0E */
348 iow(db, DM9000_EPDRL, value);
349 iow(db, DM9000_EPDRH, value >> 8);
350
351 /* Issue phyxcer write command */
352 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
353
354 writeb(reg_save, db->io_addr);
355 spin_unlock_irqrestore(&db->lock, flags);
356
357 dm9000_msleep(db, 1); /* Wait write complete */
358
359 spin_lock_irqsave(&db->lock, flags);
360 reg_save = readb(db->io_addr);
361
362 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
363
364 /* restore the previous address */
365 writeb(reg_save, db->io_addr);
366
367 spin_unlock_irqrestore(&db->lock, flags);
368 mutex_unlock(&db->addr_lock);
369 }
370
371 /* dm9000_set_io
372 *
373 * select the specified set of io routines to use with the
374 * device
375 */
376
377 static void dm9000_set_io(struct board_info *db, int byte_width)
378 {
379 /* use the size of the data resource to work out what IO
380 * routines we want to use
381 */
382
383 switch (byte_width) {
384 case 1:
385 db->dumpblk = dm9000_dumpblk_8bit;
386 db->outblk = dm9000_outblk_8bit;
387 db->inblk = dm9000_inblk_8bit;
388 break;
389
390
391 case 3:
392 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
393 case 2:
394 db->dumpblk = dm9000_dumpblk_16bit;
395 db->outblk = dm9000_outblk_16bit;
396 db->inblk = dm9000_inblk_16bit;
397 break;
398
399 case 4:
400 default:
401 db->dumpblk = dm9000_dumpblk_32bit;
402 db->outblk = dm9000_outblk_32bit;
403 db->inblk = dm9000_inblk_32bit;
404 break;
405 }
406 }
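/* In dm9000_probe() below the width passed here is resource_size() of the
 * data resource, and may then be overridden by the DM9000_PLATF_8BITONLY /
 * _16BITONLY / _32BITONLY platform flags or by explicit inblk/outblk/dumpblk
 * hooks supplied in the platform data.
 */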
407
408 static void dm9000_schedule_poll(board_info_t *db)
409 {
410 if (db->type == TYPE_DM9000E)
411 schedule_delayed_work(&db->phy_poll, HZ * 2);
412 }
413
414 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
415 {
416 board_info_t *dm = to_dm9000_board(dev);
417
418 if (!netif_running(dev))
419 return -EINVAL;
420
421 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
422 }
423
424 static unsigned int
425 dm9000_read_locked(board_info_t *db, int reg)
426 {
427 unsigned long flags;
428 unsigned int ret;
429
430 spin_lock_irqsave(&db->lock, flags);
431 ret = ior(db, reg);
432 spin_unlock_irqrestore(&db->lock, flags);
433
434 return ret;
435 }
436
437 static int dm9000_wait_eeprom(board_info_t *db)
438 {
439 unsigned int status;
440 int timeout = 8; /* wait max 8msec */
441
442 /* The DM9000 data sheets say we should be able to
443 * poll the ERRE bit in EPCR to wait for the EEPROM
444 * operation. From testing several chips, this bit
445 * does not seem to work.
446 *
447 * We attempt to use the bit, but fall back to the
448 * timeout (which is why we do not return an error
449 * on expiry) to say that the EEPROM operation has
450 * completed.
451 */
452
453 while (1) {
454 status = dm9000_read_locked(db, DM9000_EPCR);
455
456 if ((status & EPCR_ERRE) == 0)
457 break;
458
459 msleep(1);
460
461 if (timeout-- < 0) {
462 dev_dbg(db->dev, "timeout waiting EEPROM\n");
463 break;
464 }
465 }
466
467 return 0;
468 }
469
470 /*
471 * Read a word data from EEPROM
472 */
473 static void
474 dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
475 {
476 unsigned long flags;
477
478 if (db->flags & DM9000_PLATF_NO_EEPROM) {
479 to[0] = 0xff;
480 to[1] = 0xff;
481 return;
482 }
483
484 mutex_lock(&db->addr_lock);
485
486 spin_lock_irqsave(&db->lock, flags);
487
488 iow(db, DM9000_EPAR, offset);
489 iow(db, DM9000_EPCR, EPCR_ERPRR);
490
491 spin_unlock_irqrestore(&db->lock, flags);
492
493 dm9000_wait_eeprom(db);
494
495 /* delay for at least 150 us */
496 msleep(1);
497
498 spin_lock_irqsave(&db->lock, flags);
499
500 iow(db, DM9000_EPCR, 0x0);
501
502 to[0] = ior(db, DM9000_EPDRL);
503 to[1] = ior(db, DM9000_EPDRH);
504
505 spin_unlock_irqrestore(&db->lock, flags);
506
507 mutex_unlock(&db->addr_lock);
508 }
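/* Illustrative usage (mirroring dm9000_probe() below): the station address
 * occupies the first three 16-bit EEPROM words, low byte first, so it is
 * read two bytes at a time:
 *
 *	for (i = 0; i < 6; i += 2)
 *		dm9000_read_eeprom(db, i / 2, ndev->dev_addr + i);
 */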
509
510 /*
511 * Write a word data to SROM
512 */
513 static void
514 dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
515 {
516 unsigned long flags;
517
518 if (db->flags & DM9000_PLATF_NO_EEPROM)
519 return;
520
521 mutex_lock(&db->addr_lock);
522
523 spin_lock_irqsave(&db->lock, flags);
524 iow(db, DM9000_EPAR, offset);
525 iow(db, DM9000_EPDRH, data[1]);
526 iow(db, DM9000_EPDRL, data[0]);
527 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
528 spin_unlock_irqrestore(&db->lock, flags);
529
530 dm9000_wait_eeprom(db);
531
532 mdelay(1); /* wait at least 150uS to clear */
533
534 spin_lock_irqsave(&db->lock, flags);
535 iow(db, DM9000_EPCR, 0);
536 spin_unlock_irqrestore(&db->lock, flags);
537
538 mutex_unlock(&db->addr_lock);
539 }
540
541 /* ethtool ops */
542
543 static void dm9000_get_drvinfo(struct net_device *dev,
544 struct ethtool_drvinfo *info)
545 {
546 board_info_t *dm = to_dm9000_board(dev);
547
548 strlcpy(info->driver, CARDNAME, sizeof(info->driver));
549 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
550 strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
551 sizeof(info->bus_info));
552 }
553
554 static u32 dm9000_get_msglevel(struct net_device *dev)
555 {
556 board_info_t *dm = to_dm9000_board(dev);
557
558 return dm->msg_enable;
559 }
560
561 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
562 {
563 board_info_t *dm = to_dm9000_board(dev);
564
565 dm->msg_enable = value;
566 }
567
568 static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
569 {
570 board_info_t *dm = to_dm9000_board(dev);
571
572 mii_ethtool_gset(&dm->mii, cmd);
573 return 0;
574 }
575
576 static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
577 {
578 board_info_t *dm = to_dm9000_board(dev);
579
580 return mii_ethtool_sset(&dm->mii, cmd);
581 }
582
583 static int dm9000_nway_reset(struct net_device *dev)
584 {
585 board_info_t *dm = to_dm9000_board(dev);
586 return mii_nway_restart(&dm->mii);
587 }
588
589 static int dm9000_set_features(struct net_device *dev,
590 netdev_features_t features)
591 {
592 board_info_t *dm = to_dm9000_board(dev);
593 netdev_features_t changed = dev->features ^ features;
594 unsigned long flags;
595
596 if (!(changed & NETIF_F_RXCSUM))
597 return 0;
598
599 spin_lock_irqsave(&dm->lock, flags);
600 iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
601 spin_unlock_irqrestore(&dm->lock, flags);
602
603 return 0;
604 }
605
606 static u32 dm9000_get_link(struct net_device *dev)
607 {
608 board_info_t *dm = to_dm9000_board(dev);
609 u32 ret;
610
611 if (dm->flags & DM9000_PLATF_EXT_PHY)
612 ret = mii_link_ok(&dm->mii);
613 else
614 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
615
616 return ret;
617 }
618
619 #define DM_EEPROM_MAGIC (0x444D394B)
620
621 static int dm9000_get_eeprom_len(struct net_device *dev)
622 {
623 return 128;
624 }
625
626 static int dm9000_get_eeprom(struct net_device *dev,
627 struct ethtool_eeprom *ee, u8 *data)
628 {
629 board_info_t *dm = to_dm9000_board(dev);
630 int offset = ee->offset;
631 int len = ee->len;
632 int i;
633
634 /* EEPROM access is aligned to two bytes */
635
636 if ((len & 1) != 0 || (offset & 1) != 0)
637 return -EINVAL;
638
639 if (dm->flags & DM9000_PLATF_NO_EEPROM)
640 return -ENOENT;
641
642 ee->magic = DM_EEPROM_MAGIC;
643
644 for (i = 0; i < len; i += 2)
645 dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
646
647 return 0;
648 }
649
650 static int dm9000_set_eeprom(struct net_device *dev,
651 struct ethtool_eeprom *ee, u8 *data)
652 {
653 board_info_t *dm = to_dm9000_board(dev);
654 int offset = ee->offset;
655 int len = ee->len;
656 int done;
657
658 /* EEPROM access is aligned to two bytes */
659
660 if (dm->flags & DM9000_PLATF_NO_EEPROM)
661 return -ENOENT;
662
663 if (ee->magic != DM_EEPROM_MAGIC)
664 return -EINVAL;
665
666 while (len > 0) {
667 if (len & 1 || offset & 1) {
668 int which = offset & 1;
669 u8 tmp[2];
670
671 dm9000_read_eeprom(dm, offset / 2, tmp);
672 tmp[which] = *data;
673 dm9000_write_eeprom(dm, offset / 2, tmp);
674
675 done = 1;
676 } else {
677 dm9000_write_eeprom(dm, offset / 2, data);
678 done = 2;
679 }
680
681 data += done;
682 offset += done;
683 len -= done;
684 }
685
686 return 0;
687 }
688
689 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
690 {
691 board_info_t *dm = to_dm9000_board(dev);
692
693 memset(w, 0, sizeof(struct ethtool_wolinfo));
694
695 /* note, we could probably support wake-phy too */
696 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
697 w->wolopts = dm->wake_state;
698 }
699
700 static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
701 {
702 board_info_t *dm = to_dm9000_board(dev);
703 unsigned long flags;
704 u32 opts = w->wolopts;
705 u32 wcr = 0;
706
707 if (!dm->wake_supported)
708 return -EOPNOTSUPP;
709
710 if (opts & ~WAKE_MAGIC)
711 return -EINVAL;
712
713 if (opts & WAKE_MAGIC)
714 wcr |= WCR_MAGICEN;
715
716 mutex_lock(&dm->addr_lock);
717
718 spin_lock_irqsave(&dm->lock, flags);
719 iow(dm, DM9000_WCR, wcr);
720 spin_unlock_irqrestore(&dm->lock, flags);
721
722 mutex_unlock(&dm->addr_lock);
723
724 if (dm->wake_state != opts) {
725 /* change in wol state, update IRQ state */
726
727 if (!dm->wake_state)
728 irq_set_irq_wake(dm->irq_wake, 1);
729 else if (dm->wake_state && !opts)
730 irq_set_irq_wake(dm->irq_wake, 0);
731 }
732
733 dm->wake_state = opts;
734 return 0;
735 }
736
737 static const struct ethtool_ops dm9000_ethtool_ops = {
738 .get_drvinfo = dm9000_get_drvinfo,
739 .get_settings = dm9000_get_settings,
740 .set_settings = dm9000_set_settings,
741 .get_msglevel = dm9000_get_msglevel,
742 .set_msglevel = dm9000_set_msglevel,
743 .nway_reset = dm9000_nway_reset,
744 .get_link = dm9000_get_link,
745 .get_wol = dm9000_get_wol,
746 .set_wol = dm9000_set_wol,
747 .get_eeprom_len = dm9000_get_eeprom_len,
748 .get_eeprom = dm9000_get_eeprom,
749 .set_eeprom = dm9000_set_eeprom,
750 };
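/* Illustrative userspace mapping for the ops above (assuming the interface
 * is named eth0; the name is a placeholder):
 *
 *	ethtool -i eth0		-> dm9000_get_drvinfo()
 *	ethtool -e eth0		-> dm9000_get_eeprom()
 *	ethtool -s eth0 wol g	-> dm9000_set_wol() with WAKE_MAGIC
 */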
751
752 static void dm9000_show_carrier(board_info_t *db,
753 unsigned carrier, unsigned nsr)
754 {
755 int lpa;
756 struct net_device *ndev = db->ndev;
757 struct mii_if_info *mii = &db->mii;
758 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
759
760 if (carrier) {
761 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
762 dev_info(db->dev,
763 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
764 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
765 (ncr & NCR_FDX) ? "full" : "half", lpa);
766 } else {
767 dev_info(db->dev, "%s: link down\n", ndev->name);
768 }
769 }
770
771 static void
772 dm9000_poll_work(struct work_struct *w)
773 {
774 struct delayed_work *dw = to_delayed_work(w);
775 board_info_t *db = container_of(dw, board_info_t, phy_poll);
776 struct net_device *ndev = db->ndev;
777
778 if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
779 !(db->flags & DM9000_PLATF_EXT_PHY)) {
780 unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
781 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
782 unsigned new_carrier;
783
784 new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
785
786 if (old_carrier != new_carrier) {
787 if (netif_msg_link(db))
788 dm9000_show_carrier(db, new_carrier, nsr);
789
790 if (!new_carrier)
791 netif_carrier_off(ndev);
792 else
793 netif_carrier_on(ndev);
794 }
795 } else
796 mii_check_media(&db->mii, netif_msg_link(db), 0);
797
798 if (netif_running(ndev))
799 dm9000_schedule_poll(db);
800 }
801
802 /* dm9000_release_board
803 *
804 * release a board, and any mapped resources
805 */
806
807 static void
808 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
809 {
810 /* unmap our resources */
811
812 iounmap(db->io_addr);
813 iounmap(db->io_data);
814
815 /* release the resources */
816
817 release_resource(db->data_req);
818 kfree(db->data_req);
819
820 release_resource(db->addr_req);
821 kfree(db->addr_req);
822 }
823
824 static unsigned char dm9000_type_to_char(enum dm9000_type type)
825 {
826 switch (type) {
827 case TYPE_DM9000E: return 'e';
828 case TYPE_DM9000A: return 'a';
829 case TYPE_DM9000B: return 'b';
830 }
831
832 return '?';
833 }
834
835 /*
836 * Set DM9000 multicast address
837 */
838 static void
839 dm9000_hash_table_unlocked(struct net_device *dev)
840 {
841 board_info_t *db = netdev_priv(dev);
842 struct netdev_hw_addr *ha;
843 int i, oft;
844 u32 hash_val;
845 u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
846 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
847
848 dm9000_dbg(db, 1, "entering %s\n", __func__);
849
850 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
851 iow(db, oft, dev->dev_addr[i]);
852
853 if (dev->flags & IFF_PROMISC)
854 rcr |= RCR_PRMSC;
855
856 if (dev->flags & IFF_ALLMULTI)
857 rcr |= RCR_ALL;
858
859 /* the multicast addresses go into the 64-bit hash table */
860 netdev_for_each_mc_addr(ha, dev) {
861 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
862 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
863 }
864
865 /* Write the hash table to MAC MD table */
866 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
867 iow(db, oft++, hash_table[i]);
868 iow(db, oft++, hash_table[i] >> 8);
869 }
870
871 iow(db, DM9000_RCR, rcr);
872 }
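/* The loop above maps each multicast address onto one of 64 hash bits: the
 * low six bits of the little-endian CRC pick bit (hash_val % 16) of 16-bit
 * word (hash_val / 16), and word 3 bit 15 is pre-set so broadcast frames
 * are always accepted.
 */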
873
874 static void
875 dm9000_hash_table(struct net_device *dev)
876 {
877 board_info_t *db = netdev_priv(dev);
878 unsigned long flags;
879
880 spin_lock_irqsave(&db->lock, flags);
881 dm9000_hash_table_unlocked(dev);
882 spin_unlock_irqrestore(&db->lock, flags);
883 }
884
885 /*
886 * Initialize dm9000 board
887 */
888 static void
889 dm9000_init_dm9000(struct net_device *dev)
890 {
891 board_info_t *db = netdev_priv(dev);
892 unsigned int imr;
893 unsigned int ncr;
894
895 dm9000_dbg(db, 1, "entering %s\n", __func__);
896
897 /* I/O mode */
898 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
899
900 /* Checksum mode */
901 if (dev->hw_features & NETIF_F_RXCSUM)
902 iow(db, DM9000_RCSR,
903 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
904
905 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
906 iow(db, DM9000_GPR, 0);
907
908 /* If we are dealing with DM9000B, some extra steps are required: a
909 * manual phy reset, and setting init params.
910 */
911 if (db->type == TYPE_DM9000B) {
912 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
913 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
914 }
915
916 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
917
918 /* If WoL is supported, always set NCR_WAKEEN; otherwise the wake
919 * events are simply discarded. The actual wake sources are still
920 * masked by the wake-mask in DM9000_WCR */
921 if (db->wake_supported)
922 ncr |= NCR_WAKEEN;
923
924 iow(db, DM9000_NCR, ncr);
925
926 /* Program operating register */
927 iow(db, DM9000_TCR, 0); /* TX Polling clear */
928 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
929 iow(db, DM9000_FCR, 0xff); /* Flow Control */
930 iow(db, DM9000_SMCR, 0); /* Special Mode */
931 /* clear TX status */
932 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
933 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
934
935 /* Set address filter table */
936 dm9000_hash_table_unlocked(dev);
937
938 imr = IMR_PAR | IMR_PTM | IMR_PRM;
939 if (db->type != TYPE_DM9000E)
940 imr |= IMR_LNKCHNG;
941
942 db->imr_all = imr;
943
944 /* Enable TX/RX interrupt mask */
945 iow(db, DM9000_IMR, imr);
946
947 /* Init Driver variable */
948 db->tx_pkt_cnt = 0;
949 db->queue_pkt_len = 0;
950 dev->trans_start = jiffies;
951 }
952
953 /* Our watchdog timed out. Called by the networking layer */
954 static void dm9000_timeout(struct net_device *dev)
955 {
956 board_info_t *db = netdev_priv(dev);
957 u8 reg_save;
958 unsigned long flags;
959
960 /* Save previous register address */
961 spin_lock_irqsave(&db->lock, flags);
962 reg_save = readb(db->io_addr);
963
964 netif_stop_queue(dev);
965 dm9000_reset(db);
966 dm9000_init_dm9000(dev);
967 /* We can accept TX packets again */
968 dev->trans_start = jiffies; /* prevent tx timeout */
969 netif_wake_queue(dev);
970
971 /* Restore previous register address */
972 writeb(reg_save, db->io_addr);
973 spin_unlock_irqrestore(&db->lock, flags);
974 }
975
976 static void dm9000_send_packet(struct net_device *dev,
977 int ip_summed,
978 u16 pkt_len)
979 {
980 board_info_t *dm = to_dm9000_board(dev);
981
982 /* The DM9000 is not smart enough to leave fragmented packets alone. */
983 if (dm->ip_summed != ip_summed) {
984 if (ip_summed == CHECKSUM_NONE)
985 iow(dm, DM9000_TCCR, 0);
986 else
987 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
988 dm->ip_summed = ip_summed;
989 }
990
991 /* Set TX length to DM9000 */
992 iow(dm, DM9000_TXPLL, pkt_len);
993 iow(dm, DM9000_TXPLH, pkt_len >> 8);
994
995 /* Issue TX polling command */
996 iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
997 }
998
999 /*
1000 * Hardware start transmission.
1001 * Send a packet to media from the upper layer.
1002 */
1003 static int
1004 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1005 {
1006 unsigned long flags;
1007 board_info_t *db = netdev_priv(dev);
1008
1009 dm9000_dbg(db, 3, "%s:\n", __func__);
1010
1011 if (db->tx_pkt_cnt > 1)
1012 return NETDEV_TX_BUSY;
1013
1014 spin_lock_irqsave(&db->lock, flags);
1015
1016 /* Move data to DM9000 TX RAM */
1017 writeb(DM9000_MWCMD, db->io_addr);
1018
1019 (db->outblk)(db->io_data, skb->data, skb->len);
1020 dev->stats.tx_bytes += skb->len;
1021
1022 db->tx_pkt_cnt++;
1023 /* TX control: First packet immediately send, second packet queue */
1024 if (db->tx_pkt_cnt == 1) {
1025 dm9000_send_packet(dev, skb->ip_summed, skb->len);
1026 } else {
1027 /* Second packet */
1028 db->queue_pkt_len = skb->len;
1029 db->queue_ip_summed = skb->ip_summed;
1030 netif_stop_queue(dev);
1031 }
1032
1033 spin_unlock_irqrestore(&db->lock, flags);
1034
1035 /* free this SKB */
1036 dev_kfree_skb(skb);
1037
1038 return NETDEV_TX_OK;
1039 }
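/* The DM9000 TX SRAM holds two outstanding packets: the first is kicked off
 * immediately via dm9000_send_packet(), the second is queued and the netif
 * queue is stopped until dm9000_tx_done() reports completion, sends the
 * queued packet and restarts the queue.
 */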
1040
1041 /*
1042 * DM9000 interrupt handler
1043 * receive the packet to upper layer, free the transmitted packet
1044 */
1045
1046 static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
1047 {
1048 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
1049
1050 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
1051 /* One packet sent complete */
1052 db->tx_pkt_cnt--;
1053 dev->stats.tx_packets++;
1054
1055 if (netif_msg_tx_done(db))
1056 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
1057
1058 /* Queue packet check & send */
1059 if (db->tx_pkt_cnt > 0)
1060 dm9000_send_packet(dev, db->queue_ip_summed,
1061 db->queue_pkt_len);
1062 netif_wake_queue(dev);
1063 }
1064 }
1065
1066 struct dm9000_rxhdr {
1067 u8 RxPktReady;
1068 u8 RxStatus;
1069 __le16 RxLen;
1070 } __packed;
1071
1072 /*
1073 * Received a packet and pass to upper layer
1074 */
1075 static void
1076 dm9000_rx(struct net_device *dev)
1077 {
1078 board_info_t *db = netdev_priv(dev);
1079 struct dm9000_rxhdr rxhdr;
1080 struct sk_buff *skb;
1081 u8 rxbyte, *rdptr;
1082 bool GoodPacket;
1083 int RxLen;
1084
1085 /* Check packet ready or not */
1086 do {
1087 ior(db, DM9000_MRCMDX); /* Dummy read */
1088
1089 /* Get most updated data */
1090 rxbyte = readb(db->io_data);
1091
1092 /* Status check: this byte must be 0 or 1 */
1093 if (rxbyte & DM9000_PKT_ERR) {
1094 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1095 iow(db, DM9000_RCR, 0x00); /* Stop Device */
1096 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
1097 return;
1098 }
1099
1100 if (!(rxbyte & DM9000_PKT_RDY))
1101 return;
1102
1103 /* A packet is ready now; get its status/length */
1104 GoodPacket = true;
1105 writeb(DM9000_MRCMD, db->io_addr);
1106
1107 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
1108
1109 RxLen = le16_to_cpu(rxhdr.RxLen);
1110
1111 if (netif_msg_rx_status(db))
1112 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1113 rxhdr.RxStatus, RxLen);
1114
1115 /* Packet Status check */
1116 if (RxLen < 0x40) {
1117 GoodPacket = false;
1118 if (netif_msg_rx_err(db))
1119 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1120 }
1121
1122 if (RxLen > DM9000_PKT_MAX) {
1123 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1124 }
1125
1126 /* rxhdr.RxStatus is identical to RSR register. */
1127 if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1128 RSR_PLE | RSR_RWTO |
1129 RSR_LCS | RSR_RF)) {
1130 GoodPacket = false;
1131 if (rxhdr.RxStatus & RSR_FOE) {
1132 if (netif_msg_rx_err(db))
1133 dev_dbg(db->dev, "fifo error\n");
1134 dev->stats.rx_fifo_errors++;
1135 }
1136 if (rxhdr.RxStatus & RSR_CE) {
1137 if (netif_msg_rx_err(db))
1138 dev_dbg(db->dev, "crc error\n");
1139 dev->stats.rx_crc_errors++;
1140 }
1141 if (rxhdr.RxStatus & RSR_RF) {
1142 if (netif_msg_rx_err(db))
1143 dev_dbg(db->dev, "length error\n");
1144 dev->stats.rx_length_errors++;
1145 }
1146 }
1147
1148 /* Move data from DM9000 */
1149 if (GoodPacket &&
1150 ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
1151 skb_reserve(skb, 2);
1152 rdptr = (u8 *) skb_put(skb, RxLen - 4);
1153
1154 /* Read received packet from RX SRAM */
1155
1156 (db->inblk)(db->io_data, rdptr, RxLen);
1157 dev->stats.rx_bytes += RxLen;
1158
1159 /* Pass to upper layer */
1160 skb->protocol = eth_type_trans(skb, dev);
1161 if (dev->features & NETIF_F_RXCSUM) {
1162 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1163 skb->ip_summed = CHECKSUM_UNNECESSARY;
1164 else
1165 skb_checksum_none_assert(skb);
1166 }
1167 netif_rx(skb);
1168 dev->stats.rx_packets++;
1169
1170 } else {
1171 /* need to dump the packet's data */
1172
1173 (db->dumpblk)(db->io_data, RxLen);
1174 }
1175 } while (rxbyte & DM9000_PKT_RDY);
1176 }
1177
1178 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1179 {
1180 struct net_device *dev = dev_id;
1181 board_info_t *db = netdev_priv(dev);
1182 int int_status;
1183 unsigned long flags;
1184 u8 reg_save;
1185
1186 dm9000_dbg(db, 3, "entering %s\n", __func__);
1187
1188 /* A real interrupt has arrived */
1189
1190 /* holders of db->lock must always block IRQs */
1191 spin_lock_irqsave(&db->lock, flags);
1192
1193 /* Save previous register address */
1194 reg_save = readb(db->io_addr);
1195
1196 /* Disable all interrupts */
1197 iow(db, DM9000_IMR, IMR_PAR);
1198
1199 /* Got DM9000 interrupt status */
1200 int_status = ior(db, DM9000_ISR); /* Got ISR */
1201 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
1202
1203 if (netif_msg_intr(db))
1204 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1205
1206 /* Received the coming packet */
1207 if (int_status & ISR_PRS)
1208 dm9000_rx(dev);
1209
1210 /* Transmit Interrupt check */
1211 if (int_status & ISR_PTS)
1212 dm9000_tx_done(dev, db);
1213
1214 if (db->type != TYPE_DM9000E) {
1215 if (int_status & ISR_LNKCHNG) {
1216 /* fire a link-change request */
1217 schedule_delayed_work(&db->phy_poll, 1);
1218 }
1219 }
1220
1221 /* Re-enable interrupt mask */
1222 iow(db, DM9000_IMR, db->imr_all);
1223
1224 /* Restore previous register address */
1225 writeb(reg_save, db->io_addr);
1226
1227 spin_unlock_irqrestore(&db->lock, flags);
1228
1229 return IRQ_HANDLED;
1230 }
1231
1232 static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1233 {
1234 struct net_device *dev = dev_id;
1235 board_info_t *db = netdev_priv(dev);
1236 unsigned long flags;
1237 unsigned nsr, wcr;
1238
1239 spin_lock_irqsave(&db->lock, flags);
1240
1241 nsr = ior(db, DM9000_NSR);
1242 wcr = ior(db, DM9000_WCR);
1243
1244 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1245
1246 if (nsr & NSR_WAKEST) {
1247 /* clear the wake status so we do not handle it again */
1248 iow(db, DM9000_NSR, NSR_WAKEST);
1249
1250 if (wcr & WCR_LINKST)
1251 dev_info(db->dev, "wake by link status change\n");
1252 if (wcr & WCR_SAMPLEST)
1253 dev_info(db->dev, "wake by sample packet\n");
1254 if (wcr & WCR_MAGICST)
1255 dev_info(db->dev, "wake by magic packet\n");
1256 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1257 dev_err(db->dev, "wake signalled with no reason? "
1258 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1259 }
1260
1261 spin_unlock_irqrestore(&db->lock, flags);
1262
1263 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1264 }
1265
1266 #ifdef CONFIG_NET_POLL_CONTROLLER
1267 /*
1268 * Used by netconsole
1269 */
1270 static void dm9000_poll_controller(struct net_device *dev)
1271 {
1272 disable_irq(dev->irq);
1273 dm9000_interrupt(dev->irq, dev);
1274 enable_irq(dev->irq);
1275 }
1276 #endif
1277
1278 /*
1279 * Open the interface.
1280 * The interface is opened whenever "ifconfig" activates it.
1281 */
1282 static int
1283 dm9000_open(struct net_device *dev)
1284 {
1285 board_info_t *db = netdev_priv(dev);
1286 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1287
1288 if (netif_msg_ifup(db))
1289 dev_dbg(db->dev, "enabling %s\n", dev->name);
1290
1291 /* If there is no IRQ type specified, default to something that
1292 * may work, and tell the user that this is a problem */
1293
1294 if (irqflags == IRQF_TRIGGER_NONE)
1295 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1296
1297 irqflags |= IRQF_SHARED;
1298
1299 /* Pre-activate the PHY via GPIO0; REG_1F is not cleared by reset */
1300 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1301 mdelay(1); /* delay needed by DM9000B */
1302
1303 /* Initialize DM9000 board */
1304 dm9000_reset(db);
1305 dm9000_init_dm9000(dev);
1306
1307 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1308 return -EAGAIN;
1309
1310 /* Init driver variable */
1311 db->dbug_cnt = 0;
1312
1313 mii_check_media(&db->mii, netif_msg_link(db), 1);
1314 netif_start_queue(dev);
1315
1316 dm9000_schedule_poll(db);
1317
1318 return 0;
1319 }
1320
1321 static void
1322 dm9000_shutdown(struct net_device *dev)
1323 {
1324 board_info_t *db = netdev_priv(dev);
1325
1326 /* RESET device */
1327 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
1328 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
1329 iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
1330 iow(db, DM9000_RCR, 0x00); /* Disable RX */
1331 }
1332
1333 /*
1334 * Stop the interface.
1335 * The interface is stopped when it is brought down.
1336 */
1337 static int
1338 dm9000_stop(struct net_device *ndev)
1339 {
1340 board_info_t *db = netdev_priv(ndev);
1341
1342 if (netif_msg_ifdown(db))
1343 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1344
1345 cancel_delayed_work_sync(&db->phy_poll);
1346
1347 netif_stop_queue(ndev);
1348 netif_carrier_off(ndev);
1349
1350 /* free interrupt */
1351 free_irq(ndev->irq, ndev);
1352
1353 dm9000_shutdown(ndev);
1354
1355 return 0;
1356 }
1357
1358 static const struct net_device_ops dm9000_netdev_ops = {
1359 .ndo_open = dm9000_open,
1360 .ndo_stop = dm9000_stop,
1361 .ndo_start_xmit = dm9000_start_xmit,
1362 .ndo_tx_timeout = dm9000_timeout,
1363 .ndo_set_rx_mode = dm9000_hash_table,
1364 .ndo_do_ioctl = dm9000_ioctl,
1365 .ndo_change_mtu = eth_change_mtu,
1366 .ndo_set_features = dm9000_set_features,
1367 .ndo_validate_addr = eth_validate_addr,
1368 .ndo_set_mac_address = eth_mac_addr,
1369 #ifdef CONFIG_NET_POLL_CONTROLLER
1370 .ndo_poll_controller = dm9000_poll_controller,
1371 #endif
1372 };
1373
1374 static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1375 {
1376 struct dm9000_plat_data *pdata;
1377 struct device_node *np = dev->of_node;
1378 const void *mac_addr;
1379
1380 if (!IS_ENABLED(CONFIG_OF) || !np)
1381 return NULL;
1382
1383 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1384 if (!pdata)
1385 return ERR_PTR(-ENOMEM);
1386
1387 if (of_find_property(np, "davicom,ext-phy", NULL))
1388 pdata->flags |= DM9000_PLATF_EXT_PHY;
1389 if (of_find_property(np, "davicom,no-eeprom", NULL))
1390 pdata->flags |= DM9000_PLATF_NO_EEPROM;
1391
1392 mac_addr = of_get_mac_address(np);
1393 if (mac_addr)
1394 memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
1395
1396 return pdata;
1397 }
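/* An illustrative device-tree node this parser would accept; every value
 * below is a board-specific placeholder, not taken from a real board:
 *
 *	ethernet@a8000000 {
 *		compatible = "davicom,dm9000";
 *		reg = <0xa8000000 0x2>,		(address port)
 *		      <0xa8000002 0x2>;		(data port)
 *		interrupt-parent = <&gpf>;
 *		interrupts = <7 4>;
 *		davicom,no-eeprom;
 *	};
 *
 * The MAC address may additionally be given via the standard "mac-address"
 * or "local-mac-address" properties, which of_get_mac_address() picks up.
 */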
1398
1399 /*
1400 * Search DM9000 board, allocate space and register it
1401 */
1402 static int
1403 dm9000_probe(struct platform_device *pdev)
1404 {
1405 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1406 struct board_info *db; /* Point a board information structure */
1407 struct net_device *ndev;
1408 const unsigned char *mac_src;
1409 int ret = 0;
1410 int iosize;
1411 int i;
1412 u32 id_val;
1413
1414 if (!pdata) {
1415 pdata = dm9000_parse_dt(&pdev->dev);
1416 if (IS_ERR(pdata))
1417 return PTR_ERR(pdata);
1418 }
1419
1420 /* Init network device */
1421 ndev = alloc_etherdev(sizeof(struct board_info));
1422 if (!ndev)
1423 return -ENOMEM;
1424
1425 SET_NETDEV_DEV(ndev, &pdev->dev);
1426
1427 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1428
1429 /* setup board info structure */
1430 db = netdev_priv(ndev);
1431
1432 db->dev = &pdev->dev;
1433 db->ndev = ndev;
1434
1435 spin_lock_init(&db->lock);
1436 mutex_init(&db->addr_lock);
1437
1438 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1439
1440 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1441 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1442 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1443
1444 if (db->addr_res == NULL || db->data_res == NULL ||
1445 db->irq_res == NULL) {
1446 dev_err(db->dev, "insufficient resources\n");
1447 ret = -ENOENT;
1448 goto out;
1449 }
1450
1451 db->irq_wake = platform_get_irq(pdev, 1);
1452 if (db->irq_wake >= 0) {
1453 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1454
1455 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1456 IRQF_SHARED, dev_name(db->dev), ndev);
1457 if (ret) {
1458 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1459 } else {
1460
1461 /* test to see if irq is really wakeup capable */
1462 ret = irq_set_irq_wake(db->irq_wake, 1);
1463 if (ret) {
1464 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1465 db->irq_wake, ret);
1466 ret = 0;
1467 } else {
1468 irq_set_irq_wake(db->irq_wake, 0);
1469 db->wake_supported = 1;
1470 }
1471 }
1472 }
1473
1474 iosize = resource_size(db->addr_res);
1475 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1476 pdev->name);
1477
1478 if (db->addr_req == NULL) {
1479 dev_err(db->dev, "cannot claim address reg area\n");
1480 ret = -EIO;
1481 goto out;
1482 }
1483
1484 db->io_addr = ioremap(db->addr_res->start, iosize);
1485
1486 if (db->io_addr == NULL) {
1487 dev_err(db->dev, "failed to ioremap address reg\n");
1488 ret = -EINVAL;
1489 goto out;
1490 }
1491
1492 iosize = resource_size(db->data_res);
1493 db->data_req = request_mem_region(db->data_res->start, iosize,
1494 pdev->name);
1495
1496 if (db->data_req == NULL) {
1497 dev_err(db->dev, "cannot claim data reg area\n");
1498 ret = -EIO;
1499 goto out;
1500 }
1501
1502 db->io_data = ioremap(db->data_res->start, iosize);
1503
1504 if (db->io_data == NULL) {
1505 dev_err(db->dev, "failed to ioremap data reg\n");
1506 ret = -EINVAL;
1507 goto out;
1508 }
1509
1510 /* fill in parameters for net-dev structure */
1511 ndev->base_addr = (unsigned long)db->io_addr;
1512 ndev->irq = db->irq_res->start;
1513
1514 /* ensure at least we have a default set of IO routines */
1515 dm9000_set_io(db, iosize);
1516
1517 /* check to see if anything is being over-ridden */
1518 if (pdata != NULL) {
1519 /* check to see if the driver wants to over-ride the
1520 * default IO width */
1521
1522 if (pdata->flags & DM9000_PLATF_8BITONLY)
1523 dm9000_set_io(db, 1);
1524
1525 if (pdata->flags & DM9000_PLATF_16BITONLY)
1526 dm9000_set_io(db, 2);
1527
1528 if (pdata->flags & DM9000_PLATF_32BITONLY)
1529 dm9000_set_io(db, 4);
1530
1531 /* check to see if there are any IO routine
1532 * over-rides */
1533
1534 if (pdata->inblk != NULL)
1535 db->inblk = pdata->inblk;
1536
1537 if (pdata->outblk != NULL)
1538 db->outblk = pdata->outblk;
1539
1540 if (pdata->dumpblk != NULL)
1541 db->dumpblk = pdata->dumpblk;
1542
1543 db->flags = pdata->flags;
1544 }
1545
1546 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1547 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1548 #endif
1549
1550 /* Instead of calling dm9000_reset(db) here, reset the chip directly:
1551 * the 'NCR_MAC_LBK' bit is needed to keep the DM9000 FIFO stable
1552 * during the probe stage.
1553 */
1554
1555 iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
1556
1557 /* try multiple times, DM9000 sometimes gets the read wrong */
1558 for (i = 0; i < 8; i++) {
1559 id_val = ior(db, DM9000_VIDL);
1560 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1561 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1562 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1563
1564 if (id_val == DM9000_ID)
1565 break;
1566 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1567 }
1568
1569 if (id_val != DM9000_ID) {
1570 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1571 ret = -ENODEV;
1572 goto out;
1573 }
1574
1575 /* Identify what type of DM9000 we are working on */
1576
1577 id_val = ior(db, DM9000_CHIPR);
1578 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1579
1580 switch (id_val) {
1581 case CHIPR_DM9000A:
1582 db->type = TYPE_DM9000A;
1583 break;
1584 case CHIPR_DM9000B:
1585 db->type = TYPE_DM9000B;
1586 break;
1587 default:
1588 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1589 db->type = TYPE_DM9000E;
1590 }
1591
1592 /* dm9000a/b are capable of hardware checksum offload */
1593 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1594 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1595 ndev->features |= ndev->hw_features;
1596 }
1597
1598 /* from this point we assume that we have found a DM9000 */
1599
1600 /* driver system function */
1601 ether_setup(ndev);
1602
1603 ndev->netdev_ops = &dm9000_netdev_ops;
1604 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1605 ndev->ethtool_ops = &dm9000_ethtool_ops;
1606
1607 db->msg_enable = NETIF_MSG_LINK;
1608 db->mii.phy_id_mask = 0x1f;
1609 db->mii.reg_num_mask = 0x1f;
1610 db->mii.force_media = 0;
1611 db->mii.full_duplex = 0;
1612 db->mii.dev = ndev;
1613 db->mii.mdio_read = dm9000_phy_read;
1614 db->mii.mdio_write = dm9000_phy_write;
1615
1616 mac_src = "eeprom";
1617
1618 /* try reading the node address from the attached EEPROM */
1619 for (i = 0; i < 6; i += 2)
1620 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1621
1622 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1623 mac_src = "platform data";
1624 memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
1625 }
1626
1627 if (!is_valid_ether_addr(ndev->dev_addr)) {
1628 /* try reading from mac */
1629
1630 mac_src = "chip";
1631 for (i = 0; i < 6; i++)
1632 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1633 }
1634
1635 if (!is_valid_ether_addr(ndev->dev_addr)) {
1636 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1637 "set using ifconfig\n", ndev->name);
1638
1639 eth_hw_addr_random(ndev);
1640 mac_src = "random";
1641 }
1642
1643
1644 platform_set_drvdata(pdev, ndev);
1645 ret = register_netdev(ndev);
1646
1647 if (ret == 0)
1648 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1649 ndev->name, dm9000_type_to_char(db->type),
1650 db->io_addr, db->io_data, ndev->irq,
1651 ndev->dev_addr, mac_src);
1652 return 0;
1653
1654 out:
1655 dev_err(db->dev, "not found (%d).\n", ret);
1656
1657 dm9000_release_board(pdev, db);
1658 free_netdev(ndev);
1659
1660 return ret;
1661 }
1662
1663 static int
1664 dm9000_drv_suspend(struct device *dev)
1665 {
1666 struct platform_device *pdev = to_platform_device(dev);
1667 struct net_device *ndev = platform_get_drvdata(pdev);
1668 board_info_t *db;
1669
1670 if (ndev) {
1671 db = netdev_priv(ndev);
1672 db->in_suspend = 1;
1673
1674 if (!netif_running(ndev))
1675 return 0;
1676
1677 netif_device_detach(ndev);
1678
1679 /* only shutdown if not using WoL */
1680 if (!db->wake_state)
1681 dm9000_shutdown(ndev);
1682 }
1683 return 0;
1684 }
1685
1686 static int
1687 dm9000_drv_resume(struct device *dev)
1688 {
1689 struct platform_device *pdev = to_platform_device(dev);
1690 struct net_device *ndev = platform_get_drvdata(pdev);
1691 board_info_t *db = netdev_priv(ndev);
1692
1693 if (ndev) {
1694 if (netif_running(ndev)) {
1695 /* reset if we were not in wake mode, to ensure the
1696 * device is in a known state in case it was powered off */
1697 if (!db->wake_state) {
1698 dm9000_reset(db);
1699 dm9000_init_dm9000(ndev);
1700 }
1701
1702 netif_device_attach(ndev);
1703 }
1704
1705 db->in_suspend = 0;
1706 }
1707 return 0;
1708 }
1709
1710 static const struct dev_pm_ops dm9000_drv_pm_ops = {
1711 .suspend = dm9000_drv_suspend,
1712 .resume = dm9000_drv_resume,
1713 };
1714
1715 static int
1716 dm9000_drv_remove(struct platform_device *pdev)
1717 {
1718 struct net_device *ndev = platform_get_drvdata(pdev);
1719
1720 unregister_netdev(ndev);
1721 dm9000_release_board(pdev, netdev_priv(ndev));
1722 free_netdev(ndev); /* free device structure */
1723
1724 dev_dbg(&pdev->dev, "released and freed device\n");
1725 return 0;
1726 }
1727
1728 #ifdef CONFIG_OF
1729 static const struct of_device_id dm9000_of_matches[] = {
1730 { .compatible = "davicom,dm9000", },
1731 { /* sentinel */ }
1732 };
1733 MODULE_DEVICE_TABLE(of, dm9000_of_matches);
1734 #endif
1735
1736 static struct platform_driver dm9000_driver = {
1737 .driver = {
1738 .name = "dm9000",
1739 .owner = THIS_MODULE,
1740 .pm = &dm9000_drv_pm_ops,
1741 .of_match_table = of_match_ptr(dm9000_of_matches),
1742 },
1743 .probe = dm9000_probe,
1744 .remove = dm9000_drv_remove,
1745 };
1746
1747 module_platform_driver(dm9000_driver);
1748
1749 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1750 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1751 MODULE_LICENSE("GPL");
1752 MODULE_ALIAS("platform:dm9000");