sis190: mdio operation failure is not correctly detected
[deliverable/linux.git] / drivers / net / sis190.c
1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
34
/*
 * Conditional logging helpers: emit a printk() only when the matching
 * netif_msg_* bit is set in the adapter's msg_enable mask.
 * Wrapped in do { } while (0) so each macro expands as a single
 * statement and is safe inside un-braced if/else constructs.
 */
#define net_drv(p, arg...)	do { if (netif_msg_drv(p)) \
					printk(arg); } while (0)
#define net_probe(p, arg...)	do { if (netif_msg_probe(p)) \
					printk(arg); } while (0)
#define net_link(p, arg...)	do { if (netif_msg_link(p)) \
					printk(arg); } while (0)
#define net_intr(p, arg...)	do { if (netif_msg_intr(p)) \
					printk(arg); } while (0)
#define net_tx_err(p, arg...)	do { if (netif_msg_tx_err(p)) \
					printk(arg); } while (0)
45
/* PHY bus probing limits and wildcard ids. */
#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.2"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX			DRV_NAME ": "

/* No NAPI here: received frames go straight to netif_rx() and the Rx
 * quota argument is ignored. */
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define MAC_ADDR_LEN		6

/* Ring geometry. */
#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8	/* buffer sizes are 8-byte aligned */

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* Write/read MMIO register (all users have a local 'ioaddr'). */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Flush posted PCI writes by reading a harmless register back. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
92
/* MMIO register offsets, relative to the mapped BAR (ioaddr). */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c,	// unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c,	// reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented	= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};
127
/* Bit definitions for the registers above. */
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
177
/* Hardware Tx descriptor; all fields little-endian as seen by the asic. */
struct TxDesc {
	__le32 PSize;	/* packet size */
	__le32 status;	/* OWN/INT plus per-packet command bits */
	__le32 addr;	/* DMA address of the data buffer */
	__le32 size;	/* buffer size; RingEnd marks the last descriptor */
};
184
/* Hardware Rx descriptor; same layout as TxDesc, little-endian. */
struct RxDesc {
	__le32 PSize;	/* received size + error/status bits on completion */
	__le32 status;	/* OWN/INT ownership and status bits */
	__le32 addr;	/* DMA address of the receive buffer */
	__le32 size;	/* buffer size; RingEnd marks the last descriptor */
};
191
/* Bits used inside the Tx/Rx descriptor fields above. */
enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000, // RXOWN/TXOWN
	INTbit		= 0x40000000, // RXINT/TXINT
	CRCbit		= 0x00020000, // CRCOFF/CRCEN
	PADbit		= 0x00010000, // PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000, // TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publicly
	 * available documentation alas.
	 */
};
244
/* ROMInterface register bits used for serial EEPROM access. */
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,	/* access request; cleared by hw when done */
	EEROP	= 0x00000200,	/* read operation */
	EEWOP	= 0x00000100	// unused
};
254
/* EEPROM Addresses (16-bit word offsets). */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};
262
/* Flag bits kept in sis190_private.features. */
enum sis190_feature {
	F_HAS_RGMII	= 1,	/* mac/phy connected through RGMII (see sis190_phy_task) */
	F_PHY_88E1111	= 2,	/* Marvell 88E1111 PHY quirks apply */
	F_PHY_BCM5461	= 4	/* Broadcom BCM5461 PHY quirks apply */
};
268
/* Per-adapter driver state, stored in the net_device private area. */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped chip registers */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;		/* serializes chip access (see sis190_set_rx_mode, sis190_down) */
	u32 rx_buf_sz;			/* Rx buffer size, kept 8-byte aligned */
	u32 cur_rx;			/* free-running ring cursors; used modulo NUM_*_DESC */
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;		/* bus address of RxDescRing */
	dma_addr_t tx_dma;		/* bus address of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;	/* deferred link/PHY handling */
	struct timer_list timer;	/* reschedules phy_task (sis190_phy_timer) */
	u32 msg_enable;
	struct mii_if_info mii_if;
	struct list_head first_phy;	/* list of discovered sis190_phy entries */
	u32 features;			/* F_* flags (enum sis190_feature) */
};
292
/* One entry per PHY discovered while probing the MII bus. */
struct sis190_phy {
	struct list_head list;
	int phy_id;	/* MII bus address */
	u16 id[2];	/* PHY identifier registers */
	u16 status;
	u8 type;	/* enum sis190_phy_type */
};
300
/* Medium classes a PHY may serve (used by sis190_default_phy). */
enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};
307
/* Known PHYs, matched against the two PHY identifier registers. */
static struct mii_chip_info {
	const char *name;
	u16 id[2];		/* expected PHY id register values */
	unsigned int type;	/* enum sis190_phy_type */
	u32 feature;		/* F_PHY_* quirk flag, if any */
} mii_chip_table[] = {
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};
321
/* Human-readable board names, indexed by driver_data in the pci table. */
static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
328
/* PCI ids; driver_data indexes sis_chip_info above. */
static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

/* Frames shorter than this are copied into a fresh skb on receive. */
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* Interrupt sources the driver enables in sis190_hw_start(). */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;
360
361 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
362 {
363 unsigned int i;
364
365 SIS_W32(GMIIControl, ctl);
366
367 msleep(1);
368
369 for (i = 0; i < 100; i++) {
370 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
371 break;
372 msleep(1);
373 }
374
375 if (i > 99)
376 printk(KERN_ERR PFX "PHY command failed !\n");
377 }
378
379 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
380 {
381 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
382 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
383 (((u32) val) << EhnMIIdataShift));
384 }
385
386 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
387 {
388 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
389 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
390
391 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
392 }
393
/* mdio_write adapter with the signature expected by mii_if_info. */
static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
	struct sis190_private *tp = netdev_priv(dev);

	mdio_write(tp->mmio_addr, phy_id, reg, val);
}
400
/* mdio_read adapter with the signature expected by mii_if_info. */
static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mdio_read(tp->mmio_addr, phy_id, reg);
}
407
/*
 * Read a MII register twice and return the second value: latched bits
 * (e.g. link status in BMSR) only reflect current state on re-read.
 */
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}
413
414 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
415 {
416 u16 data = 0xffff;
417 unsigned int i;
418
419 if (!(SIS_R32(ROMControl) & 0x0002))
420 return 0;
421
422 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
423
424 for (i = 0; i < 200; i++) {
425 if (!(SIS_R32(ROMInterface) & EEREQ)) {
426 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
427 break;
428 }
429 msleep(1);
430 }
431
432 return data;
433 }
434
/* Mask every interrupt source, ack anything pending, flush the writes. */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}
441
/* Quiesce the chip: halt DMA engines and silence interrupts. */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
451
/* Flag @desc as the ring's final entry so the asic wraps around. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
456
/*
 * Hand an Rx descriptor (back) to the asic.  The OWN bit must become
 * visible only after PSize/size are written — hence the wmb().
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;	/* preserve ring wrap */

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
466
/* Attach a freshly DMA-mapped buffer to @desc and give it to the asic. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
473
/*
 * Neutralize a descriptor so the asic skips it: poison the address,
 * keep only the ring-wrap bit and clear OWN last (after the wmb()).
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);	/* poison value */
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
482
483 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
484 struct RxDesc *desc, u32 rx_buf_sz)
485 {
486 struct sk_buff *skb;
487 dma_addr_t mapping;
488 int ret = 0;
489
490 skb = dev_alloc_skb(rx_buf_sz);
491 if (!skb)
492 goto err_out;
493
494 *sk_buff = skb;
495
496 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
497 PCI_DMA_FROMDEVICE);
498
499 sis190_map_to_asic(desc, mapping, rx_buf_sz);
500 out:
501 return ret;
502
503 err_out:
504 ret = -ENOMEM;
505 sis190_make_unusable_by_asic(desc);
506 goto out;
507 }
508
509 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
510 u32 start, u32 end)
511 {
512 u32 cur;
513
514 for (cur = start; cur < end; cur++) {
515 int ret, i = cur % NUM_RX_DESC;
516
517 if (tp->Rx_skbuff[i])
518 continue;
519
520 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
521 tp->RxDescRing + i, tp->rx_buf_sz);
522 if (ret < 0)
523 break;
524 }
525 return cur - start;
526 }
527
528 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
529 struct RxDesc *desc, int rx_buf_sz)
530 {
531 int ret = -1;
532
533 if (pkt_size < rx_copybreak) {
534 struct sk_buff *skb;
535
536 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
537 if (skb) {
538 skb_reserve(skb, NET_IP_ALIGN);
539 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
540 *sk_buff = skb;
541 sis190_give_to_asic(desc, rx_buf_sz);
542 ret = 0;
543 }
544 }
545 return ret;
546 }
547
/*
 * Classify a completed Rx descriptor.  Returns 0 for a good frame;
 * otherwise bumps the most specific error counter plus rx_errors and
 * returns -1.
 */
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	/* Good frame: CRC checked out and no error bit is raised. */
	if ((status & CRCOK) && !(status & ErrMask))
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}
567
/*
 * Harvest completed Rx descriptors: validate each frame, copy small
 * ones, hand them to the stack and refill the ring.  Runs from hard
 * irq context (no NAPI in this driver).  Returns the number of
 * descriptors processed.
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	/* Descriptors the hardware may have completed since last time. */
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		/* Still owned by the asic: nothing more to harvest. */
		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			/* Bad frame: recycle the buffer in place. */
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			/* Strip the trailing 4-byte CRC from the length. */
			int pkt_size = (status & RxSizeMask) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			/* Oversized (multi-descriptor) frames are dropped. */
			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le32_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			/*
			 * Small frames are copied and the original buffer
			 * goes back to the asic; otherwise the skb is
			 * detached and fully unmapped below.
			 */
			if (sis190_try_rx_copy(&skb, pkt_size, desc,
					       tp->rx_buf_sz)) {
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			dev->last_rx = jiffies;
			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Replenish the slots consumed above. */
	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}
648
649 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
650 struct TxDesc *desc)
651 {
652 unsigned int len;
653
654 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
655
656 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
657
658 memset(desc, 0x00, sizeof(*desc));
659 }
660
/*
 * Reclaim Tx descriptors released by the hardware, free their skbs and
 * wake the queue if it had been stopped on a full ring.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();	/* pairs with smp_wmb() in sis190_start_xmit */
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		/* Still owned by the asic: transmission not finished. */
		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();	/* publish dirty_tx before waking the queue */
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
700
701 /*
702 * The interrupt handler does all of the Rx thread work and cleans up after
703 * the Tx thread.
704 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff: device gone; 0: not ours (the irq line is shared). */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	/* Interface going down: just silence the chip. */
	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Ack exactly the events we observed. */
	SIS_W32(IntrStatus, status);

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* PHY work sleeps: defer it to process context. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
742
743 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt': run the irq handler with the line disabled. */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
753 #endif
754
/* Unmap and free one Rx buffer, leaving its descriptor unusable. */
static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}
766
767 static void sis190_rx_clear(struct sis190_private *tp)
768 {
769 unsigned int i;
770
771 for (i = 0; i < NUM_RX_DESC; i++) {
772 if (!tp->Rx_skbuff[i])
773 continue;
774 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
775 }
776 }
777
/* Reset all ring cursors; only safe while the chip is quiescent. */
static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
782
783 static int sis190_init_ring(struct net_device *dev)
784 {
785 struct sis190_private *tp = netdev_priv(dev);
786
787 sis190_init_ring_indexes(tp);
788
789 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
790 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
791
792 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
793 goto err_rx_clear;
794
795 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
796
797 return 0;
798
799 err_rx_clear:
800 sis190_rx_clear(tp);
801 return -ENOMEM;
802 }
803
/* Program the Rx filter from dev->flags and the multicast list. */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into the 64-entry CRC filter. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
845
/* Soft-reset the chip, then force its DMA engines down. */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);	/* assert soft reset */
	SIS_PCI_COMMIT();
	msleep(1);
	SIS_W32(IntrControl, 0x0);	/* release reset */
	sis190_asic_down(ioaddr);
	msleep(1);
}
855
/* Reset the chip and bring it into an operational Tx/Rx state. */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	/* Clear stale events and keep everything masked during setup. */
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* undocumented register */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
888
/*
 * Deferred (process context) link management: wait for PHY reset and
 * autonegotiation to complete, then program StationControl to match
 * the negotiated speed/duplex.  Re-arms tp->timer while not done.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY reset still in progress: poll again shortly. */
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		/* Autoneg has not completed: reset the PHY and retry. */
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		netif_carrier_off(dev);
		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		/* Map negotiated abilities to StationControl settings. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
			  "1000 Mbps Full Duplex" },
			{ LPA_1000XHALF | LPA_SLCT, 0x07000c00,
			  "1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
			  "100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
			  "100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
			  "10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
			  "10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p;
		u16 adv;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
			 dev->name, val, adv);

		/* Keep only the abilities both link partners agreed on. */
		val &= adv;

		for (p = reg31; p->val; p++) {
			if ((val & p->val) == p->val)
				break;
		}

		/* Preserve the StationControl bits we do not manage. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

out_unlock:
	rtnl_unlock();
}
978
979 static void sis190_phy_timer(unsigned long __opaque)
980 {
981 struct net_device *dev = (struct net_device *)__opaque;
982 struct sis190_private *tp = netdev_priv(dev);
983
984 if (likely(netif_running(dev)))
985 schedule_work(&tp->phy_task);
986 }
987
/* Stop the PHY poll timer, waiting out any running handler. */
static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}
994
/* Arm the periodic PHY poll timer (fires sis190_phy_timer). */
static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}
1006
1007 static void sis190_set_rxbufsize(struct sis190_private *tp,
1008 struct net_device *dev)
1009 {
1010 unsigned int mtu = dev->mtu;
1011
1012 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1013 /* RxDesc->size has a licence to kill the lower bits */
1014 if (tp->rx_buf_sz & 0x07) {
1015 tp->rx_buf_sz += 8;
1016 tp->rx_buf_sz &= RX_BUF_MASK;
1017 }
1018 }
1019
/* net_device ->open(): allocate rings, grab the irq, start the hw. */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

	/* Error unwind, in reverse order of acquisition. */
err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
			    tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
			    tp->tx_dma);
	goto out;
}
1065
1066 static void sis190_tx_clear(struct sis190_private *tp)
1067 {
1068 unsigned int i;
1069
1070 for (i = 0; i < NUM_TX_DESC; i++) {
1071 struct sk_buff *skb = tp->Tx_skbuff[i];
1072
1073 if (!skb)
1074 continue;
1075
1076 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1077 tp->Tx_skbuff[i] = NULL;
1078 dev_kfree_skb(skb);
1079
1080 tp->dev->stats.tx_dropped++;
1081 }
1082 tp->cur_tx = tp->dirty_tx = 0;
1083 }
1084
/*
 * Bring the adapter down: stop timer and queue, silence the chip and
 * drain both rings.  The loop guards against the shared-irq handler
 * re-enabling interrupts behind our back.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		/* Wait for any in-flight irq handler to finish. */
		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1114
/* net_device ->stop(): tear down irq, rings and DMA memory. */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
1132
/* net_device ->hard_start_xmit(): queue one frame for transmission. */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	/* Pad runt frames up to the minimum Ethernet length. */
	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			/* skb_padto already freed the skb on failure. */
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* Should never trigger: the queue stops before the ring fills. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* All fields must be visible before OWN is handed to the asic. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();	/* pairs with smp_rmb() in sis190_tx_interrupt */

	/* Kick the transmitter. */
	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		/* Ring now full: stop, then re-check in case the irq
		 * handler freed slots in the meantime. */
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1195
1196 static void sis190_free_phy(struct list_head *first_phy)
1197 {
1198 struct sis190_phy *cur, *next;
1199
1200 list_for_each_entry_safe(cur, next, first_phy, list) {
1201 kfree(cur);
1202 }
1203 }
1204
1205 /**
1206 * sis190_default_phy - Select default PHY for sis190 mac.
1207 * @dev: the net device to probe for
1208 *
1209 * Select first detected PHY with link as default.
1210 * If no one is link on, select PHY whose types is HOME as default.
1211 * If HOME doesn't exist, select LAN.
1212 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			/* Electrically isolate the non-selected PHY. */
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		/* No PHY with link: fall back to HOME, then LAN. */
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			/* NOTE(review): list_entry() on the list head itself
			 * only yields a real phy because of the inverted
			 * list_add() arguments in sis190_mii_probe() -
			 * confirm before changing either site. */
			phy_default = list_entry(&tp->first_phy,
				 struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		net_probe(tp, KERN_INFO
		       "%s: Using transceiver at address %d as default.\n",
		       pci_name(tp->pci_dev), mii_if->phy_id);
	}

	/* De-isolate the chosen PHY and return its latched link status. */
	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}
1267
1268 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1269 struct sis190_phy *phy, unsigned int phy_id,
1270 u16 mii_status)
1271 {
1272 void __iomem *ioaddr = tp->mmio_addr;
1273 struct mii_chip_info *p;
1274
1275 INIT_LIST_HEAD(&phy->list);
1276 phy->status = mii_status;
1277 phy->phy_id = phy_id;
1278
1279 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1280 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1281
1282 for (p = mii_chip_table; p->type; p++) {
1283 if ((p->id[0] == phy->id[0]) &&
1284 (p->id[1] == (phy->id[1] & 0xfff0))) {
1285 break;
1286 }
1287 }
1288
1289 if (p->id[1]) {
1290 phy->type = (p->type == MIX) ?
1291 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1292 LAN : HOME) : p->type;
1293 tp->features |= p->feature;
1294 } else
1295 phy->type = UNKNOWN;
1296
1297 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1298 pci_name(tp->pci_dev),
1299 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1300 }
1301
1302 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1303 {
1304 if (tp->features & F_PHY_88E1111) {
1305 void __iomem *ioaddr = tp->mmio_addr;
1306 int phy_id = tp->mii_if.phy_id;
1307 u16 reg[2][2] = {
1308 { 0x808b, 0x0ce1 },
1309 { 0x808f, 0x0c60 }
1310 }, *p;
1311
1312 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1313
1314 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1315 udelay(200);
1316 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1317 udelay(200);
1318 }
1319 }
1320
1321 /**
1322 * sis190_mii_probe - Probe MII PHY for sis190
1323 * @dev: the net device to probe for
1324 *
1325 * Search for total of 32 possible mii phy addresses.
1326 * Identify and set current phy if found one,
1327 * return error if it failed to found.
1328 */
static int __devinit sis190_mii_probe(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id;
	int rc = 0;

	INIT_LIST_HEAD(&tp->first_phy);

	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
		struct sis190_phy *phy;
		u16 status;

		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

		// Try next mii if the current one is not accessible.
		if (status == 0xffff || status == 0x0000)
			continue;

		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
		if (!phy) {
			/* Drop the phys collected so far before bailing. */
			sis190_free_phy(&tp->first_phy);
			rc = -ENOMEM;
			goto out;
		}

		sis190_init_phy(dev, tp, phy, phy_id, status);

		/* NOTE(review): arguments look swapped versus the usual
		 * list_add(new, head) idiom - this matches the historical
		 * driver and sis190_default_phy() relies on the resulting
		 * layout; confirm before "fixing". */
		list_add(&tp->first_phy, &phy->list);
	}

	if (list_empty(&tp->first_phy)) {
		net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
			  pci_name(tp->pci_dev));
		rc = -EIO;
		goto out;
	}

	/* Select default PHY for mac */
	sis190_default_phy(dev);

	sis190_mii_probe_88e1111_fixup(tp);

	/* Hook the generic MII helpers up to our mdio accessors. */
	mii_if->dev = dev;
	mii_if->mdio_read = __mdio_read;
	mii_if->mdio_write = __mdio_write;
	mii_if->phy_id_mask = PHY_ID_ANY;
	mii_if->reg_num_mask = MII_REG_ANY;
out:
	return rc;
}
1381
1382 static void sis190_mii_remove(struct net_device *dev)
1383 {
1384 struct sis190_private *tp = netdev_priv(dev);
1385
1386 sis190_free_phy(&tp->first_phy);
1387 }
1388
1389 static void sis190_release_board(struct pci_dev *pdev)
1390 {
1391 struct net_device *dev = pci_get_drvdata(pdev);
1392 struct sis190_private *tp = netdev_priv(dev);
1393
1394 iounmap(tp->mmio_addr);
1395 pci_release_regions(pdev);
1396 pci_disable_device(pdev);
1397 free_netdev(dev);
1398 }
1399
/*
 * Allocate the net_device, enable and map the PCI device and put the
 * chip into a quiescent, freshly-reset state.  Returns the net_device
 * on success or an ERR_PTR() on failure; partially-acquired resources
 * are unwound through the goto chain at the bottom.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be a memory resource large enough for our registers. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	/* The chip is driven with 32 bit DMA only. */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	/* Silence the chip before anything else can happen. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1482
/*
 * Watchdog callback: the Tx path stalled.  Stop the transmitter, mask
 * interrupts, drop everything queued on the Tx ring and restart the
 * hardware from scratch.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);


	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1511
1512 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1513 {
1514 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1515 }
1516
1517 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1518 struct net_device *dev)
1519 {
1520 struct sis190_private *tp = netdev_priv(dev);
1521 void __iomem *ioaddr = tp->mmio_addr;
1522 u16 sig;
1523 int i;
1524
1525 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1526 pci_name(pdev));
1527
1528 /* Check to see if there is a sane EEPROM */
1529 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1530
1531 if ((sig == 0xffff) || (sig == 0x0000)) {
1532 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1533 pci_name(pdev), sig);
1534 return -EIO;
1535 }
1536
1537 /* Get MAC address from EEPROM */
1538 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1539 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1540
1541 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1542 }
1543
1544 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1545
1546 return 0;
1547 }
1548
1549 /**
1550 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1551 * @pdev: PCI device
1552 * @dev: network device to get address for
1553 *
1554 * SiS96x model, use APC CMOS RAM to store MAC address.
1555 * APC CMOS RAM is accessed through ISA bridge.
1556 * MAC address is read into @net_dev->dev_addr.
1557 */
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
						  struct net_device *dev)
{
	static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *isa_bridge;
	u8 reg, tmp8;
	unsigned int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
		  pci_name(pdev));

	/* Locate one of the SiS96x ISA bridges the APC RAM sits behind. */
	for (i = 0; i < ARRAY_SIZE(ids); i++) {
		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
		if (isa_bridge)
			break;
	}

	if (!isa_bridge) {
		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
			  pci_name(pdev));
		return -EIO;
	}

	/* Enable port 78h & 79h to access APC Registers. */
	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
	reg = (tmp8 & ~0x02);
	pci_write_config_byte(isa_bridge, 0x48, reg);
	udelay(50);
	/* NOTE(review): the value read back here is overwritten before
	 * use - presumably the read only posts the config write; confirm. */
	pci_read_config_byte(isa_bridge, 0x48, &reg);

	/* APC index/data port pair at 0x78/0x79; MAC bytes start at
	 * index 0x9. */
	for (i = 0; i < MAC_ADDR_LEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}

	/* Index 0x12 is fed to sis190_set_rgmii() (bit 7 = RGMII). */
	outb(0x12, 0x78);
	reg = inb(0x79);

	sis190_set_rgmii(tp, reg);

	/* Restore the value to ISA Bridge */
	pci_write_config_byte(isa_bridge, 0x48, tmp8);
	pci_dev_put(isa_bridge);

	return 0;
}
1605
1606 /**
1607 * sis190_init_rxfilter - Initialize the Rx filter
1608 * @dev: network device to initialize
1609 *
1610 * Set receive filter address to our MAC address
1611 * and enable packet filtering.
1612 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	/* Program our unicast address byte by byte. */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	/* Restore the original control bits and flush posted writes. */
	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
1634
1635 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1636 {
1637 u8 from;
1638
1639 pci_read_config_byte(pdev, 0x73, &from);
1640
1641 return (from & 0x00000001) ?
1642 sis190_get_mac_addr_from_apc(pdev, dev) :
1643 sis190_get_mac_addr_from_eeprom(pdev, dev);
1644 }
1645
/* Advertise 10/100 half/full plus 1000 full and (re)start
 * auto-negotiation on the currently selected PHY. */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1670
1671 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1672 {
1673 struct sis190_private *tp = netdev_priv(dev);
1674
1675 return mii_ethtool_gset(&tp->mii_if, cmd);
1676 }
1677
1678 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1679 {
1680 struct sis190_private *tp = netdev_priv(dev);
1681
1682 return mii_ethtool_sset(&tp->mii_if, cmd);
1683 }
1684
1685 static void sis190_get_drvinfo(struct net_device *dev,
1686 struct ethtool_drvinfo *info)
1687 {
1688 struct sis190_private *tp = netdev_priv(dev);
1689
1690 strcpy(info->driver, DRV_NAME);
1691 strcpy(info->version, DRV_VERSION);
1692 strcpy(info->bus_info, pci_name(tp->pci_dev));
1693 }
1694
/* ethtool: the register dump always covers the whole MMIO window. */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1699
1700 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1701 void *p)
1702 {
1703 struct sis190_private *tp = netdev_priv(dev);
1704 unsigned long flags;
1705
1706 if (regs->len > SIS190_REGS_SIZE)
1707 regs->len = SIS190_REGS_SIZE;
1708
1709 spin_lock_irqsave(&tp->lock, flags);
1710 memcpy_fromio(p, tp->mmio_addr, regs->len);
1711 spin_unlock_irqrestore(&tp->lock, flags);
1712 }
1713
1714 static int sis190_nway_reset(struct net_device *dev)
1715 {
1716 struct sis190_private *tp = netdev_priv(dev);
1717
1718 return mii_nway_restart(&tp->mii_if);
1719 }
1720
1721 static u32 sis190_get_msglevel(struct net_device *dev)
1722 {
1723 struct sis190_private *tp = netdev_priv(dev);
1724
1725 return tp->msg_enable;
1726 }
1727
1728 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1729 {
1730 struct sis190_private *tp = netdev_priv(dev);
1731
1732 tp->msg_enable = value;
1733 }
1734
/* ethtool entry points; operations not listed here fall back to the
 * ethtool core defaults. */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
1746
1747 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1748 {
1749 struct sis190_private *tp = netdev_priv(dev);
1750
1751 return !netif_running(dev) ? -EINVAL :
1752 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1753 }
1754
/*
 * PCI probe: set up the board, retrieve the MAC address, wire up the
 * net_device methods and register the interface.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;
	DECLARE_MAC_BUF(mac);

	/* Announce the driver once, however many adapters are probed. */
	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->open = sis190_open;
	dev->stop = sis190_close;
	dev->do_ioctl = sis190_ioctl;
	dev->tx_timeout = sis190_tx_timeout;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
	dev->hard_start_xmit = sis190_start_xmit;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sis190_netpoll;
#endif
	dev->set_multicast_list = sis190_set_rx_mode;
	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* MMIO-only device: base_addr gets an obvious dummy value. */
	dev->base_addr = (unsigned long) 0xdead;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
		  "%s\n",
		  pci_name(pdev), sis_chip_info[ent->driver_data].name,
		  ioaddr, dev->irq, print_mac(mac, dev->dev_addr));

	net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
		  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");

	/* No link yet: autoneg will raise the carrier asynchronously. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
1833
/* PCI remove: tear the probe-time setup back down. */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	sis190_mii_remove(dev);
	/* NOTE(review): the phy list is freed and pending work flushed
	 * before unregister_netdev(); confirm nothing can requeue
	 * phy_task or touch the phy list until the device is gone. */
	flush_scheduled_work();
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
1844
/* PCI glue: supported device table and probe/remove callbacks. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1851
/* Module load: register the PCI driver. */
static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}
1856
/* Module unload: unregister the PCI driver. */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}
1861
/* Module entry/exit hooks. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);
/* This page took 0.06827 seconds and 6 git commands to generate. */