net: convert print_mac to %pM
1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
34
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
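/*
 * These macros expand to a bare "if (...) printk(...)" statement.  A
 * do { ... } while (0) wrapper would make them safe in dangling-else
 * contexts, e.g. (sketch only, not what the driver uses):
 *
 *	#define net_drv(p, arg...)	do {		\
 *		if (netif_msg_drv(p))			\
 *			printk(arg);			\
 *	} while (0)
 */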
45
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
49
50 #define DRV_VERSION "1.2"
51 #define DRV_NAME "sis190"
52 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53 #define PFX DRV_NAME ": "
54
55 #define sis190_rx_skb netif_rx
56 #define sis190_rx_quota(count, quota) count
57
58 #define MAC_ADDR_LEN 6
59
60 #define NUM_TX_DESC 64 /* [8..1024] */
61 #define NUM_RX_DESC 64 /* [8..8192] */
62 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
63 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
64 #define RX_BUF_SIZE 1536
65 #define RX_BUF_MASK 0xfff8
66
67 #define SIS190_REGS_SIZE 0x80
68 #define SIS190_TX_TIMEOUT (6*HZ)
69 #define SIS190_PHY_TIMEOUT (10*HZ)
70 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
71 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
72 NETIF_MSG_IFDOWN)
73
74 /* Enhanced PHY access register bit definitions */
75 #define EhnMIIread 0x0000
76 #define EhnMIIwrite 0x0020
77 #define EhnMIIdataShift 16
78 #define EhnMIIpmdShift 6 /* 7016 only */
79 #define EhnMIIregShift 11
80 #define EhnMIIreq 0x0010
81 #define EhnMIInotDone 0x0010
82
83 /* Write/read MMIO register */
84 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
85 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
86 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
87 #define SIS_R8(reg) readb (ioaddr + (reg))
88 #define SIS_R16(reg) readw (ioaddr + (reg))
89 #define SIS_R32(reg) readl (ioaddr + (reg))
90
91 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
92
93 enum sis190_registers {
94 TxControl = 0x00,
95 TxDescStartAddr = 0x04,
96 rsv0 = 0x08, // reserved
97 TxSts = 0x0c, // unused (Control/Status)
98 RxControl = 0x10,
99 RxDescStartAddr = 0x14,
100 rsv1 = 0x18, // reserved
101 RxSts = 0x1c, // unused
102 IntrStatus = 0x20,
103 IntrMask = 0x24,
104 IntrControl = 0x28,
105 IntrTimer = 0x2c, // unused (Interrupt Timer)
106 PMControl = 0x30, // unused (Power Mgmt Control/Status)
107 rsv2 = 0x34, // reserved
108 ROMControl = 0x38,
109 ROMInterface = 0x3c,
110 StationControl = 0x40,
111 GMIIControl = 0x44,
112 GIoCR = 0x48, // unused (GMAC IO Compensation)
113 GIoCtrl = 0x4c, // unused (GMAC IO Control)
114 TxMacControl = 0x50,
115 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
116 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
117 rsv3 = 0x5c, // reserved
118 RxMacControl = 0x60,
119 RxMacAddr = 0x62,
120 RxHashTable = 0x68,
121 // Undocumented = 0x6c,
122 RxWolCtrl = 0x70,
123 RxWolData = 0x74, // unused (Rx WOL Data Access)
124 RxMPSControl = 0x78, // unused (Rx MPS Control)
125 rsv4 = 0x7c, // reserved
126 };
127
128 enum sis190_register_content {
129 /* IntrStatus */
130 SoftInt = 0x40000000, // unused
131 Timeup = 0x20000000, // unused
132 PauseFrame = 0x00080000, // unused
133 MagicPacket = 0x00040000, // unused
134 WakeupFrame = 0x00020000, // unused
135 LinkChange = 0x00010000,
136 RxQEmpty = 0x00000080,
137 RxQInt = 0x00000040,
138 TxQ1Empty = 0x00000020, // unused
139 TxQ1Int = 0x00000010,
140 TxQ0Empty = 0x00000008, // unused
141 TxQ0Int = 0x00000004,
142 RxHalt = 0x00000002,
143 TxHalt = 0x00000001,
144
145 /* {Rx/Tx}CmdBits */
146 CmdReset = 0x10,
147 CmdRxEnb = 0x08, // unused
148 CmdTxEnb = 0x01,
149 RxBufEmpty = 0x01, // unused
150
151 /* Cfg9346Bits */
152 Cfg9346_Lock = 0x00, // unused
153 Cfg9346_Unlock = 0xc0, // unused
154
155 /* RxMacControl */
156 AcceptErr = 0x20, // unused
157 AcceptRunt = 0x10, // unused
158 AcceptBroadcast = 0x0800,
159 AcceptMulticast = 0x0400,
160 AcceptMyPhys = 0x0200,
161 AcceptAllPhys = 0x0100,
162
163 /* RxConfigBits */
164 RxCfgFIFOShift = 13,
165 RxCfgDMAShift = 8, // 0x1a in RxControl ?
166
167 /* TxConfigBits */
168 TxInterFrameGapShift = 24,
169 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
170
171 LinkStatus = 0x02, // unused
172 FullDup = 0x01, // unused
173
174 /* TBICSRBit */
175 TBILinkOK = 0x02000000, // unused
176 };
177
178 struct TxDesc {
179 __le32 PSize;
180 __le32 status;
181 __le32 addr;
182 __le32 size;
183 };
184
185 struct RxDesc {
186 __le32 PSize;
187 __le32 status;
188 __le32 addr;
189 __le32 size;
190 };
191
192 enum _DescStatusBit {
193 /* _Desc.status */
194 OWNbit = 0x80000000, // RXOWN/TXOWN
195 INTbit = 0x40000000, // RXINT/TXINT
196 CRCbit = 0x00020000, // CRCOFF/CRCEN
197 PADbit = 0x00010000, // PREADD/PADEN
198 /* _Desc.size */
199 RingEnd = 0x80000000,
200 /* TxDesc.status */
201 LSEN = 0x08000000, // TSO ? -- FR
202 IPCS = 0x04000000,
203 TCPCS = 0x02000000,
204 UDPCS = 0x01000000,
205 BSTEN = 0x00800000,
206 EXTEN = 0x00400000,
207 DEFEN = 0x00200000,
208 BKFEN = 0x00100000,
209 CRSEN = 0x00080000,
210 COLEN = 0x00040000,
211 THOL3 = 0x30000000,
212 THOL2 = 0x20000000,
213 THOL1 = 0x10000000,
214 THOL0 = 0x00000000,
215
216 WND = 0x00080000,
217 TABRT = 0x00040000,
218 FIFO = 0x00020000,
219 LINK = 0x00010000,
220 ColCountMask = 0x0000ffff,
221 /* RxDesc.status */
222 IPON = 0x20000000,
223 TCPON = 0x10000000,
224 UDPON = 0x08000000,
225 Wakup = 0x00400000,
226 Magic = 0x00200000,
227 Pause = 0x00100000,
228 DEFbit = 0x00200000,
229 BCAST = 0x000c0000,
230 MCAST = 0x00080000,
231 UCAST = 0x00040000,
232 /* RxDesc.PSize */
233 TAGON = 0x80000000,
234 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
235 ABORT = 0x00800000,
236 SHORT = 0x00400000,
237 LIMIT = 0x00200000,
238 MIIER = 0x00100000,
239 OVRUN = 0x00080000,
240 NIBON = 0x00040000,
241 COLON = 0x00020000,
242 CRCOK = 0x00010000,
243 RxSizeMask = 0x0000ffff
244 /*
245 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
246 * provide two (unused with Linux) Tx queues. No publicly
247 * available documentation, alas.
248 */
249 };
250
251 enum sis190_eeprom_access_register_bits {
252 EECS = 0x00000001, // unused
253 EECLK = 0x00000002, // unused
254 EEDO = 0x00000008, // unused
255 EEDI = 0x00000004, // unused
256 EEREQ = 0x00000080,
257 EEROP = 0x00000200,
258 EEWOP = 0x00000100 // unused
259 };
260
261 /* EEPROM Addresses */
262 enum sis190_eeprom_address {
263 EEPROMSignature = 0x00,
264 EEPROMCLK = 0x01, // unused
265 EEPROMInfo = 0x02,
266 EEPROMMACAddr = 0x03
267 };
268
269 enum sis190_feature {
270 F_HAS_RGMII = 1,
271 F_PHY_88E1111 = 2,
272 F_PHY_BCM5461 = 4
273 };
274
275 struct sis190_private {
276 void __iomem *mmio_addr;
277 struct pci_dev *pci_dev;
278 struct net_device *dev;
279 spinlock_t lock;
280 u32 rx_buf_sz;
281 u32 cur_rx;
282 u32 cur_tx;
283 u32 dirty_rx;
284 u32 dirty_tx;
285 dma_addr_t rx_dma;
286 dma_addr_t tx_dma;
287 struct RxDesc *RxDescRing;
288 struct TxDesc *TxDescRing;
289 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
290 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
291 struct work_struct phy_task;
292 struct timer_list timer;
293 u32 msg_enable;
294 struct mii_if_info mii_if;
295 struct list_head first_phy;
296 u32 features;
297 };
298
299 struct sis190_phy {
300 struct list_head list;
301 int phy_id;
302 u16 id[2];
303 u16 status;
304 u8 type;
305 };
306
307 enum sis190_phy_type {
308 UNKNOWN = 0x00,
309 HOME = 0x01,
310 LAN = 0x02,
311 MIX = 0x03
312 };
313
314 static struct mii_chip_info {
315 const char *name;
316 u16 id[2];
317 unsigned int type;
318 u32 feature;
319 } mii_chip_table[] = {
320 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
321 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
322 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
323 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
324 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
325 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
326 { NULL, }
327 };
328
329 static const struct {
330 const char *name;
331 } sis_chip_info[] = {
332 { "SiS 190 PCI Fast Ethernet adapter" },
333 { "SiS 191 PCI Gigabit Ethernet adapter" },
334 };
335
336 static struct pci_device_id sis190_pci_tbl[] = {
337 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
338 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
339 { 0, },
340 };
341
342 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
343
344 static int rx_copybreak = 200;
345
346 static struct {
347 u32 msg_enable;
348 } debug = { -1 };
349
350 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
351 module_param(rx_copybreak, int, 0);
352 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
353 module_param_named(debug, debug.msg_enable, int, 0);
354 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
355 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
356 MODULE_VERSION(DRV_VERSION);
357 MODULE_LICENSE("GPL");
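/*
 * Both knobs above are ordinary module parameters, so they can be set at
 * load time, e.g. (illustrative): "modprobe sis190 rx_copybreak=100 debug=16".
 * With a permission argument of 0 they are not exposed under
 * /sys/module/sis190/parameters/.
 */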
358
359 static const u32 sis190_intr_mask =
360 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
361
362 /*
363 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
364 * The chips use a 64-element hash table based on the Ethernet CRC.
365 */
366 static const int multicast_filter_limit = 32;
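/*
 * Hash filter sketch (as implemented in sis190_set_rx_mode() below): each
 * multicast address selects one of the 64 bins from the low six bits of
 * its Ethernet CRC,
 *
 *	bit_nr = ether_crc(ETH_ALEN, addr) & 0x3f;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 *
 * and mc_filter[0]/mc_filter[1] are written to RxHashTable/RxHashTable + 4.
 */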
367
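/*
 * Layout of the GMIIControl command word used below (inferred from the
 * Ehn* shift/bit definitions above; no public documentation):
 *
 *	bits 31..16	data		(EhnMIIdataShift)
 *	bits 15..11	PHY register	(EhnMIIregShift)
 *	bits 10..6	PHY address	(EhnMIIpmdShift)
 *	bit       5	write (1) / read (0)
 *	bit       4	request; reads back as "not done" while busy
 */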
368 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
369 {
370 unsigned int i;
371
372 SIS_W32(GMIIControl, ctl);
373
374 msleep(1);
375
376 for (i = 0; i < 100; i++) {
377 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
378 break;
379 msleep(1);
380 }
381
382 if (i > 99)
383 printk(KERN_ERR PFX "PHY command failed !\n");
384 }
385
386 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
387 {
388 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
389 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
390 (((u32) val) << EhnMIIdataShift));
391 }
392
393 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
394 {
395 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
396 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
397
398 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
399 }
400
401 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
402 {
403 struct sis190_private *tp = netdev_priv(dev);
404
405 mdio_write(tp->mmio_addr, phy_id, reg, val);
406 }
407
408 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
409 {
410 struct sis190_private *tp = netdev_priv(dev);
411
412 return mdio_read(tp->mmio_addr, phy_id, reg);
413 }
414
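/*
 * Some MII status bits (e.g. BMSR link status) are latched until read:
 * the first read returns the latched value, the second the current one,
 * hence the double read below.
 */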
415 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
416 {
417 mdio_read(ioaddr, phy_id, reg);
418 return mdio_read(ioaddr, phy_id, reg);
419 }
420
421 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
422 {
423 u16 data = 0xffff;
424 unsigned int i;
425
426 if (!(SIS_R32(ROMControl) & 0x0002))
427 return 0;
428
429 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
430
431 for (i = 0; i < 200; i++) {
432 if (!(SIS_R32(ROMInterface) & EEREQ)) {
433 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
434 break;
435 }
436 msleep(1);
437 }
438
439 return data;
440 }
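/*
 * ROMInterface command sketch (from the function above): the request is
 * EEREQ | EEROP | (word_address << 10); once EEREQ clears, the 16-bit
 * result sits in bits 31..16 of ROMInterface.
 */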
441
442 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
443 {
444 SIS_W32(IntrMask, 0x00);
445 SIS_W32(IntrStatus, 0xffffffff);
446 SIS_PCI_COMMIT();
447 }
448
449 static void sis190_asic_down(void __iomem *ioaddr)
450 {
451 /* Stop the chip's Tx and Rx DMA processes. */
452
453 SIS_W32(TxControl, 0x1a00);
454 SIS_W32(RxControl, 0x1a00);
455
456 sis190_irq_mask_and_ack(ioaddr);
457 }
458
459 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
460 {
461 desc->size |= cpu_to_le32(RingEnd);
462 }
463
464 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
465 {
466 u32 eor = le32_to_cpu(desc->size) & RingEnd;
467
468 desc->PSize = 0x0;
469 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
470 wmb();
471 desc->status = cpu_to_le32(OWNbit | INTbit);
472 }
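/*
 * Ownership handoff: the wmb() above orders the size/address updates
 * before OWNbit is set, so the NIC never sees a half-initialized
 * descriptor.  The Rx/Tx interrupt paths test OWNbit before touching a
 * descriptor again.
 */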
473
474 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
475 u32 rx_buf_sz)
476 {
477 desc->addr = cpu_to_le32(mapping);
478 sis190_give_to_asic(desc, rx_buf_sz);
479 }
480
481 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
482 {
483 desc->PSize = 0x0;
484 desc->addr = cpu_to_le32(0xdeadbeef);
485 desc->size &= cpu_to_le32(RingEnd);
486 wmb();
487 desc->status = 0x0;
488 }
489
490 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
491 struct RxDesc *desc)
492 {
493 u32 rx_buf_sz = tp->rx_buf_sz;
494 struct sk_buff *skb;
495
496 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
497 if (likely(skb)) {
498 dma_addr_t mapping;
499
500 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
501 PCI_DMA_FROMDEVICE);
502 sis190_map_to_asic(desc, mapping, rx_buf_sz);
503 } else
504 sis190_make_unusable_by_asic(desc);
505
506 return skb;
507 }
508
509 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
510 u32 start, u32 end)
511 {
512 u32 cur;
513
514 for (cur = start; cur < end; cur++) {
515 unsigned int i = cur % NUM_RX_DESC;
516
517 if (tp->Rx_skbuff[i])
518 continue;
519
520 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
521
522 if (!tp->Rx_skbuff[i])
523 break;
524 }
525 return cur - start;
526 }
527
528 static bool sis190_try_rx_copy(struct sis190_private *tp,
529 struct sk_buff **sk_buff, int pkt_size,
530 dma_addr_t addr)
531 {
532 struct sk_buff *skb;
533 bool done = false;
534
535 if (pkt_size >= rx_copybreak)
536 goto out;
537
538 skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
539 if (!skb)
540 goto out;
541
542 pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size,
543 PCI_DMA_FROMDEVICE);
544 skb_reserve(skb, 2);
545 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
546 *sk_buff = skb;
547 done = true;
548 out:
549 return done;
550 }
551
552 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
553 {
554 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
555
556 if ((status & CRCOK) && !(status & ErrMask))
557 return 0;
558
559 if (!(status & CRCOK))
560 stats->rx_crc_errors++;
561 else if (status & OVRUN)
562 stats->rx_over_errors++;
563 else if (status & (SHORT | LIMIT))
564 stats->rx_length_errors++;
565 else if (status & (MIIER | NIBON | COLON))
566 stats->rx_frame_errors++;
567
568 stats->rx_errors++;
569 return -1;
570 }
571
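/*
 * cur_rx/dirty_rx (and cur_tx/dirty_tx) are free-running u32 counters;
 * only "counter % NUM_xx_DESC" indexes the rings.  Unsigned wrap-around
 * keeps differences such as "NUM_RX_DESC + dirty_rx - cur_rx" correct.
 */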
572 static int sis190_rx_interrupt(struct net_device *dev,
573 struct sis190_private *tp, void __iomem *ioaddr)
574 {
575 struct net_device_stats *stats = &dev->stats;
576 u32 rx_left, cur_rx = tp->cur_rx;
577 u32 delta, count;
578
579 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
580 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
581
582 for (; rx_left > 0; rx_left--, cur_rx++) {
583 unsigned int entry = cur_rx % NUM_RX_DESC;
584 struct RxDesc *desc = tp->RxDescRing + entry;
585 u32 status;
586
587 if (le32_to_cpu(desc->status) & OWNbit)
588 break;
589
590 status = le32_to_cpu(desc->PSize);
591
592 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
593 // status);
594
595 if (sis190_rx_pkt_err(status, stats) < 0)
596 sis190_give_to_asic(desc, tp->rx_buf_sz);
597 else {
598 struct sk_buff *skb = tp->Rx_skbuff[entry];
599 dma_addr_t addr = le32_to_cpu(desc->addr);
600 int pkt_size = (status & RxSizeMask) - 4;
601 struct pci_dev *pdev = tp->pci_dev;
602
603 if (unlikely(pkt_size > tp->rx_buf_sz)) {
604 net_intr(tp, KERN_INFO
605 "%s: (frag) status = %08x.\n",
606 dev->name, status);
607 stats->rx_dropped++;
608 stats->rx_length_errors++;
609 sis190_give_to_asic(desc, tp->rx_buf_sz);
610 continue;
611 }
612
613
614 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
615 pci_dma_sync_single_for_device(pdev, addr,
616 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
617 sis190_give_to_asic(desc, tp->rx_buf_sz);
618 } else {
619 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
620 PCI_DMA_FROMDEVICE);
621 tp->Rx_skbuff[entry] = NULL;
622 sis190_make_unusable_by_asic(desc);
623 }
624
625 skb_put(skb, pkt_size);
626 skb->protocol = eth_type_trans(skb, dev);
627
628 sis190_rx_skb(skb);
629
630 dev->last_rx = jiffies;
631 stats->rx_packets++;
632 stats->rx_bytes += pkt_size;
633 if ((status & BCAST) == MCAST)
634 stats->multicast++;
635 }
636 }
637 count = cur_rx - tp->cur_rx;
638 tp->cur_rx = cur_rx;
639
640 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
641 if (!delta && count && netif_msg_intr(tp))
642 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
643 tp->dirty_rx += delta;
644
645 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
646 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
647
648 return count;
649 }
650
651 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
652 struct TxDesc *desc)
653 {
654 unsigned int len;
655
656 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
657
658 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
659
660 memset(desc, 0x00, sizeof(*desc));
661 }
662
663 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
664 {
665 #define TxErrMask (WND | TABRT | FIFO | LINK)
666
667 if (!unlikely(status & TxErrMask))
668 return 0;
669
670 if (status & WND)
671 stats->tx_window_errors++;
672 if (status & TABRT)
673 stats->tx_aborted_errors++;
674 if (status & FIFO)
675 stats->tx_fifo_errors++;
676 if (status & LINK)
677 stats->tx_carrier_errors++;
678
679 stats->tx_errors++;
680
681 return -1;
682 }
683
684 static void sis190_tx_interrupt(struct net_device *dev,
685 struct sis190_private *tp, void __iomem *ioaddr)
686 {
687 struct net_device_stats *stats = &dev->stats;
688 u32 pending, dirty_tx = tp->dirty_tx;
689 /*
690 * This tracking would not be needed if it were harmless to re-enable
691 * queueing too early (hint: think preempt and unclocked SMP systems).
692 */
693 unsigned int queue_stopped;
694
695 smp_rmb();
696 pending = tp->cur_tx - dirty_tx;
697 queue_stopped = (pending == NUM_TX_DESC);
698
699 for (; pending; pending--, dirty_tx++) {
700 unsigned int entry = dirty_tx % NUM_TX_DESC;
701 struct TxDesc *txd = tp->TxDescRing + entry;
702 u32 status = le32_to_cpu(txd->status);
703 struct sk_buff *skb;
704
705 if (status & OWNbit)
706 break;
707
708 skb = tp->Tx_skbuff[entry];
709
710 if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
711 stats->tx_packets++;
712 stats->tx_bytes += skb->len;
713 stats->collisions += ((status & ColCountMask) - 1);
714 }
715
716 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
717 tp->Tx_skbuff[entry] = NULL;
718 dev_kfree_skb_irq(skb);
719 }
720
721 if (tp->dirty_tx != dirty_tx) {
722 tp->dirty_tx = dirty_tx;
723 smp_wmb();
724 if (queue_stopped)
725 netif_wake_queue(dev);
726 }
727 }
728
729 /*
730 * The interrupt handler does all of the Rx thread work and cleans up after
731 * the Tx thread.
732 */
733 static irqreturn_t sis190_interrupt(int irq, void *__dev)
734 {
735 struct net_device *dev = __dev;
736 struct sis190_private *tp = netdev_priv(dev);
737 void __iomem *ioaddr = tp->mmio_addr;
738 unsigned int handled = 0;
739 u32 status;
740
741 status = SIS_R32(IntrStatus);
742
743 if ((status == 0xffffffff) || !status)
744 goto out;
745
746 handled = 1;
747
748 if (unlikely(!netif_running(dev))) {
749 sis190_asic_down(ioaddr);
750 goto out;
751 }
752
753 SIS_W32(IntrStatus, status);
754
755 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
756
757 if (status & LinkChange) {
758 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
759 schedule_work(&tp->phy_task);
760 }
761
762 if (status & RxQInt)
763 sis190_rx_interrupt(dev, tp, ioaddr);
764
765 if (status & TxQ0Int)
766 sis190_tx_interrupt(dev, tp, ioaddr);
767 out:
768 return IRQ_RETVAL(handled);
769 }
770
771 #ifdef CONFIG_NET_POLL_CONTROLLER
772 static void sis190_netpoll(struct net_device *dev)
773 {
774 struct sis190_private *tp = netdev_priv(dev);
775 struct pci_dev *pdev = tp->pci_dev;
776
777 disable_irq(pdev->irq);
778 sis190_interrupt(pdev->irq, dev);
779 enable_irq(pdev->irq);
780 }
781 #endif
782
783 static void sis190_free_rx_skb(struct sis190_private *tp,
784 struct sk_buff **sk_buff, struct RxDesc *desc)
785 {
786 struct pci_dev *pdev = tp->pci_dev;
787
788 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
789 PCI_DMA_FROMDEVICE);
790 dev_kfree_skb(*sk_buff);
791 *sk_buff = NULL;
792 sis190_make_unusable_by_asic(desc);
793 }
794
795 static void sis190_rx_clear(struct sis190_private *tp)
796 {
797 unsigned int i;
798
799 for (i = 0; i < NUM_RX_DESC; i++) {
800 if (!tp->Rx_skbuff[i])
801 continue;
802 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
803 }
804 }
805
806 static void sis190_init_ring_indexes(struct sis190_private *tp)
807 {
808 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
809 }
810
811 static int sis190_init_ring(struct net_device *dev)
812 {
813 struct sis190_private *tp = netdev_priv(dev);
814
815 sis190_init_ring_indexes(tp);
816
817 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
818 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
819
820 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
821 goto err_rx_clear;
822
823 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
824
825 return 0;
826
827 err_rx_clear:
828 sis190_rx_clear(tp);
829 return -ENOMEM;
830 }
831
832 static void sis190_set_rx_mode(struct net_device *dev)
833 {
834 struct sis190_private *tp = netdev_priv(dev);
835 void __iomem *ioaddr = tp->mmio_addr;
836 unsigned long flags;
837 u32 mc_filter[2]; /* Multicast hash filter */
838 u16 rx_mode;
839
840 if (dev->flags & IFF_PROMISC) {
841 rx_mode =
842 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
843 AcceptAllPhys;
844 mc_filter[1] = mc_filter[0] = 0xffffffff;
845 } else if ((dev->mc_count > multicast_filter_limit) ||
846 (dev->flags & IFF_ALLMULTI)) {
847 /* Too many to filter perfectly -- accept all multicasts. */
848 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
849 mc_filter[1] = mc_filter[0] = 0xffffffff;
850 } else {
851 struct dev_mc_list *mclist;
852 unsigned int i;
853
854 rx_mode = AcceptBroadcast | AcceptMyPhys;
855 mc_filter[1] = mc_filter[0] = 0;
856 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
857 i++, mclist = mclist->next) {
858 int bit_nr =
859 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
860 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
861 rx_mode |= AcceptMulticast;
862 }
863 }
864
865 spin_lock_irqsave(&tp->lock, flags);
866
867 SIS_W16(RxMacControl, rx_mode | 0x2);
868 SIS_W32(RxHashTable, mc_filter[0]);
869 SIS_W32(RxHashTable + 4, mc_filter[1]);
870
871 spin_unlock_irqrestore(&tp->lock, flags);
872 }
873
874 static void sis190_soft_reset(void __iomem *ioaddr)
875 {
876 SIS_W32(IntrControl, 0x8000);
877 SIS_PCI_COMMIT();
878 SIS_W32(IntrControl, 0x0);
879 sis190_asic_down(ioaddr);
880 }
881
882 static void sis190_hw_start(struct net_device *dev)
883 {
884 struct sis190_private *tp = netdev_priv(dev);
885 void __iomem *ioaddr = tp->mmio_addr;
886
887 sis190_soft_reset(ioaddr);
888
889 SIS_W32(TxDescStartAddr, tp->tx_dma);
890 SIS_W32(RxDescStartAddr, tp->rx_dma);
891
892 SIS_W32(IntrStatus, 0xffffffff);
893 SIS_W32(IntrMask, 0x0);
894 SIS_W32(GMIIControl, 0x0);
895 SIS_W32(TxMacControl, 0x60);
896 SIS_W16(RxMacControl, 0x02);
897 SIS_W32(RxHashTable, 0x0);
898 SIS_W32(0x6c, 0x0);
899 SIS_W32(RxWolCtrl, 0x0);
900 SIS_W32(RxWolData, 0x0);
901
902 SIS_PCI_COMMIT();
903
904 sis190_set_rx_mode(dev);
905
906 /* Enable all known interrupts by setting the interrupt mask. */
907 SIS_W32(IntrMask, sis190_intr_mask);
908
909 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
910 SIS_W32(RxControl, 0x1a1d);
911
912 netif_start_queue(dev);
913 }
914
915 static void sis190_phy_task(struct work_struct *work)
916 {
917 struct sis190_private *tp =
918 container_of(work, struct sis190_private, phy_task);
919 struct net_device *dev = tp->dev;
920 void __iomem *ioaddr = tp->mmio_addr;
921 int phy_id = tp->mii_if.phy_id;
922 u16 val;
923
924 rtnl_lock();
925
926 if (!netif_running(dev))
927 goto out_unlock;
928
929 val = mdio_read(ioaddr, phy_id, MII_BMCR);
930 if (val & BMCR_RESET) {
931 // FIXME: needlessly high ? -- FR 02/07/2005
932 mod_timer(&tp->timer, jiffies + HZ/10);
933 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
934 BMSR_ANEGCOMPLETE)) {
935 netif_carrier_off(dev);
936 net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
937 dev->name);
938 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
939 } else {
940 /* Rejoice ! */
941 struct {
942 int val;
943 u32 ctl;
944 const char *msg;
945 } reg31[] = {
946 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
947 "1000 Mbps Full Duplex" },
948 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
949 "1000 Mbps Half Duplex" },
950 { LPA_100FULL, 0x04000800 | 0x00001000,
951 "100 Mbps Full Duplex" },
952 { LPA_100HALF, 0x04000800,
953 "100 Mbps Half Duplex" },
954 { LPA_10FULL, 0x04000400 | 0x00001000,
955 "10 Mbps Full Duplex" },
956 { LPA_10HALF, 0x04000400,
957 "10 Mbps Half Duplex" },
958 { 0, 0x04000400, "unknown" }
959 }, *p;
960 u16 adv;
961
962 val = mdio_read(ioaddr, phy_id, 0x1f);
963 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
964
965 val = mdio_read(ioaddr, phy_id, MII_LPA);
966 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
967 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
968 dev->name, val, adv);
969
970 val &= adv;
971
972 for (p = reg31; p->val; p++) {
973 if ((val & p->val) == p->val)
974 break;
975 }
976
977 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
978
979 if ((tp->features & F_HAS_RGMII) &&
980 (tp->features & F_PHY_BCM5461)) {
981 // Set Tx Delay in RGMII mode.
982 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
983 udelay(200);
984 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
985 p->ctl |= 0x03000000;
986 }
987
988 SIS_W32(StationControl, p->ctl);
989
990 if (tp->features & F_HAS_RGMII) {
991 SIS_W32(RGDelay, 0x0441);
992 SIS_W32(RGDelay, 0x0440);
993 }
994
995 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
996 p->msg);
997 netif_carrier_on(dev);
998 }
999
1000 out_unlock:
1001 rtnl_unlock();
1002 }
1003
1004 static void sis190_phy_timer(unsigned long __opaque)
1005 {
1006 struct net_device *dev = (struct net_device *)__opaque;
1007 struct sis190_private *tp = netdev_priv(dev);
1008
1009 if (likely(netif_running(dev)))
1010 schedule_work(&tp->phy_task);
1011 }
1012
1013 static inline void sis190_delete_timer(struct net_device *dev)
1014 {
1015 struct sis190_private *tp = netdev_priv(dev);
1016
1017 del_timer_sync(&tp->timer);
1018 }
1019
1020 static inline void sis190_request_timer(struct net_device *dev)
1021 {
1022 struct sis190_private *tp = netdev_priv(dev);
1023 struct timer_list *timer = &tp->timer;
1024
1025 init_timer(timer);
1026 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1027 timer->data = (unsigned long)dev;
1028 timer->function = sis190_phy_timer;
1029 add_timer(timer);
1030 }
1031
1032 static void sis190_set_rxbufsize(struct sis190_private *tp,
1033 struct net_device *dev)
1034 {
1035 unsigned int mtu = dev->mtu;
1036
1037 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1038 /* RxDesc->size has a licence to kill the lower bits */
1039 if (tp->rx_buf_sz & 0x07) {
1040 tp->rx_buf_sz += 8;
1041 tp->rx_buf_sz &= RX_BUF_MASK;
1042 }
1043 }
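/*
 * Worked example (sketch): the default 1500 byte MTU keeps the 1536 byte
 * RX_BUF_SIZE; an mtu of 7000 gives 7000 + 14 + 8 = 7022, which the
 * rounding above turns into 7024, the next multiple of 8.
 */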
1044
1045 static int sis190_open(struct net_device *dev)
1046 {
1047 struct sis190_private *tp = netdev_priv(dev);
1048 struct pci_dev *pdev = tp->pci_dev;
1049 int rc = -ENOMEM;
1050
1051 sis190_set_rxbufsize(tp, dev);
1052
1053 /*
1054 * Rx and Tx descriptors need 256-byte alignment.
1055 * pci_alloc_consistent() guarantees a stronger alignment.
1056 */
1057 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1058 if (!tp->TxDescRing)
1059 goto out;
1060
1061 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1062 if (!tp->RxDescRing)
1063 goto err_free_tx_0;
1064
1065 rc = sis190_init_ring(dev);
1066 if (rc < 0)
1067 goto err_free_rx_1;
1068
1069 sis190_request_timer(dev);
1070
1071 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1072 if (rc < 0)
1073 goto err_release_timer_2;
1074
1075 sis190_hw_start(dev);
1076 out:
1077 return rc;
1078
1079 err_release_timer_2:
1080 sis190_delete_timer(dev);
1081 sis190_rx_clear(tp);
1082 err_free_rx_1:
1083 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1084 tp->rx_dma);
1085 err_free_tx_0:
1086 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1087 tp->tx_dma);
1088 goto out;
1089 }
1090
1091 static void sis190_tx_clear(struct sis190_private *tp)
1092 {
1093 unsigned int i;
1094
1095 for (i = 0; i < NUM_TX_DESC; i++) {
1096 struct sk_buff *skb = tp->Tx_skbuff[i];
1097
1098 if (!skb)
1099 continue;
1100
1101 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1102 tp->Tx_skbuff[i] = NULL;
1103 dev_kfree_skb(skb);
1104
1105 tp->dev->stats.tx_dropped++;
1106 }
1107 tp->cur_tx = tp->dirty_tx = 0;
1108 }
1109
1110 static void sis190_down(struct net_device *dev)
1111 {
1112 struct sis190_private *tp = netdev_priv(dev);
1113 void __iomem *ioaddr = tp->mmio_addr;
1114 unsigned int poll_locked = 0;
1115
1116 sis190_delete_timer(dev);
1117
1118 netif_stop_queue(dev);
1119
1120 do {
1121 spin_lock_irq(&tp->lock);
1122
1123 sis190_asic_down(ioaddr);
1124
1125 spin_unlock_irq(&tp->lock);
1126
1127 synchronize_irq(dev->irq);
1128
1129 if (!poll_locked)
1130 poll_locked++;
1131
1132 synchronize_sched();
1133
1134 } while (SIS_R32(IntrMask));
1135
1136 sis190_tx_clear(tp);
1137 sis190_rx_clear(tp);
1138 }
1139
1140 static int sis190_close(struct net_device *dev)
1141 {
1142 struct sis190_private *tp = netdev_priv(dev);
1143 struct pci_dev *pdev = tp->pci_dev;
1144
1145 sis190_down(dev);
1146
1147 free_irq(dev->irq, dev);
1148
1149 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1150 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1151
1152 tp->TxDescRing = NULL;
1153 tp->RxDescRing = NULL;
1154
1155 return 0;
1156 }
1157
1158 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1159 {
1160 struct sis190_private *tp = netdev_priv(dev);
1161 void __iomem *ioaddr = tp->mmio_addr;
1162 u32 len, entry, dirty_tx;
1163 struct TxDesc *desc;
1164 dma_addr_t mapping;
1165
1166 if (unlikely(skb->len < ETH_ZLEN)) {
1167 if (skb_padto(skb, ETH_ZLEN)) {
1168 dev->stats.tx_dropped++;
1169 goto out;
1170 }
1171 len = ETH_ZLEN;
1172 } else {
1173 len = skb->len;
1174 }
1175
1176 entry = tp->cur_tx % NUM_TX_DESC;
1177 desc = tp->TxDescRing + entry;
1178
1179 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1180 netif_stop_queue(dev);
1181 net_tx_err(tp, KERN_ERR PFX
1182 "%s: BUG! Tx Ring full when queue awake!\n",
1183 dev->name);
1184 return NETDEV_TX_BUSY;
1185 }
1186
1187 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1188
1189 tp->Tx_skbuff[entry] = skb;
1190
1191 desc->PSize = cpu_to_le32(len);
1192 desc->addr = cpu_to_le32(mapping);
1193
1194 desc->size = cpu_to_le32(len);
1195 if (entry == (NUM_TX_DESC - 1))
1196 desc->size |= cpu_to_le32(RingEnd);
1197
1198 wmb();
1199
1200 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1201
1202 tp->cur_tx++;
1203
1204 smp_wmb();
1205
1206 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1207
1208 dev->trans_start = jiffies;
1209
1210 dirty_tx = tp->dirty_tx;
1211 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1212 netif_stop_queue(dev);
1213 smp_rmb();
1214 if (dirty_tx != tp->dirty_tx)
1215 netif_wake_queue(dev);
1216 }
1217 out:
1218 return NETDEV_TX_OK;
1219 }
1220
1221 static void sis190_free_phy(struct list_head *first_phy)
1222 {
1223 struct sis190_phy *cur, *next;
1224
1225 list_for_each_entry_safe(cur, next, first_phy, list) {
1226 kfree(cur);
1227 }
1228 }
1229
1230 /**
1231 * sis190_default_phy - Select default PHY for sis190 mac.
1232 * @dev: the net device to probe for
1233 *
1234 * Select the first detected PHY with a link as the default.
1235 * If none has a link, select the PHY whose type is HOME as the default.
1236 * If no HOME PHY exists, select a LAN PHY.
1237 */
1238 static u16 sis190_default_phy(struct net_device *dev)
1239 {
1240 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1241 struct sis190_private *tp = netdev_priv(dev);
1242 struct mii_if_info *mii_if = &tp->mii_if;
1243 void __iomem *ioaddr = tp->mmio_addr;
1244 u16 status;
1245
1246 phy_home = phy_default = phy_lan = NULL;
1247
1248 list_for_each_entry(phy, &tp->first_phy, list) {
1249 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1250
1251 // Link is up, no default PHY selected yet, and not a ghost PHY.
1252 if ((status & BMSR_LSTATUS) &&
1253 !phy_default &&
1254 (phy->type != UNKNOWN)) {
1255 phy_default = phy;
1256 } else {
1257 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1258 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1259 status | BMCR_ANENABLE | BMCR_ISOLATE);
1260 if (phy->type == HOME)
1261 phy_home = phy;
1262 else if (phy->type == LAN)
1263 phy_lan = phy;
1264 }
1265 }
1266
1267 if (!phy_default) {
1268 if (phy_home)
1269 phy_default = phy_home;
1270 else if (phy_lan)
1271 phy_default = phy_lan;
1272 else
1273 phy_default = list_entry(&tp->first_phy,
1274 struct sis190_phy, list);
1275 }
1276
1277 if (mii_if->phy_id != phy_default->phy_id) {
1278 mii_if->phy_id = phy_default->phy_id;
1279 net_probe(tp, KERN_INFO
1280 "%s: Using transceiver at address %d as default.\n",
1281 pci_name(tp->pci_dev), mii_if->phy_id);
1282 }
1283
1284 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1285 status &= (~BMCR_ISOLATE);
1286
1287 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1288 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1289
1290 return status;
1291 }
1292
1293 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1294 struct sis190_phy *phy, unsigned int phy_id,
1295 u16 mii_status)
1296 {
1297 void __iomem *ioaddr = tp->mmio_addr;
1298 struct mii_chip_info *p;
1299
1300 INIT_LIST_HEAD(&phy->list);
1301 phy->status = mii_status;
1302 phy->phy_id = phy_id;
1303
1304 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1305 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1306
1307 for (p = mii_chip_table; p->type; p++) {
1308 if ((p->id[0] == phy->id[0]) &&
1309 (p->id[1] == (phy->id[1] & 0xfff0))) {
1310 break;
1311 }
1312 }
1313
1314 if (p->id[1]) {
1315 phy->type = (p->type == MIX) ?
1316 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1317 LAN : HOME) : p->type;
1318 tp->features |= p->feature;
1319 } else
1320 phy->type = UNKNOWN;
1321
1322 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1323 pci_name(tp->pci_dev),
1324 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1325 }
1326
1327 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1328 {
1329 if (tp->features & F_PHY_88E1111) {
1330 void __iomem *ioaddr = tp->mmio_addr;
1331 int phy_id = tp->mii_if.phy_id;
1332 u16 reg[2][2] = {
1333 { 0x808b, 0x0ce1 },
1334 { 0x808f, 0x0c60 }
1335 }, *p;
1336
1337 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1338
1339 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1340 udelay(200);
1341 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1342 udelay(200);
1343 }
1344 }
1345
1346 /**
1347 * sis190_mii_probe - Probe MII PHY for sis190
1348 * @dev: the net device to probe for
1349 *
1350 * Search all 32 possible MII PHY addresses.
1351 * Identify and set the current PHY if one is found;
1352 * return an error if none is found.
1353 */
1354 static int __devinit sis190_mii_probe(struct net_device *dev)
1355 {
1356 struct sis190_private *tp = netdev_priv(dev);
1357 struct mii_if_info *mii_if = &tp->mii_if;
1358 void __iomem *ioaddr = tp->mmio_addr;
1359 int phy_id;
1360 int rc = 0;
1361
1362 INIT_LIST_HEAD(&tp->first_phy);
1363
1364 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1365 struct sis190_phy *phy;
1366 u16 status;
1367
1368 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1369
1370 // Try next mii if the current one is not accessible.
1371 if (status == 0xffff || status == 0x0000)
1372 continue;
1373
1374 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1375 if (!phy) {
1376 sis190_free_phy(&tp->first_phy);
1377 rc = -ENOMEM;
1378 goto out;
1379 }
1380
1381 sis190_init_phy(dev, tp, phy, phy_id, status);
1382
1383 list_add(&tp->first_phy, &phy->list);
1384 }
1385
1386 if (list_empty(&tp->first_phy)) {
1387 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1388 pci_name(tp->pci_dev));
1389 rc = -EIO;
1390 goto out;
1391 }
1392
1393 /* Select default PHY for mac */
1394 sis190_default_phy(dev);
1395
1396 sis190_mii_probe_88e1111_fixup(tp);
1397
1398 mii_if->dev = dev;
1399 mii_if->mdio_read = __mdio_read;
1400 mii_if->mdio_write = __mdio_write;
1401 mii_if->phy_id_mask = PHY_ID_ANY;
1402 mii_if->reg_num_mask = MII_REG_ANY;
1403 out:
1404 return rc;
1405 }
1406
1407 static void sis190_mii_remove(struct net_device *dev)
1408 {
1409 struct sis190_private *tp = netdev_priv(dev);
1410
1411 sis190_free_phy(&tp->first_phy);
1412 }
1413
1414 static void sis190_release_board(struct pci_dev *pdev)
1415 {
1416 struct net_device *dev = pci_get_drvdata(pdev);
1417 struct sis190_private *tp = netdev_priv(dev);
1418
1419 iounmap(tp->mmio_addr);
1420 pci_release_regions(pdev);
1421 pci_disable_device(pdev);
1422 free_netdev(dev);
1423 }
1424
1425 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1426 {
1427 struct sis190_private *tp;
1428 struct net_device *dev;
1429 void __iomem *ioaddr;
1430 int rc;
1431
1432 dev = alloc_etherdev(sizeof(*tp));
1433 if (!dev) {
1434 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1435 rc = -ENOMEM;
1436 goto err_out_0;
1437 }
1438
1439 SET_NETDEV_DEV(dev, &pdev->dev);
1440
1441 tp = netdev_priv(dev);
1442 tp->dev = dev;
1443 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1444
1445 rc = pci_enable_device(pdev);
1446 if (rc < 0) {
1447 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1448 goto err_free_dev_1;
1449 }
1450
1451 rc = -ENODEV;
1452
1453 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1454 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1455 pci_name(pdev));
1456 goto err_pci_disable_2;
1457 }
1458 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1459 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1460 pci_name(pdev));
1461 goto err_pci_disable_2;
1462 }
1463
1464 rc = pci_request_regions(pdev, DRV_NAME);
1465 if (rc < 0) {
1466 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1467 pci_name(pdev));
1468 goto err_pci_disable_2;
1469 }
1470
1471 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1472 if (rc < 0) {
1473 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1474 pci_name(pdev));
1475 goto err_free_res_3;
1476 }
1477
1478 pci_set_master(pdev);
1479
1480 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1481 if (!ioaddr) {
1482 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1483 pci_name(pdev));
1484 rc = -EIO;
1485 goto err_free_res_3;
1486 }
1487
1488 tp->pci_dev = pdev;
1489 tp->mmio_addr = ioaddr;
1490
1491 sis190_irq_mask_and_ack(ioaddr);
1492
1493 sis190_soft_reset(ioaddr);
1494 out:
1495 return dev;
1496
1497 err_free_res_3:
1498 pci_release_regions(pdev);
1499 err_pci_disable_2:
1500 pci_disable_device(pdev);
1501 err_free_dev_1:
1502 free_netdev(dev);
1503 err_out_0:
1504 dev = ERR_PTR(rc);
1505 goto out;
1506 }
1507
1508 static void sis190_tx_timeout(struct net_device *dev)
1509 {
1510 struct sis190_private *tp = netdev_priv(dev);
1511 void __iomem *ioaddr = tp->mmio_addr;
1512 u8 tmp8;
1513
1514 /* Disable Tx, if not already */
1515 tmp8 = SIS_R8(TxControl);
1516 if (tmp8 & CmdTxEnb)
1517 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1518
1519
1520 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1521 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1522
1523 /* Disable interrupts by clearing the interrupt mask. */
1524 SIS_W32(IntrMask, 0x0000);
1525
1526 /* Stop a shared interrupt from scavenging while we are. */
1527 spin_lock_irq(&tp->lock);
1528 sis190_tx_clear(tp);
1529 spin_unlock_irq(&tp->lock);
1530
1531 /* ...and finally, reset everything. */
1532 sis190_hw_start(dev);
1533
1534 netif_wake_queue(dev);
1535 }
1536
1537 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1538 {
1539 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1540 }
1541
1542 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1543 struct net_device *dev)
1544 {
1545 struct sis190_private *tp = netdev_priv(dev);
1546 void __iomem *ioaddr = tp->mmio_addr;
1547 u16 sig;
1548 int i;
1549
1550 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1551 pci_name(pdev));
1552
1553 /* Check to see if there is a sane EEPROM */
1554 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1555
1556 if ((sig == 0xffff) || (sig == 0x0000)) {
1557 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1558 pci_name(pdev), sig);
1559 return -EIO;
1560 }
1561
1562 /* Get MAC address from EEPROM */
1563 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1564 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1565
1566 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1567 }
1568
1569 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1570
1571 return 0;
1572 }
1573
1574 /**
1575 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1576 * @pdev: PCI device
1577 * @dev: network device to get address for
1578 *
1579 * SiS96x models store the MAC address in APC CMOS RAM,
1580 * which is accessed through the ISA bridge.
1581 * The MAC address is read into @dev->dev_addr.
1582 */
1583 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1584 struct net_device *dev)
1585 {
1586 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1587 struct sis190_private *tp = netdev_priv(dev);
1588 struct pci_dev *isa_bridge;
1589 u8 reg, tmp8;
1590 unsigned int i;
1591
1592 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1593 pci_name(pdev));
1594
1595 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1596 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1597 if (isa_bridge)
1598 break;
1599 }
1600
1601 if (!isa_bridge) {
1602 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1603 pci_name(pdev));
1604 return -EIO;
1605 }
1606
1607 /* Enable port 78h & 79h to access APC Registers. */
1608 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1609 reg = (tmp8 & ~0x02);
1610 pci_write_config_byte(isa_bridge, 0x48, reg);
1611 udelay(50);
1612 pci_read_config_byte(isa_bridge, 0x48, &reg);
1613
1614 for (i = 0; i < MAC_ADDR_LEN; i++) {
1615 outb(0x9 + i, 0x78);
1616 dev->dev_addr[i] = inb(0x79);
1617 }
1618
1619 outb(0x12, 0x78);
1620 reg = inb(0x79);
1621
1622 sis190_set_rgmii(tp, reg);
1623
1624 /* Restore the value to ISA Bridge */
1625 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1626 pci_dev_put(isa_bridge);
1627
1628 return 0;
1629 }
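/*
 * APC access sketch (from the code above): port 0x78 selects an APC CMOS
 * register, port 0x79 reads it back; registers 0x09..0x0e hold the MAC
 * address and 0x12 the RGMII flag.
 */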
1630
1631 /**
1632 * sis190_init_rxfilter - Initialize the Rx filter
1633 * @dev: network device to initialize
1634 *
1635 * Set receive filter address to our MAC address
1636 * and enable packet filtering.
1637 */
1638 static inline void sis190_init_rxfilter(struct net_device *dev)
1639 {
1640 struct sis190_private *tp = netdev_priv(dev);
1641 void __iomem *ioaddr = tp->mmio_addr;
1642 u16 ctl;
1643 int i;
1644
1645 ctl = SIS_R16(RxMacControl);
1646 /*
1647 * Disable packet filtering before setting filter.
1648 * Note: SiS's driver writes 32 bits but RxMacControl is only 16 bits
1649 * wide and is followed by RxMacAddr (6 bytes). Strange. -- FR
1650 */
1651 SIS_W16(RxMacControl, ctl & ~0x0f00);
1652
1653 for (i = 0; i < MAC_ADDR_LEN; i++)
1654 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1655
1656 SIS_W16(RxMacControl, ctl);
1657 SIS_PCI_COMMIT();
1658 }
1659
1660 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1661 struct net_device *dev)
1662 {
1663 int rc;
1664
1665 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1666 if (rc < 0) {
1667 u8 reg;
1668
1669 pci_read_config_byte(pdev, 0x73, &reg);
1670
1671 if (reg & 0x00000001)
1672 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1673 }
1674 return rc;
1675 }
1676
1677 static void sis190_set_speed_auto(struct net_device *dev)
1678 {
1679 struct sis190_private *tp = netdev_priv(dev);
1680 void __iomem *ioaddr = tp->mmio_addr;
1681 int phy_id = tp->mii_if.phy_id;
1682 int val;
1683
1684 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1685
1686 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1687
1688 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1689 // unchanged.
1690 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1691 ADVERTISE_100FULL | ADVERTISE_10FULL |
1692 ADVERTISE_100HALF | ADVERTISE_10HALF);
1693
1694 // Enable 1000 Full Mode.
1695 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1696
1697 // Enable auto-negotiation and restart auto-negotiation.
1698 mdio_write(ioaddr, phy_id, MII_BMCR,
1699 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1700 }
1701
1702 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1703 {
1704 struct sis190_private *tp = netdev_priv(dev);
1705
1706 return mii_ethtool_gset(&tp->mii_if, cmd);
1707 }
1708
1709 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1710 {
1711 struct sis190_private *tp = netdev_priv(dev);
1712
1713 return mii_ethtool_sset(&tp->mii_if, cmd);
1714 }
1715
1716 static void sis190_get_drvinfo(struct net_device *dev,
1717 struct ethtool_drvinfo *info)
1718 {
1719 struct sis190_private *tp = netdev_priv(dev);
1720
1721 strcpy(info->driver, DRV_NAME);
1722 strcpy(info->version, DRV_VERSION);
1723 strcpy(info->bus_info, pci_name(tp->pci_dev));
1724 }
1725
1726 static int sis190_get_regs_len(struct net_device *dev)
1727 {
1728 return SIS190_REGS_SIZE;
1729 }
1730
1731 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1732 void *p)
1733 {
1734 struct sis190_private *tp = netdev_priv(dev);
1735 unsigned long flags;
1736
1737 if (regs->len > SIS190_REGS_SIZE)
1738 regs->len = SIS190_REGS_SIZE;
1739
1740 spin_lock_irqsave(&tp->lock, flags);
1741 memcpy_fromio(p, tp->mmio_addr, regs->len);
1742 spin_unlock_irqrestore(&tp->lock, flags);
1743 }
1744
1745 static int sis190_nway_reset(struct net_device *dev)
1746 {
1747 struct sis190_private *tp = netdev_priv(dev);
1748
1749 return mii_nway_restart(&tp->mii_if);
1750 }
1751
1752 static u32 sis190_get_msglevel(struct net_device *dev)
1753 {
1754 struct sis190_private *tp = netdev_priv(dev);
1755
1756 return tp->msg_enable;
1757 }
1758
1759 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1760 {
1761 struct sis190_private *tp = netdev_priv(dev);
1762
1763 tp->msg_enable = value;
1764 }
1765
1766 static const struct ethtool_ops sis190_ethtool_ops = {
1767 .get_settings = sis190_get_settings,
1768 .set_settings = sis190_set_settings,
1769 .get_drvinfo = sis190_get_drvinfo,
1770 .get_regs_len = sis190_get_regs_len,
1771 .get_regs = sis190_get_regs,
1772 .get_link = ethtool_op_get_link,
1773 .get_msglevel = sis190_get_msglevel,
1774 .set_msglevel = sis190_set_msglevel,
1775 .nway_reset = sis190_nway_reset,
1776 };
1777
1778 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1779 {
1780 struct sis190_private *tp = netdev_priv(dev);
1781
1782 return !netif_running(dev) ? -EINVAL :
1783 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1784 }
1785
1786 static int __devinit sis190_init_one(struct pci_dev *pdev,
1787 const struct pci_device_id *ent)
1788 {
1789 static int printed_version = 0;
1790 struct sis190_private *tp;
1791 struct net_device *dev;
1792 void __iomem *ioaddr;
1793 int rc;
1794
1795 if (!printed_version) {
1796 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1797 printed_version = 1;
1798 }
1799
1800 dev = sis190_init_board(pdev);
1801 if (IS_ERR(dev)) {
1802 rc = PTR_ERR(dev);
1803 goto out;
1804 }
1805
1806 pci_set_drvdata(pdev, dev);
1807
1808 tp = netdev_priv(dev);
1809 ioaddr = tp->mmio_addr;
1810
1811 rc = sis190_get_mac_addr(pdev, dev);
1812 if (rc < 0)
1813 goto err_release_board;
1814
1815 sis190_init_rxfilter(dev);
1816
1817 INIT_WORK(&tp->phy_task, sis190_phy_task);
1818
1819 dev->open = sis190_open;
1820 dev->stop = sis190_close;
1821 dev->do_ioctl = sis190_ioctl;
1822 dev->tx_timeout = sis190_tx_timeout;
1823 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1824 dev->hard_start_xmit = sis190_start_xmit;
1825 #ifdef CONFIG_NET_POLL_CONTROLLER
1826 dev->poll_controller = sis190_netpoll;
1827 #endif
1828 dev->set_multicast_list = sis190_set_rx_mode;
1829 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1830 dev->irq = pdev->irq;
1831 dev->base_addr = (unsigned long) 0xdead;
1832
1833 spin_lock_init(&tp->lock);
1834
1835 rc = sis190_mii_probe(dev);
1836 if (rc < 0)
1837 goto err_release_board;
1838
1839 rc = register_netdev(dev);
1840 if (rc < 0)
1841 goto err_remove_mii;
1842
1843 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
1844 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1845 ioaddr, dev->irq, dev->dev_addr);
1846
1847 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1848 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1849
1850 netif_carrier_off(dev);
1851
1852 sis190_set_speed_auto(dev);
1853 out:
1854 return rc;
1855
1856 err_remove_mii:
1857 sis190_mii_remove(dev);
1858 err_release_board:
1859 sis190_release_board(pdev);
1860 goto out;
1861 }
1862
1863 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1864 {
1865 struct net_device *dev = pci_get_drvdata(pdev);
1866
1867 sis190_mii_remove(dev);
1868 flush_scheduled_work();
1869 unregister_netdev(dev);
1870 sis190_release_board(pdev);
1871 pci_set_drvdata(pdev, NULL);
1872 }
1873
1874 static struct pci_driver sis190_pci_driver = {
1875 .name = DRV_NAME,
1876 .id_table = sis190_pci_tbl,
1877 .probe = sis190_init_one,
1878 .remove = __devexit_p(sis190_remove_one),
1879 };
1880
1881 static int __init sis190_init_module(void)
1882 {
1883 return pci_register_driver(&sis190_pci_driver);
1884 }
1885
1886 static void __exit sis190_cleanup_module(void)
1887 {
1888 pci_unregister_driver(&sis190_pci_driver);
1889 }
1890
1891 module_init(sis190_init_module);
1892 module_exit(sis190_cleanup_module);