WorkStruct: make allyesconfig
[deliverable/linux.git] / drivers / net / sis190.c
1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
34
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
45
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
49
50 #ifdef CONFIG_SIS190_NAPI
51 #define NAPI_SUFFIX "-NAPI"
52 #else
53 #define NAPI_SUFFIX ""
54 #endif
55
56 #define DRV_VERSION "1.2" NAPI_SUFFIX
57 #define DRV_NAME "sis190"
58 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59 #define PFX DRV_NAME ": "
60
61 #ifdef CONFIG_SIS190_NAPI
62 #define sis190_rx_skb netif_receive_skb
63 #define sis190_rx_quota(count, quota) min(count, quota)
64 #else
65 #define sis190_rx_skb netif_rx
66 #define sis190_rx_quota(count, quota) count
67 #endif
68
69 #define MAC_ADDR_LEN 6
70
71 #define NUM_TX_DESC 64 /* [8..1024] */
72 #define NUM_RX_DESC 64 /* [8..8192] */
73 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
74 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
75 #define RX_BUF_SIZE 1536
76 #define RX_BUF_MASK 0xfff8
77
78 #define SIS190_REGS_SIZE 0x80
79 #define SIS190_TX_TIMEOUT (6*HZ)
80 #define SIS190_PHY_TIMEOUT (10*HZ)
81 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
83 NETIF_MSG_IFDOWN)
84
85 /* Enhanced PHY access register bit definitions */
86 #define EhnMIIread 0x0000
87 #define EhnMIIwrite 0x0020
88 #define EhnMIIdataShift 16
89 #define EhnMIIpmdShift 6 /* 7016 only */
90 #define EhnMIIregShift 11
91 #define EhnMIIreq 0x0010
92 #define EhnMIInotDone 0x0010
93
94 /* Write/read MMIO register */
95 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
96 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
97 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
98 #define SIS_R8(reg) readb (ioaddr + (reg))
99 #define SIS_R16(reg) readw (ioaddr + (reg))
100 #define SIS_R32(reg) readl (ioaddr + (reg))
101
102 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
103
104 enum sis190_registers {
105 TxControl = 0x00,
106 TxDescStartAddr = 0x04,
107 rsv0 = 0x08, // reserved
108 TxSts = 0x0c, // unused (Control/Status)
109 RxControl = 0x10,
110 RxDescStartAddr = 0x14,
111 rsv1 = 0x18, // reserved
112 RxSts = 0x1c, // unused
113 IntrStatus = 0x20,
114 IntrMask = 0x24,
115 IntrControl = 0x28,
116 IntrTimer = 0x2c, // unused (Interupt Timer)
117 PMControl = 0x30, // unused (Power Mgmt Control/Status)
118 rsv2 = 0x34, // reserved
119 ROMControl = 0x38,
120 ROMInterface = 0x3c,
121 StationControl = 0x40,
122 GMIIControl = 0x44,
123 GIoCR = 0x48, // unused (GMAC IO Compensation)
124 GIoCtrl = 0x4c, // unused (GMAC IO Control)
125 TxMacControl = 0x50,
126 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
127 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
128 rsv3 = 0x5c, // reserved
129 RxMacControl = 0x60,
130 RxMacAddr = 0x62,
131 RxHashTable = 0x68,
132 // Undocumented = 0x6c,
133 RxWolCtrl = 0x70,
134 RxWolData = 0x74, // unused (Rx WOL Data Access)
135 RxMPSControl = 0x78, // unused (Rx MPS Control)
136 rsv4 = 0x7c, // reserved
137 };
138
139 enum sis190_register_content {
140 /* IntrStatus */
141 SoftInt = 0x40000000, // unused
142 Timeup = 0x20000000, // unused
143 PauseFrame = 0x00080000, // unused
144 MagicPacket = 0x00040000, // unused
145 WakeupFrame = 0x00020000, // unused
146 LinkChange = 0x00010000,
147 RxQEmpty = 0x00000080,
148 RxQInt = 0x00000040,
149 TxQ1Empty = 0x00000020, // unused
150 TxQ1Int = 0x00000010,
151 TxQ0Empty = 0x00000008, // unused
152 TxQ0Int = 0x00000004,
153 RxHalt = 0x00000002,
154 TxHalt = 0x00000001,
155
156 /* {Rx/Tx}CmdBits */
157 CmdReset = 0x10,
158 CmdRxEnb = 0x08, // unused
159 CmdTxEnb = 0x01,
160 RxBufEmpty = 0x01, // unused
161
162 /* Cfg9346Bits */
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
165
166 /* RxMacControl */
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
173
174 /* RxConfigBits */
175 RxCfgFIFOShift = 13,
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
177
178 /* TxConfigBits */
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
181
182 LinkStatus = 0x02, // unused
183 FullDup = 0x01, // unused
184
185 /* TBICSRBit */
186 TBILinkOK = 0x02000000, // unused
187 };
188
189 struct TxDesc {
190 __le32 PSize;
191 __le32 status;
192 __le32 addr;
193 __le32 size;
194 };
195
196 struct RxDesc {
197 __le32 PSize;
198 __le32 status;
199 __le32 addr;
200 __le32 size;
201 };
202
203 enum _DescStatusBit {
204 /* _Desc.status */
205 OWNbit = 0x80000000, // RXOWN/TXOWN
206 INTbit = 0x40000000, // RXINT/TXINT
207 CRCbit = 0x00020000, // CRCOFF/CRCEN
208 PADbit = 0x00010000, // PREADD/PADEN
209 /* _Desc.size */
210 RingEnd = 0x80000000,
211 /* TxDesc.status */
212 LSEN = 0x08000000, // TSO ? -- FR
213 IPCS = 0x04000000,
214 TCPCS = 0x02000000,
215 UDPCS = 0x01000000,
216 BSTEN = 0x00800000,
217 EXTEN = 0x00400000,
218 DEFEN = 0x00200000,
219 BKFEN = 0x00100000,
220 CRSEN = 0x00080000,
221 COLEN = 0x00040000,
222 THOL3 = 0x30000000,
223 THOL2 = 0x20000000,
224 THOL1 = 0x10000000,
225 THOL0 = 0x00000000,
226 /* RxDesc.status */
227 IPON = 0x20000000,
228 TCPON = 0x10000000,
229 UDPON = 0x08000000,
230 Wakup = 0x00400000,
231 Magic = 0x00200000,
232 Pause = 0x00100000,
233 DEFbit = 0x00200000,
234 BCAST = 0x000c0000,
235 MCAST = 0x00080000,
236 UCAST = 0x00040000,
237 /* RxDesc.PSize */
238 TAGON = 0x80000000,
239 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
240 ABORT = 0x00800000,
241 SHORT = 0x00400000,
242 LIMIT = 0x00200000,
243 MIIER = 0x00100000,
244 OVRUN = 0x00080000,
245 NIBON = 0x00040000,
246 COLON = 0x00020000,
247 CRCOK = 0x00010000,
248 RxSizeMask = 0x0000ffff
249 /*
250 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
251 * provide two (unused with Linux) Tx queues. No publically
252 * available documentation alas.
253 */
254 };
255
256 enum sis190_eeprom_access_register_bits {
257 EECS = 0x00000001, // unused
258 EECLK = 0x00000002, // unused
259 EEDO = 0x00000008, // unused
260 EEDI = 0x00000004, // unused
261 EEREQ = 0x00000080,
262 EEROP = 0x00000200,
263 EEWOP = 0x00000100 // unused
264 };
265
266 /* EEPROM Addresses */
267 enum sis190_eeprom_address {
268 EEPROMSignature = 0x00,
269 EEPROMCLK = 0x01, // unused
270 EEPROMInfo = 0x02,
271 EEPROMMACAddr = 0x03
272 };
273
274 enum sis190_feature {
275 F_HAS_RGMII = 1,
276 F_PHY_88E1111 = 2,
277 F_PHY_BCM5461 = 4
278 };
279
280 struct sis190_private {
281 void __iomem *mmio_addr;
282 struct pci_dev *pci_dev;
283 struct net_device *dev;
284 struct net_device_stats stats;
285 spinlock_t lock;
286 u32 rx_buf_sz;
287 u32 cur_rx;
288 u32 cur_tx;
289 u32 dirty_rx;
290 u32 dirty_tx;
291 dma_addr_t rx_dma;
292 dma_addr_t tx_dma;
293 struct RxDesc *RxDescRing;
294 struct TxDesc *TxDescRing;
295 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
296 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
297 struct work_struct phy_task;
298 struct timer_list timer;
299 u32 msg_enable;
300 struct mii_if_info mii_if;
301 struct list_head first_phy;
302 u32 features;
303 };
304
305 struct sis190_phy {
306 struct list_head list;
307 int phy_id;
308 u16 id[2];
309 u16 status;
310 u8 type;
311 };
312
313 enum sis190_phy_type {
314 UNKNOWN = 0x00,
315 HOME = 0x01,
316 LAN = 0x02,
317 MIX = 0x03
318 };
319
320 static struct mii_chip_info {
321 const char *name;
322 u16 id[2];
323 unsigned int type;
324 u32 feature;
325 } mii_chip_table[] = {
326 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
327 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
328 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
329 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
330 { NULL, }
331 };
332
333 static const struct {
334 const char *name;
335 } sis_chip_info[] = {
336 { "SiS 190 PCI Fast Ethernet adapter" },
337 { "SiS 191 PCI Gigabit Ethernet adapter" },
338 };
339
340 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
341 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
342 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
343 { 0, },
344 };
345
346 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
347
348 static int rx_copybreak = 200;
349
350 static struct {
351 u32 msg_enable;
352 } debug = { -1 };
353
354 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
355 module_param(rx_copybreak, int, 0);
356 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
357 module_param_named(debug, debug.msg_enable, int, 0);
358 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
359 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
360 MODULE_VERSION(DRV_VERSION);
361 MODULE_LICENSE("GPL");
362
363 static const u32 sis190_intr_mask =
364 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
365
366 /*
367 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
368 * The chips use a 64 element hash table based on the Ethernet CRC.
369 */
370 static const int multicast_filter_limit = 32;
371
372 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
373 {
374 unsigned int i;
375
376 SIS_W32(GMIIControl, ctl);
377
378 msleep(1);
379
380 for (i = 0; i < 100; i++) {
381 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
382 break;
383 msleep(1);
384 }
385
386 if (i > 999)
387 printk(KERN_ERR PFX "PHY command failed !\n");
388 }
389
390 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
391 {
392 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
393 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
394 (((u32) val) << EhnMIIdataShift));
395 }
396
397 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
398 {
399 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
400 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
401
402 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
403 }
404
405 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
406 {
407 struct sis190_private *tp = netdev_priv(dev);
408
409 mdio_write(tp->mmio_addr, phy_id, reg, val);
410 }
411
412 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
413 {
414 struct sis190_private *tp = netdev_priv(dev);
415
416 return mdio_read(tp->mmio_addr, phy_id, reg);
417 }
418
/*
 * Read a PHY register twice and return the second value: the first read
 * refreshes self-clearing latched bits (e.g. link status in BMSR), so
 * only the second read reflects the current state.
 */
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}
424
425 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
426 {
427 u16 data = 0xffff;
428 unsigned int i;
429
430 if (!(SIS_R32(ROMControl) & 0x0002))
431 return 0;
432
433 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
434
435 for (i = 0; i < 200; i++) {
436 if (!(SIS_R32(ROMInterface) & EEREQ)) {
437 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
438 break;
439 }
440 msleep(1);
441 }
442
443 return data;
444 }
445
/*
 * Mask every interrupt source, acknowledge anything already pending,
 * and post both writes with a dummy read so the device is quiescent
 * when this returns.
 */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}
452
/*
 * Quiesce the chip: halt both DMA engines, then mask and acknowledge
 * all interrupts.
 */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
462
/* Set RingEnd so the chip wraps back to the first descriptor here. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
467
/*
 * Hand an Rx descriptor (whose buffer is already mapped) back to the
 * hardware.  PSize and size are written first; the wmb() guarantees
 * they are visible before the status write transfers ownership
 * (OWNbit) to the chip.  The descriptor's RingEnd bit is preserved.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
477
/*
 * Record the DMA address of a fresh Rx buffer in @desc, then give the
 * descriptor to the hardware.  The address store must precede the
 * ownership transfer done inside sis190_give_to_asic().
 */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
484
485 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
486 {
487 desc->PSize = 0x0;
488 desc->addr = 0xdeadbeef;
489 desc->size &= cpu_to_le32(RingEnd);
490 wmb();
491 desc->status = 0x0;
492 }
493
/*
 * Allocate and DMA-map one Rx buffer for @desc, then hand the
 * descriptor to the hardware.  Returns 0 on success.  On allocation
 * failure the descriptor is neutralized (never given to the chip) and
 * -ENOMEM is returned.
 *
 * NOTE(review): the pci_map_single() result is not checked for a
 * mapping error -- confirm whether that can fail on supported
 * platforms.
 */
static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
			       struct RxDesc *desc, u32 rx_buf_sz)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz);
	if (!skb)
		goto err_out;

	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	sis190_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return ret;

err_out:
	ret = -ENOMEM;
	sis190_make_unusable_by_asic(desc);
	goto out;
}
519
520 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
521 u32 start, u32 end)
522 {
523 u32 cur;
524
525 for (cur = start; cur < end; cur++) {
526 int ret, i = cur % NUM_RX_DESC;
527
528 if (tp->Rx_skbuff[i])
529 continue;
530
531 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
532 tp->RxDescRing + i, tp->rx_buf_sz);
533 if (ret < 0)
534 break;
535 }
536 return cur - start;
537 }
538
/*
 * Copybreak helper: if the packet is shorter than rx_copybreak, copy it
 * into a small freshly-allocated skb (with NET_IP_ALIGN headroom) and
 * recycle the original DMA buffer in place, returning 0.  Otherwise
 * return -1 so the caller detaches the full-size buffer instead.
 */
static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				     struct RxDesc *desc, int rx_buf_sz)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (skb) {
			skb_reserve(skb, NET_IP_ALIGN);
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			sis190_give_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}
558
559 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
560 {
561 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
562
563 if ((status & CRCOK) && !(status & ErrMask))
564 return 0;
565
566 if (!(status & CRCOK))
567 stats->rx_crc_errors++;
568 else if (status & OVRUN)
569 stats->rx_over_errors++;
570 else if (status & (SHORT | LIMIT))
571 stats->rx_length_errors++;
572 else if (status & (MIIER | NIBON | COLON))
573 stats->rx_frame_errors++;
574
575 stats->rx_errors++;
576 return -1;
577 }
578
579 static int sis190_rx_interrupt(struct net_device *dev,
580 struct sis190_private *tp, void __iomem *ioaddr)
581 {
582 struct net_device_stats *stats = &tp->stats;
583 u32 rx_left, cur_rx = tp->cur_rx;
584 u32 delta, count;
585
586 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
587 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
588
589 for (; rx_left > 0; rx_left--, cur_rx++) {
590 unsigned int entry = cur_rx % NUM_RX_DESC;
591 struct RxDesc *desc = tp->RxDescRing + entry;
592 u32 status;
593
594 if (desc->status & OWNbit)
595 break;
596
597 status = le32_to_cpu(desc->PSize);
598
599 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
600 // status);
601
602 if (sis190_rx_pkt_err(status, stats) < 0)
603 sis190_give_to_asic(desc, tp->rx_buf_sz);
604 else {
605 struct sk_buff *skb = tp->Rx_skbuff[entry];
606 int pkt_size = (status & RxSizeMask) - 4;
607 void (*pci_action)(struct pci_dev *, dma_addr_t,
608 size_t, int) = pci_dma_sync_single_for_device;
609
610 if (unlikely(pkt_size > tp->rx_buf_sz)) {
611 net_intr(tp, KERN_INFO
612 "%s: (frag) status = %08x.\n",
613 dev->name, status);
614 stats->rx_dropped++;
615 stats->rx_length_errors++;
616 sis190_give_to_asic(desc, tp->rx_buf_sz);
617 continue;
618 }
619
620 pci_dma_sync_single_for_cpu(tp->pci_dev,
621 le32_to_cpu(desc->addr), tp->rx_buf_sz,
622 PCI_DMA_FROMDEVICE);
623
624 if (sis190_try_rx_copy(&skb, pkt_size, desc,
625 tp->rx_buf_sz)) {
626 pci_action = pci_unmap_single;
627 tp->Rx_skbuff[entry] = NULL;
628 sis190_make_unusable_by_asic(desc);
629 }
630
631 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
632 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
633
634 skb->dev = dev;
635 skb_put(skb, pkt_size);
636 skb->protocol = eth_type_trans(skb, dev);
637
638 sis190_rx_skb(skb);
639
640 dev->last_rx = jiffies;
641 stats->rx_packets++;
642 stats->rx_bytes += pkt_size;
643 if ((status & BCAST) == MCAST)
644 stats->multicast++;
645 }
646 }
647 count = cur_rx - tp->cur_rx;
648 tp->cur_rx = cur_rx;
649
650 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
651 if (!delta && count && netif_msg_intr(tp))
652 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
653 tp->dirty_rx += delta;
654
655 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
656 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
657
658 return count;
659 }
660
661 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
662 struct TxDesc *desc)
663 {
664 unsigned int len;
665
666 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
667
668 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
669
670 memset(desc, 0x00, sizeof(*desc));
671 }
672
/*
 * Reclaim Tx descriptors completed by the hardware: update statistics,
 * unmap the buffers, free the skbs and, if the queue had been stopped
 * because the ring was full, wake it again.
 */
static void sis190_tx_interrupt(struct net_device *dev,
		struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	/* Pairs with the smp_wmb() in sis190_start_xmit() so cur_tx is
	 * read after the producer's descriptor writes are visible. */
	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	/* The queue was stopped exactly when the whole ring is in flight. */
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		/* Still owned by the hardware: stop reclaiming here. */
		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		tp->stats.tx_packets++;
		tp->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Publish dirty_tx before waking the queue (pairs with the
		 * smp_rmb() in the xmit path's full-ring re-check). */
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
712
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff means the device is gone (hot unplug); 0 means this
	 * (shared) interrupt is not ours. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	/* Interface going down: just quiesce the chip. */
	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Acknowledge everything we observed in one shot. */
	SIS_W32(IntrStatus, status);

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* PHY (re)negotiation is slow: defer to process context. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
754
755 #ifdef CONFIG_NET_POLL_CONTROLLER
756 static void sis190_netpoll(struct net_device *dev)
757 {
758 struct sis190_private *tp = netdev_priv(dev);
759 struct pci_dev *pdev = tp->pci_dev;
760
761 disable_irq(pdev->irq);
762 sis190_interrupt(pdev->irq, dev);
763 enable_irq(pdev->irq);
764 }
765 #endif
766
767 static void sis190_free_rx_skb(struct sis190_private *tp,
768 struct sk_buff **sk_buff, struct RxDesc *desc)
769 {
770 struct pci_dev *pdev = tp->pci_dev;
771
772 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
773 PCI_DMA_FROMDEVICE);
774 dev_kfree_skb(*sk_buff);
775 *sk_buff = NULL;
776 sis190_make_unusable_by_asic(desc);
777 }
778
779 static void sis190_rx_clear(struct sis190_private *tp)
780 {
781 unsigned int i;
782
783 for (i = 0; i < NUM_RX_DESC; i++) {
784 if (!tp->Rx_skbuff[i])
785 continue;
786 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
787 }
788 }
789
790 static void sis190_init_ring_indexes(struct sis190_private *tp)
791 {
792 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
793 }
794
795 static int sis190_init_ring(struct net_device *dev)
796 {
797 struct sis190_private *tp = netdev_priv(dev);
798
799 sis190_init_ring_indexes(tp);
800
801 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
802 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
803
804 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
805 goto err_rx_clear;
806
807 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
808
809 return 0;
810
811 err_rx_clear:
812 sis190_rx_clear(tp);
813 return -ENOMEM;
814 }
815
/*
 * Program the Rx filter from dev->flags and the multicast list.
 * Promiscuous mode and oversized/ALLMULTI lists open the 64-bit hash
 * filter completely; otherwise each multicast address sets one
 * CRC-derived bit.  The register updates are done under tp->lock.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* 64-entry hash: bit index is the low 6 bits of the CRC. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	/* NOTE(review): the extra 0x2 looks like an Rx MAC enable bit
	 * (also written in sis190_hw_start) -- no datasheet to confirm. */
	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
857
/*
 * Soft-reset the chip: pulse bit 15 of IntrControl (posted with a dummy
 * read), then stop the DMA engines and mask/ack interrupts.
 */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	msleep(1);
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
	msleep(1);
}
867
/*
 * Bring the chip to a running state: soft reset, program the ring base
 * addresses, clear/mask interrupt and MAC state, install the Rx filter,
 * then enable interrupts and the Tx/Rx engines.  The rings must already
 * be allocated and filled.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	/* Ack anything pending and keep everything masked until the
	 * setup below is complete. */
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* undocumented register (see register map) */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
900
/*
 * Deferred PHY handling, scheduled from the interrupt handler on link
 * change and re-armed via tp->timer.  Three cases:
 *  - a PHY reset is still in progress: poll again shortly;
 *  - autonegotiation has not completed: reset the PHY and retry later;
 *  - negotiation done: program StationControl for the negotiated
 *    speed/duplex (with RGMII/BCM5461 specifics) and report carrier.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* Reset not finished yet: check back soon. */
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		netif_carrier_off(dev);
		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		/* Map the common advertised abilities to a StationControl
		 * speed/duplex value; ordered fastest first. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000XHALF | LPA_SLCT, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }	/* terminator */
		}, *p;
		u16 adv;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
			 dev->name, val, adv);

		/* Keep only the modes both link partners advertise. */
		val &= adv;

		/* First matching entry wins; the zero terminator makes
		 * p always valid after the loop. */
		for (p = reg31; p->val; p++) {
			if ((val & p->val) == p->val)
				break;
		}

		/* Preserve StationControl bits outside the speed/duplex
		 * field. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

	rtnl_unlock();
}
986
987 static void sis190_phy_timer(unsigned long __opaque)
988 {
989 struct net_device *dev = (struct net_device *)__opaque;
990 struct sis190_private *tp = netdev_priv(dev);
991
992 if (likely(netif_running(dev)))
993 schedule_work(&tp->phy_task);
994 }
995
996 static inline void sis190_delete_timer(struct net_device *dev)
997 {
998 struct sis190_private *tp = netdev_priv(dev);
999
1000 del_timer_sync(&tp->timer);
1001 }
1002
1003 static inline void sis190_request_timer(struct net_device *dev)
1004 {
1005 struct sis190_private *tp = netdev_priv(dev);
1006 struct timer_list *timer = &tp->timer;
1007
1008 init_timer(timer);
1009 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1010 timer->data = (unsigned long)dev;
1011 timer->function = sis190_phy_timer;
1012 add_timer(timer);
1013 }
1014
1015 static void sis190_set_rxbufsize(struct sis190_private *tp,
1016 struct net_device *dev)
1017 {
1018 unsigned int mtu = dev->mtu;
1019
1020 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1021 /* RxDesc->size has a licence to kill the lower bits */
1022 if (tp->rx_buf_sz & 0x07) {
1023 tp->rx_buf_sz += 8;
1024 tp->rx_buf_sz &= RX_BUF_MASK;
1025 }
1026 }
1027
/*
 * net_device open(): allocate the coherent descriptor rings, populate
 * the Rx ring, start the PHY watchdog, grab the (shared) irq and kick
 * the hardware.  Resources are unwound in reverse order on failure.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

	/* Error unwind: each label releases what was acquired before the
	 * corresponding failure point. */
err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}
1075
1076 static void sis190_tx_clear(struct sis190_private *tp)
1077 {
1078 unsigned int i;
1079
1080 for (i = 0; i < NUM_TX_DESC; i++) {
1081 struct sk_buff *skb = tp->Tx_skbuff[i];
1082
1083 if (!skb)
1084 continue;
1085
1086 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1087 tp->Tx_skbuff[i] = NULL;
1088 dev_kfree_skb(skb);
1089
1090 tp->stats.tx_dropped++;
1091 }
1092 tp->cur_tx = tp->dirty_tx = 0;
1093 }
1094
/*
 * Stop the interface: kill the PHY timer and queue, flush the deferred
 * PHY work, then shut the chip down.  The loop repeats until IntrMask
 * reads back clear, synchronizing with a possibly concurrent interrupt
 * (and, once, disabling the poll path) so nothing can re-enable the
 * hardware behind our back.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	flush_scheduled_work();

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked) {
			netif_poll_disable(dev);
			poll_locked++;
		}

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1128
1129 static int sis190_close(struct net_device *dev)
1130 {
1131 struct sis190_private *tp = netdev_priv(dev);
1132 struct pci_dev *pdev = tp->pci_dev;
1133
1134 sis190_down(dev);
1135
1136 free_irq(dev->irq, dev);
1137
1138 netif_poll_enable(dev);
1139
1140 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1141 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1142
1143 tp->TxDescRing = NULL;
1144 tp->RxDescRing = NULL;
1145
1146 return 0;
1147 }
1148
/*
 * Queue one skb for transmission.  Runt frames are padded to ETH_ZLEN
 * (NOTE(review): assumes skb_padto() owns/frees the skb on failure in
 * this kernel version -- confirm).  The descriptor is fully written
 * before ownership is transferred (wmb()), then the Tx engine is
 * poked.  If this submission fills the ring the queue is stopped, with
 * a re-check of dirty_tx to close the race against a concurrent
 * Tx-completion interrupt.
 */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			tp->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* Should never happen: the queue is stopped before the ring can
	 * fill up. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* The descriptor must be complete before OWNbit is handed over. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	/* Publish cur_tx before the Tx-complete path can observe it
	 * (pairs with smp_rmb() in sis190_tx_interrupt()). */
	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		/* A completion may have freed a slot meanwhile. */
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1211
1212 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1213 {
1214 struct sis190_private *tp = netdev_priv(dev);
1215
1216 return &tp->stats;
1217 }
1218
1219 static void sis190_free_phy(struct list_head *first_phy)
1220 {
1221 struct sis190_phy *cur, *next;
1222
1223 list_for_each_entry_safe(cur, next, first_phy, list) {
1224 kfree(cur);
1225 }
1226 }
1227
1228 /**
1229 * sis190_default_phy - Select default PHY for sis190 mac.
1230 * @dev: the net device to probe for
1231 *
1232 * Select first detected PHY with link as default.
1233 * If no one is link on, select PHY whose types is HOME as default.
1234 * If HOME doesn't exist, select LAN.
1235 */
1236 static u16 sis190_default_phy(struct net_device *dev)
1237 {
1238 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1239 struct sis190_private *tp = netdev_priv(dev);
1240 struct mii_if_info *mii_if = &tp->mii_if;
1241 void __iomem *ioaddr = tp->mmio_addr;
1242 u16 status;
1243
1244 phy_home = phy_default = phy_lan = NULL;
1245
1246 list_for_each_entry(phy, &tp->first_phy, list) {
1247 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1248
1249 // Link ON & Not select default PHY & not ghost PHY.
1250 if ((status & BMSR_LSTATUS) &&
1251 !phy_default &&
1252 (phy->type != UNKNOWN)) {
1253 phy_default = phy;
1254 } else {
1255 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1256 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1257 status | BMCR_ANENABLE | BMCR_ISOLATE);
1258 if (phy->type == HOME)
1259 phy_home = phy;
1260 else if (phy->type == LAN)
1261 phy_lan = phy;
1262 }
1263 }
1264
1265 if (!phy_default) {
1266 if (phy_home)
1267 phy_default = phy_home;
1268 else if (phy_lan)
1269 phy_default = phy_lan;
1270 else
1271 phy_default = list_entry(&tp->first_phy,
1272 struct sis190_phy, list);
1273 }
1274
1275 if (mii_if->phy_id != phy_default->phy_id) {
1276 mii_if->phy_id = phy_default->phy_id;
1277 net_probe(tp, KERN_INFO
1278 "%s: Using transceiver at address %d as default.\n",
1279 pci_name(tp->pci_dev), mii_if->phy_id);
1280 }
1281
1282 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1283 status &= (~BMCR_ISOLATE);
1284
1285 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1286 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1287
1288 return status;
1289 }
1290
/*
 * Record a newly discovered PHY: read its ID registers, match them
 * against mii_chip_table to classify the transceiver and accumulate
 * chip-specific feature flags in tp->features.
 */
static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *p;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	/* Mask the low nibble of PHYSID2 so any revision matches. */
	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == phy->id[0]) &&
		    (p->id[1] == (phy->id[1] & 0xfff0))) {
			break;
		}
	}

	if (p->id[1]) {
		/* MIX parts are classed LAN vs HOME by 100Mb ability. */
		phy->type = (p->type == MIX) ?
			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME) : p->type;
		tp->features |= p->feature;
	} else
		phy->type = UNKNOWN;

	net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
		  pci_name(tp->pci_dev),
		  (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
}
1324
/*
 * Marvell 88E1111 fixup: program vendor-specific registers 0x1b and
 * 0x14 with values that depend on whether the MAC<->PHY link runs
 * RGMII or GMII. The register/value pairs are magic — presumably
 * lifted from the vendor driver (see file header); confirm against
 * the 88E1111 datasheet before touching them.
 */
static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
{
	if (tp->features & F_PHY_88E1111) {
		void __iomem *ioaddr = tp->mmio_addr;
		int phy_id = tp->mii_if.phy_id;
		u16 reg[2][2] = {
			{ 0x808b, 0x0ce1 },
			{ 0x808f, 0x0c60 }
		}, *p;

		/* First pair for RGMII wiring, second for GMII. */
		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];

		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
		udelay(200);
		mdio_write(ioaddr, phy_id, 0x14, p[1]);
		udelay(200);
	}
}
1343
1344 /**
1345 * sis190_mii_probe - Probe MII PHY for sis190
1346 * @dev: the net device to probe for
1347 *
1348 * Search for total of 32 possible mii phy addresses.
1349 * Identify and set current phy if found one,
1350 * return error if it failed to found.
1351 */
1352 static int __devinit sis190_mii_probe(struct net_device *dev)
1353 {
1354 struct sis190_private *tp = netdev_priv(dev);
1355 struct mii_if_info *mii_if = &tp->mii_if;
1356 void __iomem *ioaddr = tp->mmio_addr;
1357 int phy_id;
1358 int rc = 0;
1359
1360 INIT_LIST_HEAD(&tp->first_phy);
1361
1362 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1363 struct sis190_phy *phy;
1364 u16 status;
1365
1366 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1367
1368 // Try next mii if the current one is not accessible.
1369 if (status == 0xffff || status == 0x0000)
1370 continue;
1371
1372 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1373 if (!phy) {
1374 sis190_free_phy(&tp->first_phy);
1375 rc = -ENOMEM;
1376 goto out;
1377 }
1378
1379 sis190_init_phy(dev, tp, phy, phy_id, status);
1380
1381 list_add(&tp->first_phy, &phy->list);
1382 }
1383
1384 if (list_empty(&tp->first_phy)) {
1385 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1386 pci_name(tp->pci_dev));
1387 rc = -EIO;
1388 goto out;
1389 }
1390
1391 /* Select default PHY for mac */
1392 sis190_default_phy(dev);
1393
1394 sis190_mii_probe_88e1111_fixup(tp);
1395
1396 mii_if->dev = dev;
1397 mii_if->mdio_read = __mdio_read;
1398 mii_if->mdio_write = __mdio_write;
1399 mii_if->phy_id_mask = PHY_ID_ANY;
1400 mii_if->reg_num_mask = MII_REG_ANY;
1401 out:
1402 return rc;
1403 }
1404
1405 static void __devexit sis190_mii_remove(struct net_device *dev)
1406 {
1407 struct sis190_private *tp = netdev_priv(dev);
1408
1409 sis190_free_phy(&tp->first_phy);
1410 }
1411
/*
 * Undo sis190_init_board(): unmap the registers, release the PCI
 * regions and device, and free the net_device — the exact reverse
 * of the acquisition order.
 */
static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1422
/*
 * Allocate the net_device and bring the PCI function up far enough
 * to talk to the chip: enable the device, claim BAR 0, map the
 * registers, mask interrupts and soft-reset the MAC.
 * Returns the net_device, or an ERR_PTR() on failure.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* The chip is driven through memory-mapped BAR 0 only. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	/* Descriptors carry 32 bit bus addresses only. */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	/* Silence the chip before any IRQ handler is registered. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

	/* Error unwind: reverse order of acquisition. */
err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1506
/*
 * Watchdog handler: transmission did not complete within
 * SIS190_TX_TIMEOUT. Stop the transmitter, drop whatever sits in
 * the Tx ring and restart the chip from scratch.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);


	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1535
1536 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1537 {
1538 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1539 }
1540
1541 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1542 struct net_device *dev)
1543 {
1544 struct sis190_private *tp = netdev_priv(dev);
1545 void __iomem *ioaddr = tp->mmio_addr;
1546 u16 sig;
1547 int i;
1548
1549 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1550 pci_name(pdev));
1551
1552 /* Check to see if there is a sane EEPROM */
1553 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1554
1555 if ((sig == 0xffff) || (sig == 0x0000)) {
1556 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1557 pci_name(pdev), sig);
1558 return -EIO;
1559 }
1560
1561 /* Get MAC address from EEPROM */
1562 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1563 __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1564
1565 ((u16 *)dev->dev_addr)[0] = le16_to_cpu(w);
1566 }
1567
1568 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1569
1570 return 0;
1571 }
1572
/**
 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
 * @pdev: PCI device
 * @dev: network device to get address for
 *
 * SiS965 model, use APC CMOS RAM to store MAC address.
 * APC CMOS RAM is accessed through ISA bridge.
 * MAC address is read into @net_dev->dev_addr.
 */
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
						  struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *isa_bridge;
	u8 reg, tmp8;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
		  pci_name(pdev));

	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
	if (!isa_bridge) {
		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
			  pci_name(pdev));
		return -EIO;
	}

	/* Enable port 78h & 79h to access APC Registers. */
	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
	reg = (tmp8 & ~0x02);
	pci_write_config_byte(isa_bridge, 0x48, reg);
	udelay(50);
	/* NOTE(review): read-back presumably flushes the config write;
	 * the value in 'reg' is overwritten below anyway. */
	pci_read_config_byte(isa_bridge, 0x48, &reg);

	/* 0x78/0x79 act as index/data ports; the MAC address sits at
	 * APC offsets 0x9..0xe. */
	for (i = 0; i < MAC_ADDR_LEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}

	/* APC offset 0x12 carries the interface info byte (RGMII bit). */
	outb(0x12, 0x78);
	reg = inb(0x79);

	sis190_set_rgmii(tp, reg);

	/* Restore the value to ISA Bridge */
	pci_write_config_byte(isa_bridge, 0x48, tmp8);
	pci_dev_put(isa_bridge);

	return 0;
}
1623
/**
 * sis190_init_rxfilter - Initialize the Rx filter
 * @dev: network device to initialize
 *
 * Set receive filter address to our MAC address
 * and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	/* Program our station address byte by byte. */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	/* Restore the original filter bits and flush posted writes. */
	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
1652
1653 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1654 {
1655 u8 from;
1656
1657 pci_read_config_byte(pdev, 0x73, &from);
1658
1659 return (from & 0x00000001) ?
1660 sis190_get_mac_addr_from_apc(pdev, dev) :
1661 sis190_get_mac_addr_from_eeprom(pdev, dev);
1662 }
1663
/*
 * Advertise every 10/100 mode plus 1000FULL on the current PHY and
 * (re)start autonegotiation.
 */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1688
1689 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1690 {
1691 struct sis190_private *tp = netdev_priv(dev);
1692
1693 return mii_ethtool_gset(&tp->mii_if, cmd);
1694 }
1695
1696 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1697 {
1698 struct sis190_private *tp = netdev_priv(dev);
1699
1700 return mii_ethtool_sset(&tp->mii_if, cmd);
1701 }
1702
1703 static void sis190_get_drvinfo(struct net_device *dev,
1704 struct ethtool_drvinfo *info)
1705 {
1706 struct sis190_private *tp = netdev_priv(dev);
1707
1708 strcpy(info->driver, DRV_NAME);
1709 strcpy(info->version, DRV_VERSION);
1710 strcpy(info->bus_info, pci_name(tp->pci_dev));
1711 }
1712
/* The whole MMIO register window is exposed through ethtool -d. */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1717
/* Snapshot the MMIO register window into @p under the driver lock. */
static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	/* Never read past the end of the mapped window. */
	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1731
1732 static int sis190_nway_reset(struct net_device *dev)
1733 {
1734 struct sis190_private *tp = netdev_priv(dev);
1735
1736 return mii_nway_restart(&tp->mii_if);
1737 }
1738
1739 static u32 sis190_get_msglevel(struct net_device *dev)
1740 {
1741 struct sis190_private *tp = netdev_priv(dev);
1742
1743 return tp->msg_enable;
1744 }
1745
1746 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1747 {
1748 struct sis190_private *tp = netdev_priv(dev);
1749
1750 tp->msg_enable = value;
1751 }
1752
/* ethtool entry points; speed/duplex handling is delegated to the
 * generic MII library through tp->mii_if. */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
1764
1765 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1766 {
1767 struct sis190_private *tp = netdev_priv(dev);
1768
1769 return !netif_running(dev) ? -EINVAL :
1770 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1771 }
1772
/*
 * PCI probe: set up the board, read the MAC address, wire the
 * net_device operations, probe the PHYs and register the interface.
 * Returns 0 on success or a negative errno.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	/* Announce the driver once, on the first probed device. */
	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	/* Hook up the net_device operations. */
	dev->open = sis190_open;
	dev->stop = sis190_close;
	dev->do_ioctl = sis190_ioctl;
	dev->get_stats = sis190_get_stats;
	dev->tx_timeout = sis190_tx_timeout;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
	dev->hard_start_xmit = sis190_start_xmit;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sis190_netpoll;
#endif
	dev->set_multicast_list = sis190_set_rx_mode;
	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* Placeholder - the device is MMIO only (see sis190_init_board). */
	dev->base_addr = (unsigned long) 0xdead;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
	       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
	       pci_name(pdev), sis_chip_info[ent->driver_data].name,
	       ioaddr, dev->irq,
	       dev->dev_addr[0], dev->dev_addr[1],
	       dev->dev_addr[2], dev->dev_addr[3],
	       dev->dev_addr[4], dev->dev_addr[5]);

	net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
		  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");

	/* Start with carrier off until autonegotiation completes. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
1854
/*
 * PCI remove: tear down in (mostly) reverse probe order.
 * NOTE(review): the PHY list is freed while the device is still
 * registered — confirm nothing (e.g. the phy work task) can walk
 * tp->first_phy before unregister_netdev() completes.
 */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	sis190_mii_remove(dev);
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
1864
/* PCI glue: supported IDs plus probe/remove hooks. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1871
/* Module load: hand the driver to the PCI core. */
static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}
1876
/* Module unload: detach the driver from the PCI core. */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}

/* Module entry points. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);
This page took 0.0658 seconds and 6 git commands to generate.