1 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 Copyright 2000,2001 The Linux Kernel Team
4 Written/copyright 1994-2001 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
10 for more information on this driver.
11
12 Please submit bugs to http://bugzilla.kernel.org/ .
13 */
14
15
16 #define DRV_NAME "tulip"
17 #ifdef CONFIG_TULIP_NAPI
18 #define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
19 #else
20 #define DRV_VERSION "1.1.15"
21 #endif
22 #define DRV_RELDATE "Feb 27, 2007"
23
24
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 #include <linux/slab.h>
28 #include "tulip.h"
29 #include <linux/init.h>
30 #include <linux/etherdevice.h>
31 #include <linux/delay.h>
32 #include <linux/mii.h>
33 #include <linux/ethtool.h>
34 #include <linux/crc32.h>
35 #include <asm/unaligned.h>
36 #include <asm/uaccess.h>
37
38 #ifdef CONFIG_SPARC
39 #include <asm/prom.h>
40 #endif
41
42 static char version[] __devinitdata =
43 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
44
45 /* A few user-configurable values. */
46
47 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
48 static unsigned int max_interrupt_work = 25;
49
50 #define MAX_UNITS 8
51 /* Used to pass the full-duplex flag, etc. */
52 static int full_duplex[MAX_UNITS];
53 static int options[MAX_UNITS];
54 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
55
56 /* The possible media types that can be set in options[] are: */
57 const char * const medianame[32] = {
58 "10baseT", "10base2", "AUI", "100baseTx",
59 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
60 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
61 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
62 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
63 "","","","", "","","","", "","","","Transceiver reset",
64 };
65
66 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
67 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
68 defined(CONFIG_SPARC) || defined(__ia64__) || \
69 defined(__sh__) || defined(__mips__)
70 static int rx_copybreak = 1518;
71 #else
72 static int rx_copybreak = 100;
73 #endif
74
75 /*
76 Set the bus performance register.
77 Typical: Set 16 longword cache alignment, no burst limit.
78 Cache alignment bits 15:14 Burst length 13:8
79 0000 No alignment 0x00000000 unlimited 0800 8 longwords
80 4000 8 longwords 0100 1 longword 1000 16 longwords
81 8000 16 longwords 0200 2 longwords 2000 32 longwords
82 C000 32 longwords 0400 4 longwords
83 Warning: many older 486 systems are broken and require setting 0x00A04800
84 8 longword cache alignment, 8 longword burst.
85 ToDo: Non-Intel setting could be better.
86 */
87
88 #if defined(__alpha__) || defined(__ia64__)
89 static int csr0 = 0x01A00000 | 0xE000;
90 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
91 static int csr0 = 0x01A00000 | 0x8000;
92 #elif defined(CONFIG_SPARC) || defined(__hppa__)
93 /* The UltraSparc PCI controllers will disconnect at every 64-byte
94 * crossing anyways so it makes no sense to tell Tulip to burst
95 * any more than that.
96 */
97 static int csr0 = 0x01A00000 | 0x9000;
98 #elif defined(__arm__) || defined(__sh__)
99 static int csr0 = 0x01A00000 | 0x4800;
100 #elif defined(__mips__)
101 static int csr0 = 0x00200000 | 0x4000;
102 #else
103 #warning Processor architecture undefined!
104 static int csr0 = 0x00A00000 | 0x4800;
105 #endif
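/*
 * Editor's note (illustrative only): the per-architecture csr0 values above
 * can be decoded with the bit layout documented in the comment block, i.e.
 * cache alignment in bits 15:14 and burst length in bits 13:8:
 *
 *   0x8000 (x86/powerpc)  -> 16-longword cache alignment, unlimited burst
 *   0x9000 (sparc/hppa)   -> 16-longword cache alignment, 16-longword burst
 *   0x4800 (arm/sh)       ->  8-longword cache alignment,  8-longword burst
 *   0xE000 (alpha/ia64)   -> 32-longword cache alignment, 32-longword burst
 *
 * The 0x01A00000 portion appears to be the MWI/MRL/MRM bus-mode bits that
 * tulip_mwi_config() below also manipulates; see tulip.h for the exact
 * definitions.
 */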
106
107 /* Operational parameters that usually are not changed. */
108 /* Time in jiffies before concluding the transmitter is hung. */
109 #define TX_TIMEOUT (4*HZ)
110
111
112 MODULE_AUTHOR("The Linux Kernel Team");
113 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
114 MODULE_LICENSE("GPL");
115 MODULE_VERSION(DRV_VERSION);
116 module_param(tulip_debug, int, 0);
117 module_param(max_interrupt_work, int, 0);
118 module_param(rx_copybreak, int, 0);
119 module_param(csr0, int, 0);
120 module_param_array(options, int, NULL, 0);
121 module_param_array(full_duplex, int, NULL, 0);
122
123 #define PFX DRV_NAME ": "
124
125 #ifdef TULIP_DEBUG
126 int tulip_debug = TULIP_DEBUG;
127 #else
128 int tulip_debug = 1;
129 #endif
130
131 static void tulip_timer(unsigned long data)
132 {
133 struct net_device *dev = (struct net_device *)data;
134 struct tulip_private *tp = netdev_priv(dev);
135
136 if (netif_running(dev))
137 schedule_work(&tp->media_work);
138 }
139
140 /*
141 * This table is used during operation for capabilities and the media timer.
142 *
143 * It is indexed via the values in 'enum chips'
144 */
145
146 struct tulip_chip_table tulip_tbl[] = {
147 { }, /* placeholder for array, slot unused currently */
148 { }, /* placeholder for array, slot unused currently */
149
150 /* DC21140 */
151 { "Digital DS21140 Tulip", 128, 0x0001ebef,
152 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
153 tulip_media_task },
154
155 /* DC21142, DC21143 */
156 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
157 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
158 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
159
160 /* LC82C168 */
161 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
162 HAS_MII | HAS_PNICNWAY, pnic_timer, },
163
164 /* MX98713 */
165 { "Macronix 98713 PMAC", 128, 0x0001ebef,
166 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
167
168 /* MX98715 */
169 { "Macronix 98715 PMAC", 256, 0x0001ebef,
170 HAS_MEDIA_TABLE, mxic_timer, },
171
172 /* MX98725 */
173 { "Macronix 98725 PMAC", 256, 0x0001ebef,
174 HAS_MEDIA_TABLE, mxic_timer, },
175
176 /* AX88140 */
177 { "ASIX AX88140", 128, 0x0001fbff,
178 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
179 | IS_ASIX, tulip_timer, tulip_media_task },
180
181 /* PNIC2 */
182 { "Lite-On PNIC-II", 256, 0x0801fbff,
183 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
184
185 /* COMET */
186 { "ADMtek Comet", 256, 0x0001abef,
187 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
188
189 /* COMPEX9881 */
190 { "Compex 9881 PMAC", 128, 0x0001ebef,
191 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
192
193 /* I21145 */
194 { "Intel DS21145 Tulip", 128, 0x0801fbff,
195 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
196 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
197
198 /* DM910X */
199 #ifdef CONFIG_TULIP_DM910X
200 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
201 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
202 tulip_timer, tulip_media_task },
203 #else
204 { NULL },
205 #endif
206
207 /* RS7112 */
208 { "Conexant LANfinity", 256, 0x0001ebef,
209 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
210
211 };
212
213
214 static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
215 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
216 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
217 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
218 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
219 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
220 /* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
221 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
222 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
223 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
230 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
231 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
232 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
233 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
234 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
235 #ifdef CONFIG_TULIP_DM910X
236 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
237 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
238 #endif
239 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
241 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
246 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
247 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
248 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
249 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
250 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
251 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
252 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
253 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
254 { } /* terminate list */
255 };
256 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
257
258
259 /* A full-duplex map for media types. */
260 const char tulip_media_cap[32] =
261 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
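/*
 * Editor's note (illustrative only): each entry above is a small bitmask of
 * the MediaIs* capability flags from tulip.h, indexed by dev->if_port.  A
 * sketch, assuming MediaIsFD=1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8
 * and MediaIs100=16:
 *
 *   tulip_media_cap[3]  == 16  -> 100baseTx: 100 Mbit, half duplex
 *   tulip_media_cap[5]  == 19  -> 100baseTx-FDX: 100 Mbit, always full duplex
 *   tulip_media_cap[11] ==  5  -> MII: MII transceiver, full-duplex capable
 *
 * which is why tests such as "tulip_media_cap[dev->if_port] & MediaIsMII"
 * are used throughout the driver to select the MII code paths.
 */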
262
263 static void tulip_tx_timeout(struct net_device *dev);
264 static void tulip_init_ring(struct net_device *dev);
265 static void tulip_free_ring(struct net_device *dev);
266 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
267 struct net_device *dev);
268 static int tulip_open(struct net_device *dev);
269 static int tulip_close(struct net_device *dev);
270 static void tulip_up(struct net_device *dev);
271 static void tulip_down(struct net_device *dev);
272 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
273 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
274 static void set_rx_mode(struct net_device *dev);
275 #ifdef CONFIG_NET_POLL_CONTROLLER
276 static void poll_tulip(struct net_device *dev);
277 #endif
278
279 static void tulip_set_power_state (struct tulip_private *tp,
280 int sleep, int snooze)
281 {
282 if (tp->flags & HAS_ACPI) {
283 u32 tmp, newtmp;
284 pci_read_config_dword (tp->pdev, CFDD, &tmp);
285 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
286 if (sleep)
287 newtmp |= CFDD_Sleep;
288 else if (snooze)
289 newtmp |= CFDD_Snooze;
290 if (tmp != newtmp)
291 pci_write_config_dword (tp->pdev, CFDD, newtmp);
292 }
293
294 }
295
296
297 static void tulip_up(struct net_device *dev)
298 {
299 struct tulip_private *tp = netdev_priv(dev);
300 void __iomem *ioaddr = tp->base_addr;
301 int next_tick = 3*HZ;
302 u32 reg;
303 int i;
304
305 #ifdef CONFIG_TULIP_NAPI
306 napi_enable(&tp->napi);
307 #endif
308
309 /* Wake the chip from sleep/snooze mode. */
310 tulip_set_power_state (tp, 0, 0);
311
312 /* On some chip revs we must set the MII/SYM port before the reset!? */
313 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
314 iowrite32(0x00040000, ioaddr + CSR6);
315
316 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
317 iowrite32(0x00000001, ioaddr + CSR0);
318 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
319 udelay(100);
320
321 /* Deassert reset.
322 Wait the specified 50 PCI cycles after a reset by initializing
323 Tx and Rx queues and the address filter list. */
324 iowrite32(tp->csr0, ioaddr + CSR0);
325 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
326 udelay(100);
327
328 if (tulip_debug > 1)
329 printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
330 dev->name, dev->irq);
331
332 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
333 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
334 tp->cur_rx = tp->cur_tx = 0;
335 tp->dirty_rx = tp->dirty_tx = 0;
336
337 if (tp->flags & MC_HASH_ONLY) {
338 u32 addr_low = get_unaligned_le32(dev->dev_addr);
339 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
340 if (tp->chip_id == AX88140) {
341 iowrite32(0, ioaddr + CSR13);
342 iowrite32(addr_low, ioaddr + CSR14);
343 iowrite32(1, ioaddr + CSR13);
344 iowrite32(addr_high, ioaddr + CSR14);
345 } else if (tp->flags & COMET_MAC_ADDR) {
346 iowrite32(addr_low, ioaddr + 0xA4);
347 iowrite32(addr_high, ioaddr + 0xA8);
348 iowrite32(0, ioaddr + 0xAC);
349 iowrite32(0, ioaddr + 0xB0);
350 }
351 } else {
352 /* This is set_rx_mode(), but without starting the transmitter. */
353 u16 *eaddrs = (u16 *)dev->dev_addr;
354 u16 *setup_frm = &tp->setup_frame[15*6];
355 dma_addr_t mapping;
356
357 /* 21140 bug: you must add the broadcast address. */
358 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
359 /* Fill the final entry of the table with our physical address. */
360 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
361 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
362 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
363
364 mapping = pci_map_single(tp->pdev, tp->setup_frame,
365 sizeof(tp->setup_frame),
366 PCI_DMA_TODEVICE);
367 tp->tx_buffers[tp->cur_tx].skb = NULL;
368 tp->tx_buffers[tp->cur_tx].mapping = mapping;
369
370 /* Put the setup frame on the Tx list. */
371 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
372 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
373 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
374
375 tp->cur_tx++;
376 }
377
378 tp->saved_if_port = dev->if_port;
379 if (dev->if_port == 0)
380 dev->if_port = tp->default_port;
381
382 /* Allow selecting a default media. */
383 i = 0;
384 if (tp->mtable == NULL)
385 goto media_picked;
386 if (dev->if_port) {
387 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
388 (dev->if_port == 12 ? 0 : dev->if_port);
389 for (i = 0; i < tp->mtable->leafcount; i++)
390 if (tp->mtable->mleaf[i].media == looking_for) {
391 dev_info(&dev->dev,
392 "Using user-specified media %s\n",
393 medianame[dev->if_port]);
394 goto media_picked;
395 }
396 }
397 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
398 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
399 for (i = 0; i < tp->mtable->leafcount; i++)
400 if (tp->mtable->mleaf[i].media == looking_for) {
401 dev_info(&dev->dev,
402 "Using EEPROM-set media %s\n",
403 medianame[looking_for]);
404 goto media_picked;
405 }
406 }
407 /* Start sensing first non-full-duplex media. */
408 for (i = tp->mtable->leafcount - 1;
409 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
410 ;
411 media_picked:
412
413 tp->csr6 = 0;
414 tp->cur_index = i;
415 tp->nwayset = 0;
416
417 if (dev->if_port) {
418 if (tp->chip_id == DC21143 &&
419 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
420 /* We must reset the media CSRs when we force-select MII mode. */
421 iowrite32(0x0000, ioaddr + CSR13);
422 iowrite32(0x0000, ioaddr + CSR14);
423 iowrite32(0x0008, ioaddr + CSR15);
424 }
425 tulip_select_media(dev, 1);
426 } else if (tp->chip_id == DC21142) {
427 if (tp->mii_cnt) {
428 tulip_select_media(dev, 1);
429 if (tulip_debug > 1)
430 dev_info(&dev->dev,
431 "Using MII transceiver %d, status %04x\n",
432 tp->phys[0],
433 tulip_mdio_read(dev, tp->phys[0], 1));
434 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
435 tp->csr6 = csr6_mask_hdcap;
436 dev->if_port = 11;
437 iowrite32(0x0000, ioaddr + CSR13);
438 iowrite32(0x0000, ioaddr + CSR14);
439 } else
440 t21142_start_nway(dev);
441 } else if (tp->chip_id == PNIC2) {
442 /* for initial startup advertise 10/100 Full and Half */
443 tp->sym_advertise = 0x01E0;
444 /* enable autonegotiate end interrupt */
445 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
446 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
447 pnic2_start_nway(dev);
448 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
449 if (tp->mii_cnt) {
450 dev->if_port = 11;
451 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
452 iowrite32(0x0001, ioaddr + CSR15);
453 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
454 pnic_do_nway(dev);
455 else {
456 /* Start with 10mbps to do autonegotiation. */
457 iowrite32(0x32, ioaddr + CSR12);
458 tp->csr6 = 0x00420000;
459 iowrite32(0x0001B078, ioaddr + 0xB8);
460 iowrite32(0x0201B078, ioaddr + 0xB8);
461 next_tick = 1*HZ;
462 }
463 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
464 ! tp->medialock) {
465 dev->if_port = 0;
466 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
467 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
468 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
469 /* Provided by BOLO, Macronix - 12/10/1998. */
470 dev->if_port = 0;
471 tp->csr6 = 0x01a80200;
472 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
473 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
474 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
475 /* Enable automatic Tx underrun recovery. */
476 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
477 dev->if_port = tp->mii_cnt ? 11 : 0;
478 tp->csr6 = 0x00040000;
479 } else if (tp->chip_id == AX88140) {
480 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
481 } else
482 tulip_select_media(dev, 1);
483
484 /* Start the chip's Tx to process setup frame. */
485 tulip_stop_rxtx(tp);
486 barrier();
487 udelay(5);
488 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
489
490 /* Enable interrupts by setting the interrupt mask. */
491 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
492 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
493 tulip_start_rxtx(tp);
494 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
495
496 if (tulip_debug > 2) {
497 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
498 dev->name, ioread32(ioaddr + CSR0),
499 ioread32(ioaddr + CSR5),
500 ioread32(ioaddr + CSR6));
501 }
502
503 /* Set the timer to check for link beat and perhaps switch
504 to an alternate media type. */
505 tp->timer.expires = RUN_AT(next_tick);
506 add_timer(&tp->timer);
507 #ifdef CONFIG_TULIP_NAPI
508 init_timer(&tp->oom_timer);
509 tp->oom_timer.data = (unsigned long)dev;
510 tp->oom_timer.function = oom_timer;
511 #endif
512 }
513
514 static int
515 tulip_open(struct net_device *dev)
516 {
517 int retval;
518
519 tulip_init_ring (dev);
520
521 retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
522 if (retval)
523 goto free_ring;
524
525 tulip_up (dev);
526
527 netif_start_queue (dev);
528
529 return 0;
530
531 free_ring:
532 tulip_free_ring (dev);
533 return retval;
534 }
535
536
537 static void tulip_tx_timeout(struct net_device *dev)
538 {
539 struct tulip_private *tp = netdev_priv(dev);
540 void __iomem *ioaddr = tp->base_addr;
541 unsigned long flags;
542
543 spin_lock_irqsave (&tp->lock, flags);
544
545 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
546 /* Do nothing -- the media monitor should handle this. */
547 if (tulip_debug > 1)
548 dev_warn(&dev->dev,
549 "Transmit timeout using MII device\n");
550 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
551 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
552 tp->chip_id == DM910X) {
553 dev_warn(&dev->dev,
554 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
555 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
556 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
557 ioread32(ioaddr + CSR15));
558 tp->timeout_recovery = 1;
559 schedule_work(&tp->media_work);
560 goto out_unlock;
561 } else if (tp->chip_id == PNIC2) {
562 dev_warn(&dev->dev,
563 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
564 (int)ioread32(ioaddr + CSR5),
565 (int)ioread32(ioaddr + CSR6),
566 (int)ioread32(ioaddr + CSR7),
567 (int)ioread32(ioaddr + CSR12));
568 } else {
569 dev_warn(&dev->dev,
570 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
571 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
572 dev->if_port = 0;
573 }
574
575 #if defined(way_too_many_messages)
576 if (tulip_debug > 3) {
577 int i;
578 for (i = 0; i < RX_RING_SIZE; i++) {
579 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
580 int j;
581 printk(KERN_DEBUG
582 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
583 i,
584 (unsigned int)tp->rx_ring[i].status,
585 (unsigned int)tp->rx_ring[i].length,
586 (unsigned int)tp->rx_ring[i].buffer1,
587 (unsigned int)tp->rx_ring[i].buffer2,
588 buf[0], buf[1], buf[2]);
589 for (j = 0; buf[j] != 0xee && j < 1600; j++)
590 if (j < 100)
591 pr_cont(" %02x", buf[j]);
592 pr_cont(" j=%d\n", j);
593 }
594 printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
595 for (i = 0; i < RX_RING_SIZE; i++)
596 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
597 printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
598 for (i = 0; i < TX_RING_SIZE; i++)
599 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
600 pr_cont("\n");
601 }
602 #endif
603
604 tulip_tx_timeout_complete(tp, ioaddr);
605
606 out_unlock:
607 spin_unlock_irqrestore (&tp->lock, flags);
608 dev->trans_start = jiffies; /* prevent tx timeout */
609 netif_wake_queue (dev);
610 }
611
612
613 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
614 static void tulip_init_ring(struct net_device *dev)
615 {
616 struct tulip_private *tp = netdev_priv(dev);
617 int i;
618
619 tp->susp_rx = 0;
620 tp->ttimer = 0;
621 tp->nir = 0;
622
623 for (i = 0; i < RX_RING_SIZE; i++) {
624 tp->rx_ring[i].status = 0x00000000;
625 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
626 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
627 tp->rx_buffers[i].skb = NULL;
628 tp->rx_buffers[i].mapping = 0;
629 }
630 /* Mark the last entry as wrapping the ring. */
631 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
632 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
633
634 for (i = 0; i < RX_RING_SIZE; i++) {
635 dma_addr_t mapping;
636
637 /* Note the receive buffer must be longword aligned.
638 dev_alloc_skb() provides 16 byte alignment. But do *not*
639 use skb_reserve() to align the IP header! */
640 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
641 tp->rx_buffers[i].skb = skb;
642 if (skb == NULL)
643 break;
644 mapping = pci_map_single(tp->pdev, skb->data,
645 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
646 tp->rx_buffers[i].mapping = mapping;
647 skb->dev = dev; /* Mark as being used by this device. */
648 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
649 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
650 }
651 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
652
653 /* The Tx buffer descriptor is filled in as needed, but we
654 do need to clear the ownership bit. */
655 for (i = 0; i < TX_RING_SIZE; i++) {
656 tp->tx_buffers[i].skb = NULL;
657 tp->tx_buffers[i].mapping = 0;
658 tp->tx_ring[i].status = 0x00000000;
659 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
660 }
661 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
662 }
663
664 static netdev_tx_t
665 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
666 {
667 struct tulip_private *tp = netdev_priv(dev);
668 int entry;
669 u32 flag;
670 dma_addr_t mapping;
671 unsigned long flags;
672
673 spin_lock_irqsave(&tp->lock, flags);
674
675 /* Calculate the next Tx descriptor entry. */
676 entry = tp->cur_tx % TX_RING_SIZE;
677
678 tp->tx_buffers[entry].skb = skb;
679 mapping = pci_map_single(tp->pdev, skb->data,
680 skb->len, PCI_DMA_TODEVICE);
681 tp->tx_buffers[entry].mapping = mapping;
682 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
683
684 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
685 flag = 0x60000000; /* No interrupt */
686 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
687 flag = 0xe0000000; /* Tx-done intr. */
688 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
689 flag = 0x60000000; /* No Tx-done intr. */
690 } else { /* Leave room for set_rx_mode() to fill entries. */
691 flag = 0xe0000000; /* Tx-done intr. */
692 netif_stop_queue(dev);
693 }
694 if (entry == TX_RING_SIZE-1)
695 flag = 0xe0000000 | DESC_RING_WRAP;
696
697 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
698 /* if we were using Transmit Automatic Polling, we would need a
699 * wmb() here. */
700 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
701 wmb();
702
703 tp->cur_tx++;
704
705 /* Trigger an immediate transmit demand. */
706 iowrite32(0, tp->base_addr + CSR1);
707
708 spin_unlock_irqrestore(&tp->lock, flags);
709
710 return NETDEV_TX_OK;
711 }
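/*
 * Editor's note (illustrative only): the "flag" values in tulip_start_xmit()
 * mirror the 21x4x TDES1 control bits; a sketch of the assumed layout is
 * 0x20000000 = first segment, 0x40000000 = last segment and 0x80000000 =
 * interrupt on completion, so 0x60000000 queues a packet without a Tx-done
 * interrupt while 0xe0000000 requests one.  The effect is interrupt
 * mitigation: only the descriptor that fills the ring to the halfway mark,
 * the last slots before the ring is full, and the wrap entry ask the chip to
 * interrupt, so Tx completions are batched in tulip_clean_tx_ring().
 */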
712
713 static void tulip_clean_tx_ring(struct tulip_private *tp)
714 {
715 unsigned int dirty_tx;
716
717 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
718 dirty_tx++) {
719 int entry = dirty_tx % TX_RING_SIZE;
720 int status = le32_to_cpu(tp->tx_ring[entry].status);
721
722 if (status < 0) {
723 tp->stats.tx_errors++; /* It wasn't Txed */
724 tp->tx_ring[entry].status = 0;
725 }
726
727 /* Check for Tx filter setup frames. */
728 if (tp->tx_buffers[entry].skb == NULL) {
729 /* test because dummy frames are not mapped */
730 if (tp->tx_buffers[entry].mapping)
731 pci_unmap_single(tp->pdev,
732 tp->tx_buffers[entry].mapping,
733 sizeof(tp->setup_frame),
734 PCI_DMA_TODEVICE);
735 continue;
736 }
737
738 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
739 tp->tx_buffers[entry].skb->len,
740 PCI_DMA_TODEVICE);
741
742 /* Free the original skb. */
743 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
744 tp->tx_buffers[entry].skb = NULL;
745 tp->tx_buffers[entry].mapping = 0;
746 }
747 }
748
749 static void tulip_down (struct net_device *dev)
750 {
751 struct tulip_private *tp = netdev_priv(dev);
752 void __iomem *ioaddr = tp->base_addr;
753 unsigned long flags;
754
755 cancel_work_sync(&tp->media_work);
756
757 #ifdef CONFIG_TULIP_NAPI
758 napi_disable(&tp->napi);
759 #endif
760
761 del_timer_sync (&tp->timer);
762 #ifdef CONFIG_TULIP_NAPI
763 del_timer_sync (&tp->oom_timer);
764 #endif
765 spin_lock_irqsave (&tp->lock, flags);
766
767 /* Disable interrupts by clearing the interrupt mask. */
768 iowrite32 (0x00000000, ioaddr + CSR7);
769
770 /* Stop the Tx and Rx processes. */
771 tulip_stop_rxtx(tp);
772
773 /* prepare receive buffers */
774 tulip_refill_rx(dev);
775
776 /* release any unconsumed transmit buffers */
777 tulip_clean_tx_ring(tp);
778
779 if (ioread32 (ioaddr + CSR6) != 0xffffffff)
780 tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
781
782 spin_unlock_irqrestore (&tp->lock, flags);
783
784 init_timer(&tp->timer);
785 tp->timer.data = (unsigned long)dev;
786 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
787
788 dev->if_port = tp->saved_if_port;
789
790 /* Leave the driver in snooze, not sleep, mode. */
791 tulip_set_power_state (tp, 0, 1);
792 }
793
794 static void tulip_free_ring (struct net_device *dev)
795 {
796 struct tulip_private *tp = netdev_priv(dev);
797 int i;
798
799 /* Free all the skbuffs in the Rx queue. */
800 for (i = 0; i < RX_RING_SIZE; i++) {
801 struct sk_buff *skb = tp->rx_buffers[i].skb;
802 dma_addr_t mapping = tp->rx_buffers[i].mapping;
803
804 tp->rx_buffers[i].skb = NULL;
805 tp->rx_buffers[i].mapping = 0;
806
807 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
808 tp->rx_ring[i].length = 0;
809 /* An invalid address. */
810 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
811 if (skb) {
812 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
813 PCI_DMA_FROMDEVICE);
814 dev_kfree_skb (skb);
815 }
816 }
817
818 for (i = 0; i < TX_RING_SIZE; i++) {
819 struct sk_buff *skb = tp->tx_buffers[i].skb;
820
821 if (skb != NULL) {
822 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
823 skb->len, PCI_DMA_TODEVICE);
824 dev_kfree_skb (skb);
825 }
826 tp->tx_buffers[i].skb = NULL;
827 tp->tx_buffers[i].mapping = 0;
828 }
829 }
830
831 static int tulip_close (struct net_device *dev)
832 {
833 struct tulip_private *tp = netdev_priv(dev);
834 void __iomem *ioaddr = tp->base_addr;
835
836 netif_stop_queue (dev);
837
838 tulip_down (dev);
839
840 if (tulip_debug > 1)
841 dev_printk(KERN_DEBUG, &dev->dev,
842 "Shutting down ethercard, status was %02x\n",
843 ioread32 (ioaddr + CSR5));
844
845 free_irq (dev->irq, dev);
846
847 tulip_free_ring (dev);
848
849 return 0;
850 }
851
852 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
853 {
854 struct tulip_private *tp = netdev_priv(dev);
855 void __iomem *ioaddr = tp->base_addr;
856
857 if (netif_running(dev)) {
858 unsigned long flags;
859
860 spin_lock_irqsave (&tp->lock, flags);
861
862 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
863
864 spin_unlock_irqrestore(&tp->lock, flags);
865 }
866
867 return &tp->stats;
868 }
869
870
871 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
872 {
873 struct tulip_private *np = netdev_priv(dev);
874 strcpy(info->driver, DRV_NAME);
875 strcpy(info->version, DRV_VERSION);
876 strcpy(info->bus_info, pci_name(np->pdev));
877 }
878
879 static const struct ethtool_ops ops = {
880 .get_drvinfo = tulip_get_drvinfo
881 };
882
883 /* Provide ioctl() calls to examine the MII xcvr state. */
884 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
885 {
886 struct tulip_private *tp = netdev_priv(dev);
887 void __iomem *ioaddr = tp->base_addr;
888 struct mii_ioctl_data *data = if_mii(rq);
889 const unsigned int phy_idx = 0;
890 int phy = tp->phys[phy_idx] & 0x1f;
891 unsigned int regnum = data->reg_num;
892
893 switch (cmd) {
894 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
895 if (tp->mii_cnt)
896 data->phy_id = phy;
897 else if (tp->flags & HAS_NWAY)
898 data->phy_id = 32;
899 else if (tp->chip_id == COMET)
900 data->phy_id = 1;
901 else
902 return -ENODEV;
903
904 case SIOCGMIIREG: /* Read MII PHY register. */
905 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
906 int csr12 = ioread32 (ioaddr + CSR12);
907 int csr14 = ioread32 (ioaddr + CSR14);
908 switch (regnum) {
909 case 0:
910 if (((csr14<<5) & 0x1000) ||
911 (dev->if_port == 5 && tp->nwayset))
912 data->val_out = 0x1000;
913 else
914 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
915 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
916 break;
917 case 1:
918 data->val_out =
919 0x1848 +
920 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
921 ((csr12&0x06) == 6 ? 0 : 4);
922 data->val_out |= 0x6048;
923 break;
924 case 4:
925 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
926 data->val_out =
927 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
928 ((csr14 >> 1) & 0x20) + 1;
929 data->val_out |= ((csr14 >> 9) & 0x03C0);
930 break;
931 case 5: data->val_out = tp->lpar; break;
932 default: data->val_out = 0; break;
933 }
934 } else {
935 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
936 }
937 return 0;
938
939 case SIOCSMIIREG: /* Write MII PHY register. */
940 if (regnum & ~0x1f)
941 return -EINVAL;
942 if (data->phy_id == phy) {
943 u16 value = data->val_in;
944 switch (regnum) {
945 case 0: /* Check for autonegotiation on or reset. */
946 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
947 if (tp->full_duplex_lock)
948 tp->full_duplex = (value & 0x0100) ? 1 : 0;
949 break;
950 case 4:
951 tp->advertising[phy_idx] =
952 tp->mii_advertise = data->val_in;
953 break;
954 }
955 }
956 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
957 u16 value = data->val_in;
958 if (regnum == 0) {
959 if ((value & 0x1200) == 0x1200) {
960 if (tp->chip_id == PNIC2) {
961 pnic2_start_nway (dev);
962 } else {
963 t21142_start_nway (dev);
964 }
965 }
966 } else if (regnum == 4)
967 tp->sym_advertise = value;
968 } else {
969 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
970 }
971 return 0;
972 default:
973 return -EOPNOTSUPP;
974 }
975
976 return -EOPNOTSUPP;
977 }
978
979
980 /* Set or clear the multicast filter for this adaptor.
981 Note that we only use exclusion around actually queueing the
982 new frame, not around filling tp->setup_frame. This is non-deterministic
983 when re-entered but still correct. */
984
985 #undef set_bit_le
986 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
987
988 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
989 {
990 struct tulip_private *tp = netdev_priv(dev);
991 u16 hash_table[32];
992 struct netdev_hw_addr *ha;
993 int i;
994 u16 *eaddrs;
995
996 memset(hash_table, 0, sizeof(hash_table));
997 set_bit_le(255, hash_table); /* Broadcast entry */
998 /* This should work on big-endian machines as well. */
999 netdev_for_each_mc_addr(ha, dev) {
1000 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1001
1002 set_bit_le(index, hash_table);
1003 }
1004 for (i = 0; i < 32; i++) {
1005 *setup_frm++ = hash_table[i];
1006 *setup_frm++ = hash_table[i];
1007 }
1008 setup_frm = &tp->setup_frame[13*6];
1009
1010 /* Fill the final entry with our physical address. */
1011 eaddrs = (u16 *)dev->dev_addr;
1012 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1013 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1014 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1015 }
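/*
 * Editor's note (illustrative only): build_setup_frame_hash() lays the frame
 * out as a 512-bit multicast hash table followed by one perfect-filter slot.
 * set_bit_le(i, p) sets bit (i % 8) of byte (i / 8), so set_bit_le(255,
 * hash_table) marks the broadcast entry as bit 7 of byte 31, and each
 * multicast address is hashed with ether_crc_le() and reduced to a 9-bit
 * index (& 0x1ff).  Every 16-bit word is stored twice because only the
 * low-address shortword of each setup-frame longword is used by the chip,
 * and the station address goes into the dedicated slot at offset 13*6.
 */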
1016
1017 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1018 {
1019 struct tulip_private *tp = netdev_priv(dev);
1020 struct netdev_hw_addr *ha;
1021 u16 *eaddrs;
1022
1023 /* We have <= 14 addresses so we can use the wonderful
1024 16 address perfect filtering of the Tulip. */
1025 netdev_for_each_mc_addr(ha, dev) {
1026 eaddrs = (u16 *) ha->addr;
1027 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1028 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1029 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1030 }
1031 /* Fill the unused entries with the broadcast address. */
1032 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1033 setup_frm = &tp->setup_frame[15*6];
1034
1035 /* Fill the final entry with our physical address. */
1036 eaddrs = (u16 *)dev->dev_addr;
1037 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1038 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1039 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1040 }
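/*
 * Editor's note (illustrative only): the perfect-filter setup frame built
 * above is 192 bytes: 16 address entries of 12 bytes each, with every 16-bit
 * half of a MAC address stored twice (again, only the low-address shortword
 * of each longword is used by the chip).  That is why the setup-frame
 * descriptor length elsewhere in this file is written as "0x08000000 | 192",
 * and why unused entries are padded with ff:ff:ff:ff:ff:ff, the broadcast
 * address, so they never match a real station.
 */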
1041
1042
1043 static void set_rx_mode(struct net_device *dev)
1044 {
1045 struct tulip_private *tp = netdev_priv(dev);
1046 void __iomem *ioaddr = tp->base_addr;
1047 int csr6;
1048
1049 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1050
1051 tp->csr6 &= ~0x00D5;
1052 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1053 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1054 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1055 } else if ((netdev_mc_count(dev) > 1000) ||
1056 (dev->flags & IFF_ALLMULTI)) {
1057 /* Too many to filter well -- accept all multicasts. */
1058 tp->csr6 |= AcceptAllMulticast;
1059 csr6 |= AcceptAllMulticast;
1060 } else if (tp->flags & MC_HASH_ONLY) {
1061 /* Some work-alikes have only a 64-entry hash filter table. */
1062 /* Should verify correctness on big-endian/__powerpc__ */
1063 struct netdev_hw_addr *ha;
1064 if (netdev_mc_count(dev) > 64) {
1065 /* Arbitrary non-effective limit. */
1066 tp->csr6 |= AcceptAllMulticast;
1067 csr6 |= AcceptAllMulticast;
1068 } else {
1069 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1070 int filterbit;
1071 netdev_for_each_mc_addr(ha, dev) {
1072 if (tp->flags & COMET_MAC_ADDR)
1073 filterbit = ether_crc_le(ETH_ALEN,
1074 ha->addr);
1075 else
1076 filterbit = ether_crc(ETH_ALEN,
1077 ha->addr) >> 26;
1078 filterbit &= 0x3f;
1079 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1080 if (tulip_debug > 2)
1081 dev_info(&dev->dev,
1082 "Added filter for %pM %08x bit %d\n",
1083 ha->addr,
1084 ether_crc(ETH_ALEN, ha->addr),
1085 filterbit);
1086 }
1087 if (mc_filter[0] == tp->mc_filter[0] &&
1088 mc_filter[1] == tp->mc_filter[1])
1089 ; /* No change. */
1090 else if (tp->flags & IS_ASIX) {
1091 iowrite32(2, ioaddr + CSR13);
1092 iowrite32(mc_filter[0], ioaddr + CSR14);
1093 iowrite32(3, ioaddr + CSR13);
1094 iowrite32(mc_filter[1], ioaddr + CSR14);
1095 } else if (tp->flags & COMET_MAC_ADDR) {
1096 iowrite32(mc_filter[0], ioaddr + 0xAC);
1097 iowrite32(mc_filter[1], ioaddr + 0xB0);
1098 }
1099 tp->mc_filter[0] = mc_filter[0];
1100 tp->mc_filter[1] = mc_filter[1];
1101 }
1102 } else {
1103 unsigned long flags;
1104 u32 tx_flags = 0x08000000 | 192;
1105
1106 /* Note that only the low-address shortword of setup_frame is valid!
1107 The values are doubled for big-endian architectures. */
1108 if (netdev_mc_count(dev) > 14) {
1109 /* Must use a multicast hash table. */
1110 build_setup_frame_hash(tp->setup_frame, dev);
1111 tx_flags = 0x08400000 | 192;
1112 } else {
1113 build_setup_frame_perfect(tp->setup_frame, dev);
1114 }
1115
1116 spin_lock_irqsave(&tp->lock, flags);
1117
1118 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1119 /* Same setup recently queued, we need not add it. */
1120 } else {
1121 unsigned int entry;
1122 int dummy = -1;
1123
1124 /* Now add this frame to the Tx list. */
1125
1126 entry = tp->cur_tx++ % TX_RING_SIZE;
1127
1128 if (entry != 0) {
1129 /* Avoid a chip erratum by prefixing a dummy entry. */
1130 tp->tx_buffers[entry].skb = NULL;
1131 tp->tx_buffers[entry].mapping = 0;
1132 tp->tx_ring[entry].length =
1133 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1134 tp->tx_ring[entry].buffer1 = 0;
1135 /* Must set DescOwned later to avoid race with chip */
1136 dummy = entry;
1137 entry = tp->cur_tx++ % TX_RING_SIZE;
1138
1139 }
1140
1141 tp->tx_buffers[entry].skb = NULL;
1142 tp->tx_buffers[entry].mapping =
1143 pci_map_single(tp->pdev, tp->setup_frame,
1144 sizeof(tp->setup_frame),
1145 PCI_DMA_TODEVICE);
1146 /* Put the setup frame on the Tx list. */
1147 if (entry == TX_RING_SIZE-1)
1148 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1149 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1150 tp->tx_ring[entry].buffer1 =
1151 cpu_to_le32(tp->tx_buffers[entry].mapping);
1152 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1153 if (dummy >= 0)
1154 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1155 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1156 netif_stop_queue(dev);
1157
1158 /* Trigger an immediate transmit demand. */
1159 iowrite32(0, ioaddr + CSR1);
1160 }
1161
1162 spin_unlock_irqrestore(&tp->lock, flags);
1163 }
1164
1165 iowrite32(csr6, ioaddr + CSR6);
1166 }
1167
1168 #ifdef CONFIG_TULIP_MWI
1169 static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1170 struct net_device *dev)
1171 {
1172 struct tulip_private *tp = netdev_priv(dev);
1173 u8 cache;
1174 u16 pci_command;
1175 u32 csr0;
1176
1177 if (tulip_debug > 3)
1178 printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));
1179
1180 tp->csr0 = csr0 = 0;
1181
1182 /* if we have any cache line size at all, we can do MRM and MWI */
1183 csr0 |= MRM | MWI;
1184
1185 /* Enable MWI in the standard PCI command bit.
1186 * Check for the case where MWI is desired but not available
1187 */
1188 pci_try_set_mwi(pdev);
1189
1190 /* read result from hardware (in case bit refused to enable) */
1191 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1192 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1193 csr0 &= ~MWI;
1194
1195 /* if cache line size hardwired to zero, no MWI */
1196 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1197 if ((csr0 & MWI) && (cache == 0)) {
1198 csr0 &= ~MWI;
1199 pci_clear_mwi(pdev);
1200 }
1201
1202 /* assign per-cacheline-size cache alignment and
1203 * burst length values
1204 */
1205 switch (cache) {
1206 case 8:
1207 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1208 break;
1209 case 16:
1210 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1211 break;
1212 case 32:
1213 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1214 break;
1215 default:
1216 cache = 0;
1217 break;
1218 }
1219
1220 /* if we have a good cache line size, we by now have a good
1221 * csr0, so save it and exit
1222 */
1223 if (cache)
1224 goto out;
1225
1226 /* we don't have a good csr0 or cache line size, disable MWI */
1227 if (csr0 & MWI) {
1228 pci_clear_mwi(pdev);
1229 csr0 &= ~MWI;
1230 }
1231
1232 /* sane defaults for burst length and cache alignment
1233 * originally from de4x5 driver
1234 */
1235 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1236
1237 out:
1238 tp->csr0 = csr0;
1239 if (tulip_debug > 2)
1240 printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
1241 pci_name(pdev), cache, csr0);
1242 }
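/*
 * Editor's note (illustrative only): PCI_CACHE_LINE_SIZE is reported in
 * 32-bit dwords, so the switch above maps, for example, a cache line of
 * 8 dwords (32 bytes) to an 8-longword cache-alignment value and a
 * 16-longword burst, assuming CALShift and BurstLenShift in tulip.h place
 * those fields at CSR0 bits 15:14 and 13:8 as in the table near the top of
 * this file.  When no usable cache line size is reported, the code falls
 * back to the de4x5-style default of an 8-longword burst with 8-longword
 * alignment.
 */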
1243 #endif
1244
1245 /*
1246 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1247 * is the DM910X and the on-chip ULi devices.
1248 */
1249
1250 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1251 {
1252 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1253 return 1;
1254 return 0;
1255 }
1256
1257 static const struct net_device_ops tulip_netdev_ops = {
1258 .ndo_open = tulip_open,
1259 .ndo_start_xmit = tulip_start_xmit,
1260 .ndo_tx_timeout = tulip_tx_timeout,
1261 .ndo_stop = tulip_close,
1262 .ndo_get_stats = tulip_get_stats,
1263 .ndo_do_ioctl = private_ioctl,
1264 .ndo_set_multicast_list = set_rx_mode,
1265 .ndo_change_mtu = eth_change_mtu,
1266 .ndo_set_mac_address = eth_mac_addr,
1267 .ndo_validate_addr = eth_validate_addr,
1268 #ifdef CONFIG_NET_POLL_CONTROLLER
1269 .ndo_poll_controller = poll_tulip,
1270 #endif
1271 };
1272
1273 static int __devinit tulip_init_one (struct pci_dev *pdev,
1274 const struct pci_device_id *ent)
1275 {
1276 struct tulip_private *tp;
1277 /* See note below on the multiport cards. */
1278 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1279 static struct pci_device_id early_486_chipsets[] = {
1280 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1281 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1282 { },
1283 };
1284 static int last_irq;
1285 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1286 int i, irq;
1287 unsigned short sum;
1288 unsigned char *ee_data;
1289 struct net_device *dev;
1290 void __iomem *ioaddr;
1291 static int board_idx = -1;
1292 int chip_idx = ent->driver_data;
1293 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1294 unsigned int eeprom_missing = 0;
1295 unsigned int force_csr0 = 0;
1296
1297 #ifndef MODULE
1298 if (tulip_debug > 0)
1299 printk_once(KERN_INFO "%s", version);
1300 #endif
1301
1302 board_idx++;
1303
1304 /*
1305 * LAN Media cards wire a tulip chip to a WAN interface. They need a very
1306 * different driver (the lmc driver).
1307 */
1308
1309 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1310 pr_err(PFX "skipping LMC card\n");
1311 return -ENODEV;
1312 }
1313
1314 /*
1315 * DM910x chips should be handled by the dmfe driver, except
1316 * on-board chips on SPARC systems. Also, early DM9100s need
1317 * software CRC which only the dmfe driver supports.
1318 */
1319
1320 #ifdef CONFIG_TULIP_DM910X
1321 if (chip_idx == DM910X) {
1322 struct device_node *dp;
1323
1324 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1325 pdev->revision < 0x30) {
1326 pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
1327 return -ENODEV;
1328 }
1329
1330 dp = pci_device_to_OF_node(pdev);
1331 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1332 pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
1333 return -ENODEV;
1334 }
1335 }
1336 #endif
1337
1338 /*
1339 * Looks for early PCI chipsets where people report hangs
1340 * without the workarounds being on.
1341 */
1342
1343 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1344 aligned. Aries might need this too. The Saturn errata are not
1345 pretty reading but thankfully it's an old 486 chipset.
1346
1347 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1348 Saturn.
1349 */
1350
1351 if (pci_dev_present(early_486_chipsets)) {
1352 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1353 force_csr0 = 1;
1354 }
1355
1356 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1357 if (chip_idx == AX88140) {
1358 if ((csr0 & 0x3f00) == 0)
1359 csr0 |= 0x2000;
1360 }
1361
1362 /* PNIC doesn't have MWI/MRL/MRM... */
1363 if (chip_idx == LC82C168)
1364 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1365
1366 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1367 if (tulip_uli_dm_quirk(pdev)) {
1368 csr0 &= ~0x01f100ff;
1369 #if defined(CONFIG_SPARC)
1370 csr0 = (csr0 & ~0xff00) | 0xe000;
1371 #endif
1372 }
1373 /*
1374 * And back to business
1375 */
1376
1377 i = pci_enable_device(pdev);
1378 if (i) {
1379 pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
1380 board_idx);
1381 return i;
1382 }
1383
1384 irq = pdev->irq;
1385
1386 /* alloc_etherdev ensures aligned and zeroed private structures */
1387 dev = alloc_etherdev (sizeof (*tp));
1388 if (!dev) {
1389 pr_err(PFX "ether device alloc failed, aborting\n");
1390 return -ENOMEM;
1391 }
1392
1393 SET_NETDEV_DEV(dev, &pdev->dev);
1394 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1395 pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1396 pci_name(pdev),
1397 (unsigned long long)pci_resource_len (pdev, 0),
1398 (unsigned long long)pci_resource_start (pdev, 0));
1399 goto err_out_free_netdev;
1400 }
1401
1402 /* grab all resources from both PIO and MMIO regions, as we
1403 * don't want anyone else messing around with our hardware */
1404 if (pci_request_regions (pdev, DRV_NAME))
1405 goto err_out_free_netdev;
1406
1407 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1408
1409 if (!ioaddr)
1410 goto err_out_free_res;
1411
1412 /*
1413 * initialize private data structure 'tp'
1414 * it is zeroed and aligned in alloc_etherdev
1415 */
1416 tp = netdev_priv(dev);
1417 tp->dev = dev;
1418
1419 tp->rx_ring = pci_alloc_consistent(pdev,
1420 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1421 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1422 &tp->rx_ring_dma);
1423 if (!tp->rx_ring)
1424 goto err_out_mtable;
1425 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1426 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1427
1428 tp->chip_id = chip_idx;
1429 tp->flags = tulip_tbl[chip_idx].flags;
1430 tp->pdev = pdev;
1431 tp->base_addr = ioaddr;
1432 tp->revision = pdev->revision;
1433 tp->csr0 = csr0;
1434 spin_lock_init(&tp->lock);
1435 spin_lock_init(&tp->mii_lock);
1436 init_timer(&tp->timer);
1437 tp->timer.data = (unsigned long)dev;
1438 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1439
1440 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1441
1442 dev->base_addr = (unsigned long)ioaddr;
1443
1444 #ifdef CONFIG_TULIP_MWI
1445 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1446 tulip_mwi_config (pdev, dev);
1447 #endif
1448
1449 /* Stop the chip's Tx and Rx processes. */
1450 tulip_stop_rxtx(tp);
1451
1452 pci_set_master(pdev);
1453
1454 #ifdef CONFIG_GSC
1455 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1456 switch (pdev->subsystem_device) {
1457 default:
1458 break;
1459 case 0x1061:
1460 case 0x1062:
1461 case 0x1063:
1462 case 0x1098:
1463 case 0x1099:
1464 case 0x10EE:
1465 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1466 chip_name = "GSC DS21140 Tulip";
1467 }
1468 }
1469 #endif
1470
1471 /* Clear the missed-packet counter. */
1472 ioread32(ioaddr + CSR8);
1473
1474 /* The station address ROM is read byte serially. The register must
1475 be polled, waiting for the value to be read bit serially from the
1476 EEPROM.
1477 */
1478 ee_data = tp->eeprom;
1479 memset(ee_data, 0, sizeof(tp->eeprom));
1480 sum = 0;
1481 if (chip_idx == LC82C168) {
1482 for (i = 0; i < 3; i++) {
1483 int value, boguscnt = 100000;
1484 iowrite32(0x600 | i, ioaddr + 0x98);
1485 do {
1486 value = ioread32(ioaddr + CSR9);
1487 } while (value < 0 && --boguscnt > 0);
1488 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1489 sum += value & 0xffff;
1490 }
1491 } else if (chip_idx == COMET) {
1492 /* No need to read the EEPROM. */
1493 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1494 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1495 for (i = 0; i < 6; i ++)
1496 sum += dev->dev_addr[i];
1497 } else {
1498 /* A serial EEPROM interface, we read now and sort it out later. */
1499 int sa_offset = 0;
1500 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1501 int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1502
1503 if (ee_max_addr > sizeof(tp->eeprom))
1504 ee_max_addr = sizeof(tp->eeprom);
1505
1506 for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1507 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1508 ee_data[i] = data & 0xff;
1509 ee_data[i + 1] = data >> 8;
1510 }
1511
1512 /* DEC now has a specification (see Notes) but early board makers
1513 just put the address in the first EEPROM locations. */
1514 /* This does memcmp(ee_data, ee_data+16, 8) */
1515 for (i = 0; i < 8; i ++)
1516 if (ee_data[i] != ee_data[16+i])
1517 sa_offset = 20;
1518 if (chip_idx == CONEXANT) {
1519 /* Check that the tuple type and length is correct. */
1520 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1521 sa_offset = 0x19A;
1522 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1523 ee_data[2] == 0) {
1524 sa_offset = 2; /* Grrr, damn Matrox boards. */
1525 multiport_cnt = 4;
1526 }
1527 #ifdef CONFIG_MIPS_COBALT
1528 if ((pdev->bus->number == 0) &&
1529 ((PCI_SLOT(pdev->devfn) == 7) ||
1530 (PCI_SLOT(pdev->devfn) == 12))) {
1531 /* Cobalt MAC address in first EEPROM locations. */
1532 sa_offset = 0;
1533 /* Ensure our media table fixup gets applied */
1534 memcpy(ee_data + 16, ee_data, 8);
1535 }
1536 #endif
1537 #ifdef CONFIG_GSC
1538 /* Check to see if we have a broken srom */
1539 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1540 /* pci_vendor_id and subsystem_id are swapped */
1541 ee_data[0] = ee_data[2];
1542 ee_data[1] = ee_data[3];
1543 ee_data[2] = 0x61;
1544 ee_data[3] = 0x10;
1545
1546 /* HSC-PCI boards need to be byte-swapped and shifted
1547 * up 1 word. This shift needs to happen at the end
1548 * of the MAC first because of the 2 byte overlap.
1549 */
1550 for (i = 4; i >= 0; i -= 2) {
1551 ee_data[17 + i + 3] = ee_data[17 + i];
1552 ee_data[16 + i + 5] = ee_data[16 + i];
1553 }
1554 }
1555 #endif
1556
1557 for (i = 0; i < 6; i ++) {
1558 dev->dev_addr[i] = ee_data[i + sa_offset];
1559 sum += ee_data[i + sa_offset];
1560 }
1561 }
1562 /* Lite-On boards have the address byte-swapped. */
1563 if ((dev->dev_addr[0] == 0xA0 ||
1564 dev->dev_addr[0] == 0xC0 ||
1565 dev->dev_addr[0] == 0x02) &&
1566 dev->dev_addr[1] == 0x00)
1567 for (i = 0; i < 6; i+=2) {
1568 char tmp = dev->dev_addr[i];
1569 dev->dev_addr[i] = dev->dev_addr[i+1];
1570 dev->dev_addr[i+1] = tmp;
1571 }
1572 /* On the Zynx 315 Etherarray and other multiport boards only the
1573 first Tulip has an EEPROM.
1574 On Sparc systems the mac address is held in the OBP property
1575 "local-mac-address".
1576 The addresses of the subsequent ports are derived from the first.
1577 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1578 that here as well. */
1579 if (sum == 0 || sum == 6*0xff) {
1580 #if defined(CONFIG_SPARC)
1581 struct device_node *dp = pci_device_to_OF_node(pdev);
1582 const unsigned char *addr;
1583 int len;
1584 #endif
1585 eeprom_missing = 1;
1586 for (i = 0; i < 5; i++)
1587 dev->dev_addr[i] = last_phys_addr[i];
1588 dev->dev_addr[i] = last_phys_addr[i] + 1;
1589 #if defined(CONFIG_SPARC)
1590 addr = of_get_property(dp, "local-mac-address", &len);
1591 if (addr && len == 6)
1592 memcpy(dev->dev_addr, addr, 6);
1593 #endif
1594 #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1595 if (last_irq)
1596 irq = last_irq;
1597 #endif
1598 }
1599
1600 for (i = 0; i < 6; i++)
1601 last_phys_addr[i] = dev->dev_addr[i];
1602 last_irq = irq;
1603 dev->irq = irq;
1604
1605 /* The lower four bits are the media type. */
1606 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1607 if (options[board_idx] & MEDIA_MASK)
1608 tp->default_port = options[board_idx] & MEDIA_MASK;
1609 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1610 tp->full_duplex = 1;
1611 if (mtu[board_idx] > 0)
1612 dev->mtu = mtu[board_idx];
1613 }
1614 if (dev->mem_start & MEDIA_MASK)
1615 tp->default_port = dev->mem_start & MEDIA_MASK;
1616 if (tp->default_port) {
1617 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1618 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1619 tp->medialock = 1;
1620 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1621 tp->full_duplex = 1;
1622 }
1623 if (tp->full_duplex)
1624 tp->full_duplex_lock = 1;
1625
1626 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1627 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
1628 tp->mii_advertise = media2advert[tp->default_port - 9];
1629 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1630 }
1631
1632 if (tp->flags & HAS_MEDIA_TABLE) {
1633 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1634 tulip_parse_eeprom(dev);
1635 strcpy(dev->name, "eth%d"); /* un-hack */
1636 }
1637
1638 if ((tp->flags & ALWAYS_CHECK_MII) ||
1639 (tp->mtable && tp->mtable->has_mii) ||
1640 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1641 if (tp->mtable && tp->mtable->has_mii) {
1642 for (i = 0; i < tp->mtable->leafcount; i++)
1643 if (tp->mtable->mleaf[i].media == 11) {
1644 tp->cur_index = i;
1645 tp->saved_if_port = dev->if_port;
1646 tulip_select_media(dev, 2);
1647 dev->if_port = tp->saved_if_port;
1648 break;
1649 }
1650 }
1651
1652 /* Find the connected MII xcvrs.
1653 Doing this in open() would allow detecting external xcvrs
1654 later, but takes much time. */
1655 tulip_find_mii (dev, board_idx);
1656 }
1657
1658 /* The Tulip-specific entries in the device structure. */
1659 dev->netdev_ops = &tulip_netdev_ops;
1660 dev->watchdog_timeo = TX_TIMEOUT;
1661 #ifdef CONFIG_TULIP_NAPI
1662 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1663 #endif
1664 SET_ETHTOOL_OPS(dev, &ops);
1665
1666 if (register_netdev(dev))
1667 goto err_out_free_ring;
1668
1669 pci_set_drvdata(pdev, dev);
1670
1671 dev_info(&dev->dev,
1672 #ifdef CONFIG_TULIP_MMIO
1673 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1674 #else
1675 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1676 #endif
1677 chip_name, pdev->revision,
1678 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1679 eeprom_missing ? " EEPROM not present," : "",
1680 dev->dev_addr, irq);
1681
1682 if (tp->chip_id == PNIC2)
1683 tp->link_change = pnic2_lnk_change;
1684 else if (tp->flags & HAS_NWAY)
1685 tp->link_change = t21142_lnk_change;
1686 else if (tp->flags & HAS_PNICNWAY)
1687 tp->link_change = pnic_lnk_change;
1688
1689 /* Reset the xcvr interface and turn on heartbeat. */
1690 switch (chip_idx) {
1691 case DC21140:
1692 case DM910X:
1693 default:
1694 if (tp->mtable)
1695 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1696 break;
1697 case DC21142:
1698 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1699 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1700 iowrite32(0x0000, ioaddr + CSR13);
1701 iowrite32(0x0000, ioaddr + CSR14);
1702 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1703 } else
1704 t21142_start_nway(dev);
1705 break;
1706 case PNIC2:
1707 /* just do a reset for sanity's sake */
1708 iowrite32(0x0000, ioaddr + CSR13);
1709 iowrite32(0x0000, ioaddr + CSR14);
1710 break;
1711 case LC82C168:
1712 if ( ! tp->mii_cnt) {
1713 tp->nway = 1;
1714 tp->nwayset = 0;
1715 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1716 iowrite32(0x30, ioaddr + CSR12);
1717 iowrite32(0x0001F078, ioaddr + CSR6);
1718 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1719 }
1720 break;
1721 case MX98713:
1722 case COMPEX9881:
1723 iowrite32(0x00000000, ioaddr + CSR6);
1724 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1725 iowrite32(0x00000001, ioaddr + CSR13);
1726 break;
1727 case MX98715:
1728 case MX98725:
1729 iowrite32(0x01a80000, ioaddr + CSR6);
1730 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1731 iowrite32(0x00001000, ioaddr + CSR12);
1732 break;
1733 case COMET:
1734 /* No initialization necessary. */
1735 break;
1736 }
1737
1738 /* put the chip in snooze mode until opened */
1739 tulip_set_power_state (tp, 0, 1);
1740
1741 return 0;
1742
1743 err_out_free_ring:
1744 pci_free_consistent (pdev,
1745 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1746 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1747 tp->rx_ring, tp->rx_ring_dma);
1748
1749 err_out_mtable:
1750 kfree (tp->mtable);
1751 pci_iounmap(pdev, ioaddr);
1752
1753 err_out_free_res:
1754 pci_release_regions (pdev);
1755
1756 err_out_free_netdev:
1757 free_netdev (dev);
1758 return -ENODEV;
1759 }
1760
1761
1762 #ifdef CONFIG_PM
1763
1764 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1765 {
1766 struct net_device *dev = pci_get_drvdata(pdev);
1767
1768 if (!dev)
1769 return -EINVAL;
1770
1771 if (!netif_running(dev))
1772 goto save_state;
1773
1774 tulip_down(dev);
1775
1776 netif_device_detach(dev);
1777 free_irq(dev->irq, dev);
1778
1779 save_state:
1780 pci_save_state(pdev);
1781 pci_disable_device(pdev);
1782 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1783
1784 return 0;
1785 }
1786
1787
1788 static int tulip_resume(struct pci_dev *pdev)
1789 {
1790 struct net_device *dev = pci_get_drvdata(pdev);
1791 int retval;
1792
1793 if (!dev)
1794 return -EINVAL;
1795
1796 pci_set_power_state(pdev, PCI_D0);
1797 pci_restore_state(pdev);
1798
1799 if (!netif_running(dev))
1800 return 0;
1801
1802 if ((retval = pci_enable_device(pdev))) {
1803 pr_err(PFX "pci_enable_device failed in resume\n");
1804 return retval;
1805 }
1806
1807 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1808 pr_err(PFX "request_irq failed in resume\n");
1809 return retval;
1810 }
1811
1812 netif_device_attach(dev);
1813
1814 if (netif_running(dev))
1815 tulip_up(dev);
1816
1817 return 0;
1818 }
1819
1820 #endif /* CONFIG_PM */
1821
1822
1823 static void __devexit tulip_remove_one (struct pci_dev *pdev)
1824 {
1825 struct net_device *dev = pci_get_drvdata (pdev);
1826 struct tulip_private *tp;
1827
1828 if (!dev)
1829 return;
1830
1831 tp = netdev_priv(dev);
1832 unregister_netdev(dev);
1833 pci_free_consistent (pdev,
1834 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1835 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1836 tp->rx_ring, tp->rx_ring_dma);
1837 kfree (tp->mtable);
1838 pci_iounmap(pdev, tp->base_addr);
1839 free_netdev (dev);
1840 pci_release_regions (pdev);
1841 pci_set_drvdata (pdev, NULL);
1842
1843 /* pci_power_off (pdev, -1); */
1844 }
1845
1846 #ifdef CONFIG_NET_POLL_CONTROLLER
1847 /*
1848 * Polling 'interrupt' - used by things like netconsole to send skbs
1849 * without having to re-enable interrupts. It's not called while
1850 * the interrupt routine is executing.
1851 */
1852
1853 static void poll_tulip (struct net_device *dev)
1854 {
1855 /* disable_irq here is not very nice, but with the lockless
1856 interrupt handler we have no other choice. */
1857 disable_irq(dev->irq);
1858 tulip_interrupt (dev->irq, dev);
1859 enable_irq(dev->irq);
1860 }
1861 #endif
1862
1863 static struct pci_driver tulip_driver = {
1864 .name = DRV_NAME,
1865 .id_table = tulip_pci_tbl,
1866 .probe = tulip_init_one,
1867 .remove = __devexit_p(tulip_remove_one),
1868 #ifdef CONFIG_PM
1869 .suspend = tulip_suspend,
1870 .resume = tulip_resume,
1871 #endif /* CONFIG_PM */
1872 };
1873
1874
1875 static int __init tulip_init (void)
1876 {
1877 #ifdef MODULE
1878 pr_info("%s", version);
1879 #endif
1880
1881 /* copy module parms into globals */
1882 tulip_rx_copybreak = rx_copybreak;
1883 tulip_max_interrupt_work = max_interrupt_work;
1884
1885 /* probe for and init boards */
1886 return pci_register_driver(&tulip_driver);
1887 }
1888
1889
1890 static void __exit tulip_cleanup (void)
1891 {
1892 pci_unregister_driver (&tulip_driver);
1893 }
1894
1895
1896 module_init(tulip_init);
1897 module_exit(tulip_cleanup);